ngram column (list lengths: 0 – 67.8k); each example row below corresponds to one source file.
[ "\"\"\" Training \"\"\" opt = Options().parse() opt.print_freq = opt.batchsize seed(opt.manualseed) print(\"Seed:\", str(torch.seed())) if", "seed ## def main(): \"\"\" Training \"\"\" opt = Options().parse() opt.print_freq = opt.batchsize", "= opt.batchsize seed(opt.manualseed) print(\"Seed:\", str(torch.seed())) if opt.phase == \"inference\": opt.batchsize=1 data = load_data(opt)", "the following command from the terminal. run train.py \\ --dataset cifar10 \\ --abnormal_class", "\\ --display \\ \"\"\" ## # LIBRARIES import time from options import Options", "torch import numpy as np from lib.models.skipganomaly import seed ## def main(): \"\"\"", "Options from lib.data.dataloader import load_data from lib.models import load_model import torch import numpy", "train_start = time.time() model.train() train_time = time.time() - train_start print (f'Train time: {train_time}", "time.time() - train_start print (f'Train time: {train_time} secs') if __name__ == '__main__': main()", "train.py \\ --dataset cifar10 \\ --abnormal_class airplane \\ --display \\ \"\"\" ## #", "from the terminal. run train.py \\ --dataset cifar10 \\ --abnormal_class airplane \\ --display", "time from options import Options from lib.data.dataloader import load_data from lib.models import load_model", "opt.batchsize seed(opt.manualseed) print(\"Seed:\", str(torch.seed())) if opt.phase == \"inference\": opt.batchsize=1 data = load_data(opt) model", "== \"inference\": model.inference() else: if opt.path_to_weights: model.test() else: train_start = time.time() model.train() train_time", "= time.time() model.train() train_time = time.time() - train_start print (f'Train time: {train_time} secs')", "train_time = time.time() - train_start print (f'Train time: {train_time} secs') if __name__ ==", "load_data from lib.models import load_model import torch import numpy as np from lib.models.skipganomaly", "= load_data(opt) model = load_model(opt, data) if opt.phase == \"inference\": model.inference() else: if", "airplane \\ --display \\ \"\"\" ## # LIBRARIES import time from options import", "if opt.path_to_weights: model.test() else: train_start = time.time() model.train() train_time = time.time() - train_start", "load_model(opt, data) if opt.phase == \"inference\": model.inference() else: if opt.path_to_weights: model.test() else: train_start", "--dataset cifar10 \\ --abnormal_class airplane \\ --display \\ \"\"\" ## # LIBRARIES import", "\"\"\" opt = Options().parse() opt.print_freq = opt.batchsize seed(opt.manualseed) print(\"Seed:\", str(torch.seed())) if opt.phase ==", "the terminal. run train.py \\ --dataset cifar10 \\ --abnormal_class airplane \\ --display \\", "else: if opt.path_to_weights: model.test() else: train_start = time.time() model.train() train_time = time.time() -", "seed(opt.manualseed) print(\"Seed:\", str(torch.seed())) if opt.phase == \"inference\": opt.batchsize=1 data = load_data(opt) model =", "from lib.models.skipganomaly import seed ## def main(): \"\"\" Training \"\"\" opt = Options().parse()", "opt.print_freq = opt.batchsize seed(opt.manualseed) print(\"Seed:\", str(torch.seed())) if opt.phase == \"inference\": opt.batchsize=1 data =", "run train.py \\ --dataset cifar10 \\ --abnormal_class airplane \\ --display \\ \"\"\" ##", "LIBRARIES import time from options import Options from lib.data.dataloader import load_data from lib.models", "Run the following command from the terminal. 
run train.py \\ --dataset cifar10 \\", "model = load_model(opt, data) if opt.phase == \"inference\": model.inference() else: if opt.path_to_weights: model.test()", "from lib.models import load_model import torch import numpy as np from lib.models.skipganomaly import", "## def main(): \"\"\" Training \"\"\" opt = Options().parse() opt.print_freq = opt.batchsize seed(opt.manualseed)", "TRAIN SKIP/GANOMALY . Example: Run the following command from the terminal. run train.py", "import load_model import torch import numpy as np from lib.models.skipganomaly import seed ##", "\"inference\": opt.batchsize=1 data = load_data(opt) model = load_model(opt, data) if opt.phase == \"inference\":", "--display \\ \"\"\" ## # LIBRARIES import time from options import Options from", "main(): \"\"\" Training \"\"\" opt = Options().parse() opt.print_freq = opt.batchsize seed(opt.manualseed) print(\"Seed:\", str(torch.seed()))", ". Example: Run the following command from the terminal. run train.py \\ --dataset", "Training \"\"\" opt = Options().parse() opt.print_freq = opt.batchsize seed(opt.manualseed) print(\"Seed:\", str(torch.seed())) if opt.phase", "else: train_start = time.time() model.train() train_time = time.time() - train_start print (f'Train time:", "lib.models import load_model import torch import numpy as np from lib.models.skipganomaly import seed", "## # LIBRARIES import time from options import Options from lib.data.dataloader import load_data", "SKIP/GANOMALY . Example: Run the following command from the terminal. run train.py \\", "cifar10 \\ --abnormal_class airplane \\ --display \\ \"\"\" ## # LIBRARIES import time", "import load_data from lib.models import load_model import torch import numpy as np from", "import numpy as np from lib.models.skipganomaly import seed ## def main(): \"\"\" Training", "Options().parse() opt.print_freq = opt.batchsize seed(opt.manualseed) print(\"Seed:\", str(torch.seed())) if opt.phase == \"inference\": opt.batchsize=1 data", "\\ \"\"\" ## # LIBRARIES import time from options import Options from lib.data.dataloader", "model.inference() else: if opt.path_to_weights: model.test() else: train_start = time.time() model.train() train_time = time.time()", "lib.models.skipganomaly import seed ## def main(): \"\"\" Training \"\"\" opt = Options().parse() opt.print_freq", "opt = Options().parse() opt.print_freq = opt.batchsize seed(opt.manualseed) print(\"Seed:\", str(torch.seed())) if opt.phase == \"inference\":", "model.train() train_time = time.time() - train_start print (f'Train time: {train_time} secs') if __name__", "<filename>run.py \"\"\" TRAIN SKIP/GANOMALY . 
Example: Run the following command from the terminal.", "\\ --dataset cifar10 \\ --abnormal_class airplane \\ --display \\ \"\"\" ## # LIBRARIES", "\"inference\": model.inference() else: if opt.path_to_weights: model.test() else: train_start = time.time() model.train() train_time =", "from lib.data.dataloader import load_data from lib.models import load_model import torch import numpy as", "\"\"\" ## # LIBRARIES import time from options import Options from lib.data.dataloader import", "if opt.phase == \"inference\": model.inference() else: if opt.path_to_weights: model.test() else: train_start = time.time()", "import Options from lib.data.dataloader import load_data from lib.models import load_model import torch import", "np from lib.models.skipganomaly import seed ## def main(): \"\"\" Training \"\"\" opt =", "= Options().parse() opt.print_freq = opt.batchsize seed(opt.manualseed) print(\"Seed:\", str(torch.seed())) if opt.phase == \"inference\": opt.batchsize=1", "= load_model(opt, data) if opt.phase == \"inference\": model.inference() else: if opt.path_to_weights: model.test() else:", "= time.time() - train_start print (f'Train time: {train_time} secs') if __name__ == '__main__':", "as np from lib.models.skipganomaly import seed ## def main(): \"\"\" Training \"\"\" opt", "options import Options from lib.data.dataloader import load_data from lib.models import load_model import torch", "data = load_data(opt) model = load_model(opt, data) if opt.phase == \"inference\": model.inference() else:", "terminal. run train.py \\ --dataset cifar10 \\ --abnormal_class airplane \\ --display \\ \"\"\"", "command from the terminal. run train.py \\ --dataset cifar10 \\ --abnormal_class airplane \\", "opt.phase == \"inference\": opt.batchsize=1 data = load_data(opt) model = load_model(opt, data) if opt.phase", "opt.path_to_weights: model.test() else: train_start = time.time() model.train() train_time = time.time() - train_start print", "following command from the terminal. run train.py \\ --dataset cifar10 \\ --abnormal_class airplane", "import time from options import Options from lib.data.dataloader import load_data from lib.models import", "import torch import numpy as np from lib.models.skipganomaly import seed ## def main():", "== \"inference\": opt.batchsize=1 data = load_data(opt) model = load_model(opt, data) if opt.phase ==", "\\ --abnormal_class airplane \\ --display \\ \"\"\" ## # LIBRARIES import time from", "# LIBRARIES import time from options import Options from lib.data.dataloader import load_data from", "--abnormal_class airplane \\ --display \\ \"\"\" ## # LIBRARIES import time from options", "\"\"\" TRAIN SKIP/GANOMALY . Example: Run the following command from the terminal. 
run", "print(\"Seed:\", str(torch.seed())) if opt.phase == \"inference\": opt.batchsize=1 data = load_data(opt) model = load_model(opt,", "def main(): \"\"\" Training \"\"\" opt = Options().parse() opt.print_freq = opt.batchsize seed(opt.manualseed) print(\"Seed:\",", "from options import Options from lib.data.dataloader import load_data from lib.models import load_model import", "opt.phase == \"inference\": model.inference() else: if opt.path_to_weights: model.test() else: train_start = time.time() model.train()", "lib.data.dataloader import load_data from lib.models import load_model import torch import numpy as np", "if opt.phase == \"inference\": opt.batchsize=1 data = load_data(opt) model = load_model(opt, data) if", "load_data(opt) model = load_model(opt, data) if opt.phase == \"inference\": model.inference() else: if opt.path_to_weights:", "numpy as np from lib.models.skipganomaly import seed ## def main(): \"\"\" Training \"\"\"", "opt.batchsize=1 data = load_data(opt) model = load_model(opt, data) if opt.phase == \"inference\": model.inference()", "Example: Run the following command from the terminal. run train.py \\ --dataset cifar10", "load_model import torch import numpy as np from lib.models.skipganomaly import seed ## def", "str(torch.seed())) if opt.phase == \"inference\": opt.batchsize=1 data = load_data(opt) model = load_model(opt, data)", "model.test() else: train_start = time.time() model.train() train_time = time.time() - train_start print (f'Train", "time.time() model.train() train_time = time.time() - train_start print (f'Train time: {train_time} secs') if", "import seed ## def main(): \"\"\" Training \"\"\" opt = Options().parse() opt.print_freq =", "data) if opt.phase == \"inference\": model.inference() else: if opt.path_to_weights: model.test() else: train_start =" ]
[ "mixins, viewsets from ..models import Rank from ..serializers import RankSerializer class RanksViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):", "viewsets from ..models import Rank from ..serializers import RankSerializer class RanksViewSet(mixins.ListModelMixin, viewsets.GenericViewSet): serializer_class", "..models import Rank from ..serializers import RankSerializer class RanksViewSet(mixins.ListModelMixin, viewsets.GenericViewSet): serializer_class = RankSerializer", "rest_framework import mixins, viewsets from ..models import Rank from ..serializers import RankSerializer class", "Rank from ..serializers import RankSerializer class RanksViewSet(mixins.ListModelMixin, viewsets.GenericViewSet): serializer_class = RankSerializer queryset =", "import mixins, viewsets from ..models import Rank from ..serializers import RankSerializer class RanksViewSet(mixins.ListModelMixin,", "from ..models import Rank from ..serializers import RankSerializer class RanksViewSet(mixins.ListModelMixin, viewsets.GenericViewSet): serializer_class =", "import Rank from ..serializers import RankSerializer class RanksViewSet(mixins.ListModelMixin, viewsets.GenericViewSet): serializer_class = RankSerializer queryset", "from ..serializers import RankSerializer class RanksViewSet(mixins.ListModelMixin, viewsets.GenericViewSet): serializer_class = RankSerializer queryset = Rank.objects.filter(is_tab=True).order_by(\"order\")", "from rest_framework import mixins, viewsets from ..models import Rank from ..serializers import RankSerializer" ]
[ "msg def remove_member(self, member_id: int): if self.state and \"members\" in self.state: self.state[\"members\"].remove(member_id) self.save_state()", "of 1000\\n\" for player_id in self.get_members(): player = Player.load(player_id) player_leveled_up = player.add_points_and_check_for_levelup(word_points) if", "return StringUtil.readable_list(player_names) def get_letters(self) -> list: party_letters = set() for player_id in self.get_members():", "for player_id in self.get_members(): player = Player.load(player_id) player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names) def get_letters(self) ->", "in self.get_members(): player = Player.load(player_id) player_leveled_up = player.add_points_and_check_for_levelup(word_points) if player_leveled_up: msg += f\"{player.get_mention_tag()}", "import save_party, load_party, disband_party class Party: def __init__(self, party_id: str = ''): if", "{word_event_chance} out of 1000\\n\" for player_id in self.get_members(): player = Player.load(player_id) player_leveled_up =", "not found in dictionary {dictionary}\") return f\"Sorry, the word '{word}' isn't in my", "and size of party word_money = party_size * party_size + word_size word_event_chance =", "= \"\" if len(members_added): msg += f\"Added {StringUtil.readable_list(members_added)} to your party. \" if", "this is a corrupted party; purge it disband_party(self.get_id()) def get_members(self) -> list: return", "list_party_letters = list(party_letters) list_party_letters.sort() return list_party_letters def make_word(self, word: str, dictionary: Dictionary) ->", "return f\"Sorry, the word '{word}' isn't in my vocabulary!\" letters = list(word) missing_letters", "+= f\"you formed the word '{word}'\\n{'everyone' if len(self.get_members()) > 1 else 'and'} scored", "[] already_partying_members = [] for member in members: player = Player(member) if player.get_party_id()", "random event, with chance {word_event_chance} out of 1000\\n\" for player_id in self.get_members(): player", "xp/score/points based on length of word and size of party word_money = party_size", "+ party_size # add xp/score/points based on length of word and size of", "your party. 
\" if len(already_partying_members): msg += f\"Couldn't add {StringUtil.readable_list(already_partying_members)} -- they're already", "{word_money} glyphs\\n\" if party_size <= 1: disband_party(self.party_id) return msg def __str__(self): return f\"Party", "str: msg = \"\" if not dictionary.check_word(word): logging.info(f\"Word '{word}' not found in dictionary", "- 2) + party_size # add xp/score/points based on length of word and", "and received {word_money} glyphs\\n\" if party_size <= 1: disband_party(self.party_id) return msg def __str__(self):", "player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state() msg = \"\" if len(members_added): msg += f\"Added {StringUtil.readable_list(members_added)} to", "import Dictionary from .player import Player from .util.string_util import StringUtil from .util.datastore import", "Party: def __init__(self, party_id: str = ''): if party_id: self.party_id = party_id self.state", "str = ''): if party_id: self.party_id = party_id self.state = load_party(self.party_id) else: self.state", "get_letters(self) -> list: party_letters = set() for player_id in self.get_members(): player = Player.load(player_id)", "vocabulary!\" letters = list(word) missing_letters = [] for letter in letters: if letter", "party_size) if random.randint(1, 1000) < word_event_chance: msg += f\"You got a random event,", "- 3) * (word_size - 2) + party_size # add xp/score/points based on", "msg += f\"Added {StringUtil.readable_list(members_added)} to your party. \" if len(already_partying_members): msg += f\"Couldn't", "party already_partying_members.append(player.get_mention_tag()) else: if player.get_id() not in self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state() msg", "word_size word_event_chance = (word_size * 2 + party_size) if random.randint(1, 1000) < word_event_chance:", "= [] for member in members: player = Player(member) if player.get_party_id() and player.get_party_id()", "logging, uuid, random from .dictionary import Dictionary from .player import Player from .util.string_util", "self.state[\"members\"].remove(member_id) self.save_state() if len(self.get_members()) <= 1: disband_party(self.get_id()) else: # this is a corrupted", "player.get_party_id() and player.get_party_id() != self.get_id(): # player is in another party already_partying_members.append(player.get_mention_tag()) else:", "self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state() msg = \"\" if len(members_added): msg += f\"Added", "= load_party(self.party_id) def get_id(self) -> int: return self.party_id def add_members(self, members) -> str:", "# player is in another party already_partying_members.append(player.get_mention_tag()) else: if player.get_id() not in self.state[\"members\"]:", "party_size # add xp/score/points based on length of word and size of party", "and player.get_party_id() != self.get_id(): # player is in another party already_partying_members.append(player.get_mention_tag()) else: if", "if len(missing_letters): return f\"unable to spell the word {word}; you don't have the", "len(self.get_members()) <= 1: disband_party(self.get_id()) else: # this is a corrupted party; purge it", "{player_leveled_up}! 
:rocket:\\n\" player.add_money(word_money) for letter in letters: # give each player xp for", "* party_size + word_size word_event_chance = (word_size * 2 + party_size) if random.randint(1,", "word_money = party_size * party_size + word_size word_event_chance = (word_size * 2 +", "disband_party(self.get_id()) def get_members(self) -> list: return self.state[\"members\"] def get_members_as_string(self) -> str: player_names =", "+= f\"You got a random event, with chance {word_event_chance} out of 1000\\n\" for", "save_state(self): save_party(self.party_id, self.state) def load_state(self): self.state = load_party(self.party_id) def get_id(self) -> int: return", "'{word}' isn't in my vocabulary!\" letters = list(word) missing_letters = [] for letter", "+ word_size word_event_chance = (word_size * 2 + party_size) if random.randint(1, 1000) <", ".dictionary import Dictionary from .player import Player from .util.string_util import StringUtil from .util.datastore", "self.party_id = uuid.uuid4().hex self.save_state() logging.info(f\"initialized party {self.party_id}: {str(self.state['members'])}\") def save_state(self): save_party(self.party_id, self.state) def", "is in another party already_partying_members.append(player.get_mention_tag()) else: if player.get_id() not in self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id())", "received {word_money} glyphs\\n\" if party_size <= 1: disband_party(self.party_id) return msg def __str__(self): return", "= party_id self.state = load_party(self.party_id) else: self.state = {} self.state[\"members\"] = [] self.party_id", "not in self.get_letters(): missing_letters.append(letter) missing_letters = list(set(missing_letters)) missing_letters.sort() if len(missing_letters): return f\"unable to", "is a corrupted party; purge it disband_party(self.get_id()) def get_members(self) -> list: return self.state[\"members\"]", "corrupted party; purge it disband_party(self.get_id()) def get_members(self) -> list: return self.state[\"members\"] def get_members_as_string(self)", "if not dictionary.check_word(word): logging.info(f\"Word '{word}' not found in dictionary {dictionary}\") return f\"Sorry, the", "random.randint(1, 1000) < word_event_chance: msg += f\"You got a random event, with chance", "f\"Couldn't add {StringUtil.readable_list(already_partying_members)} -- they're already in another party!\" return msg def remove_member(self,", "self.get_members(): player = Player.load(player_id) player_leveled_up = player.add_points_and_check_for_levelup(word_points) if player_leveled_up: msg += f\"{player.get_mention_tag()} is", "player_leveled_up: msg += f\"{player.get_mention_tag()} is now level {player_leveled_up}! :rocket:\\n\" player.add_money(word_money) for letter in", "-> str: player_names = [] for player_id in self.get_members(): player = Player.load(player_id) player_names.append(player.get_mention_tag())", "def load_state(self): self.state = load_party(self.party_id) def get_id(self) -> int: return self.party_id def add_members(self,", "now level {player_leveled_up}! 
:rocket:\\n\" player.add_money(word_money) for letter in letters: # give each player", "points and received {word_money} glyphs\\n\" if party_size <= 1: disband_party(self.party_id) return msg def", "word_points = (word_size - 3) * (word_size - 2) + party_size # add", "get_id(self) -> int: return self.party_id def add_members(self, members) -> str: members_added = []", "msg += f\"You got a random event, with chance {word_event_chance} out of 1000\\n\"", "letter in letters: # give each player xp for each letter in word", "missing_letters = list(set(missing_letters)) missing_letters.sort() if len(missing_letters): return f\"unable to spell the word {word};", "-> int: return self.party_id def add_members(self, members) -> str: members_added = [] already_partying_members", "1 else 'and'} scored {word_points} points and received {word_money} glyphs\\n\" if party_size <=", "len(already_partying_members): msg += f\"Couldn't add {StringUtil.readable_list(already_partying_members)} -- they're already in another party!\" return", "f\"you formed the word '{word}'\\n{'everyone' if len(self.get_members()) > 1 else 'and'} scored {word_points}", "of word and size of party word_money = party_size * party_size + word_size", "player.add_money(word_money) for letter in letters: # give each player xp for each letter", "(word_size - 3) * (word_size - 2) + party_size # add xp/score/points based", "for player_id in self.get_members(): player = Player.load(player_id) player_leveled_up = player.add_points_and_check_for_levelup(word_points) if player_leveled_up: msg", "f\"unable to spell the word {word}; you don't have the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\"", "def remove_member(self, member_id: int): if self.state and \"members\" in self.state: self.state[\"members\"].remove(member_id) self.save_state() if", "don't have the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\" # Calculate points and money gained party_size", "len(members_added): msg += f\"Added {StringUtil.readable_list(members_added)} to your party. 
\" if len(already_partying_members): msg +=", "party_size = len(self.get_members()) word_size = len(word) word_points = (word_size - 3) * (word_size", "random from .dictionary import Dictionary from .player import Player from .util.string_util import StringUtil", "__init__(self, party_id: str = ''): if party_id: self.party_id = party_id self.state = load_party(self.party_id)", "missing_letters.sort() if len(missing_letters): return f\"unable to spell the word {word}; you don't have", "\"members\" in self.state: self.state[\"members\"].remove(member_id) self.save_state() if len(self.get_members()) <= 1: disband_party(self.get_id()) else: # this", "Player.load(player_id) player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names) def get_letters(self) -> list: party_letters = set() for player_id", "< word_event_chance: msg += f\"You got a random event, with chance {word_event_chance} out", "(word_size - 2) + party_size # add xp/score/points based on length of word", "StringUtil.readable_list(player_names) def get_letters(self) -> list: party_letters = set() for player_id in self.get_members(): player", "-> str: members_added = [] already_partying_members = [] for member in members: player", "it disband_party(self.get_id()) def get_members(self) -> list: return self.state[\"members\"] def get_members_as_string(self) -> str: player_names", "on length of word and size of party word_money = party_size * party_size", "word_size = len(word) word_points = (word_size - 3) * (word_size - 2) +", "members) -> str: members_added = [] already_partying_members = [] for member in members:", "str: members_added = [] already_partying_members = [] for member in members: player =", "in player.get_letters(): party_letters.add(letter) list_party_letters = list(party_letters) list_party_letters.sort() return list_party_letters def make_word(self, word: str,", "\"\" if len(members_added): msg += f\"Added {StringUtil.readable_list(members_added)} to your party. \" if len(already_partying_members):", "word: str, dictionary: Dictionary) -> str: msg = \"\" if not dictionary.check_word(word): logging.info(f\"Word", "Calculate points and money gained party_size = len(self.get_members()) word_size = len(word) word_points =", "in another party!\" return msg def remove_member(self, member_id: int): if self.state and \"members\"", "if len(members_added): msg += f\"Added {StringUtil.readable_list(members_added)} to your party. 
\" if len(already_partying_members): msg", "not in self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state() msg = \"\" if len(members_added): msg", "save_party, load_party, disband_party class Party: def __init__(self, party_id: str = ''): if party_id:", "msg = \"\" if not dictionary.check_word(word): logging.info(f\"Word '{word}' not found in dictionary {dictionary}\")", "= Player.load(player_id) for letter in player.get_letters(): party_letters.add(letter) list_party_letters = list(party_letters) list_party_letters.sort() return list_party_letters", "list_party_letters.sort() return list_party_letters def make_word(self, word: str, dictionary: Dictionary) -> str: msg =", "{} self.state[\"members\"] = [] self.party_id = uuid.uuid4().hex self.save_state() logging.info(f\"initialized party {self.party_id}: {str(self.state['members'])}\") def", "letter in letters: if letter not in self.get_letters(): missing_letters.append(letter) missing_letters = list(set(missing_letters)) missing_letters.sort()", "= (word_size - 3) * (word_size - 2) + party_size # add xp/score/points", "else 'and'} scored {word_points} points and received {word_money} glyphs\\n\" if party_size <= 1:", "= player.add_points_and_check_for_levelup(word_points) if player_leveled_up: msg += f\"{player.get_mention_tag()} is now level {player_leveled_up}! :rocket:\\n\" player.add_money(word_money)", "save_party(self.party_id, self.state) def load_state(self): self.state = load_party(self.party_id) def get_id(self) -> int: return self.party_id", "a random event, with chance {word_event_chance} out of 1000\\n\" for player_id in self.get_members():", "if random.randint(1, 1000) < word_event_chance: msg += f\"You got a random event, with", "word player.add_letter_xp(letter, 1) player.remove_letters(letters) msg += f\"you formed the word '{word}'\\n{'everyone' if len(self.get_members())", "{word_points} points and received {word_money} glyphs\\n\" if party_size <= 1: disband_party(self.party_id) return msg", "-- they're already in another party!\" return msg def remove_member(self, member_id: int): if", "2) + party_size # add xp/score/points based on length of word and size", "party; purge it disband_party(self.get_id()) def get_members(self) -> list: return self.state[\"members\"] def get_members_as_string(self) ->", "not dictionary.check_word(word): logging.info(f\"Word '{word}' not found in dictionary {dictionary}\") return f\"Sorry, the word", "+= f\"{player.get_mention_tag()} is now level {player_leveled_up}! 
:rocket:\\n\" player.add_money(word_money) for letter in letters: #", "if party_id: self.party_id = party_id self.state = load_party(self.party_id) else: self.state = {} self.state[\"members\"]", "= ''): if party_id: self.party_id = party_id self.state = load_party(self.party_id) else: self.state =", "party {self.party_id}: {str(self.state['members'])}\") def save_state(self): save_party(self.party_id, self.state) def load_state(self): self.state = load_party(self.party_id) def", "# this is a corrupted party; purge it disband_party(self.get_id()) def get_members(self) -> list:", "'bold')}\" # Calculate points and money gained party_size = len(self.get_members()) word_size = len(word)", "for letter in letters: if letter not in self.get_letters(): missing_letters.append(letter) missing_letters = list(set(missing_letters))", "= uuid.uuid4().hex self.save_state() logging.info(f\"initialized party {self.party_id}: {str(self.state['members'])}\") def save_state(self): save_party(self.party_id, self.state) def load_state(self):", "player_leveled_up = player.add_points_and_check_for_levelup(word_points) if player_leveled_up: msg += f\"{player.get_mention_tag()} is now level {player_leveled_up}! :rocket:\\n\"", "= set() for player_id in self.get_members(): player = Player.load(player_id) for letter in player.get_letters():", "letter in word player.add_letter_xp(letter, 1) player.remove_letters(letters) msg += f\"you formed the word '{word}'\\n{'everyone'", "gained party_size = len(self.get_members()) word_size = len(word) word_points = (word_size - 3) *", "if self.state and \"members\" in self.state: self.state[\"members\"].remove(member_id) self.save_state() if len(self.get_members()) <= 1: disband_party(self.get_id())", "make_word(self, word: str, dictionary: Dictionary) -> str: msg = \"\" if not dictionary.check_word(word):", ".player import Player from .util.string_util import StringUtil from .util.datastore import save_party, load_party, disband_party", "f\"{player.get_mention_tag()} is now level {player_leveled_up}! :rocket:\\n\" player.add_money(word_money) for letter in letters: # give", "> 1 else 'and'} scored {word_points} points and received {word_money} glyphs\\n\" if party_size", "else: # this is a corrupted party; purge it disband_party(self.get_id()) def get_members(self) ->", "member_id: int): if self.state and \"members\" in self.state: self.state[\"members\"].remove(member_id) self.save_state() if len(self.get_members()) <=", "def get_letters(self) -> list: party_letters = set() for player_id in self.get_members(): player =", "Player(member) if player.get_party_id() and player.get_party_id() != self.get_id(): # player is in another party", "already_partying_members.append(player.get_mention_tag()) else: if player.get_id() not in self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state() msg =", "in my vocabulary!\" letters = list(word) missing_letters = [] for letter in letters:", "'{word}' not found in dictionary {dictionary}\") return f\"Sorry, the word '{word}' isn't in", "uuid, random from .dictionary import Dictionary from .player import Player from .util.string_util import", "player.add_points_and_check_for_levelup(word_points) if player_leveled_up: msg += f\"{player.get_mention_tag()} is now level {player_leveled_up}! 
:rocket:\\n\" player.add_money(word_money) for", "self.get_letters(): missing_letters.append(letter) missing_letters = list(set(missing_letters)) missing_letters.sort() if len(missing_letters): return f\"unable to spell the", "members_added.append(player.get_mention_tag()) self.save_state() msg = \"\" if len(members_added): msg += f\"Added {StringUtil.readable_list(members_added)} to your", "word_event_chance = (word_size * 2 + party_size) if random.randint(1, 1000) < word_event_chance: msg", "in members: player = Player(member) if player.get_party_id() and player.get_party_id() != self.get_id(): # player", "in self.get_members(): player = Player.load(player_id) for letter in player.get_letters(): party_letters.add(letter) list_party_letters = list(party_letters)", "from .util.datastore import save_party, load_party, disband_party class Party: def __init__(self, party_id: str =", "!= self.get_id(): # player is in another party already_partying_members.append(player.get_mention_tag()) else: if player.get_id() not", "= list(party_letters) list_party_letters.sort() return list_party_letters def make_word(self, word: str, dictionary: Dictionary) -> str:", "party_size + word_size word_event_chance = (word_size * 2 + party_size) if random.randint(1, 1000)", "player.get_party_id() != self.get_id(): # player is in another party already_partying_members.append(player.get_mention_tag()) else: if player.get_id()", "members_added = [] already_partying_members = [] for member in members: player = Player(member)", "Player.load(player_id) for letter in player.get_letters(): party_letters.add(letter) list_party_letters = list(party_letters) list_party_letters.sort() return list_party_letters def", "self.get_members(): player = Player.load(player_id) player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names) def get_letters(self) -> list: party_letters =", "from .player import Player from .util.string_util import StringUtil from .util.datastore import save_party, load_party,", "1000\\n\" for player_id in self.get_members(): player = Player.load(player_id) player_leveled_up = player.add_points_and_check_for_levelup(word_points) if player_leveled_up:", "the word {word}; you don't have the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\" # Calculate points", "= Player.load(player_id) player_leveled_up = player.add_points_and_check_for_levelup(word_points) if player_leveled_up: msg += f\"{player.get_mention_tag()} is now level", "{self.party_id}: {str(self.state['members'])}\") def save_state(self): save_party(self.party_id, self.state) def load_state(self): self.state = load_party(self.party_id) def get_id(self)", "in word player.add_letter_xp(letter, 1) player.remove_letters(letters) msg += f\"you formed the word '{word}'\\n{'everyone' if", "<reponame>maxsaltonstall/letters-with-strangers<filename>bot/models/party.py import logging, uuid, random from .dictionary import Dictionary from .player import Player", "self.state = load_party(self.party_id) def get_id(self) -> int: return self.party_id def add_members(self, members) ->", "[] for player_id in self.get_members(): player = Player.load(player_id) player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names) def get_letters(self)", "length of word and size of party word_money = party_size * party_size +", "return list_party_letters def make_word(self, word: str, dictionary: Dictionary) -> str: msg = \"\"", "* 2 + party_size) if random.randint(1, 1000) < word_event_chance: msg += 
f\"You got", "player xp for each letter in word player.add_letter_xp(letter, 1) player.remove_letters(letters) msg += f\"you", "self.save_state() if len(self.get_members()) <= 1: disband_party(self.get_id()) else: # this is a corrupted party;", "the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\" # Calculate points and money gained party_size = len(self.get_members())", "letter not in self.get_letters(): missing_letters.append(letter) missing_letters = list(set(missing_letters)) missing_letters.sort() if len(missing_letters): return f\"unable", "spell the word {word}; you don't have the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\" # Calculate", "= list(set(missing_letters)) missing_letters.sort() if len(missing_letters): return f\"unable to spell the word {word}; you", "{str(self.state['members'])}\") def save_state(self): save_party(self.party_id, self.state) def load_state(self): self.state = load_party(self.party_id) def get_id(self) ->", "logging.info(f\"initialized party {self.party_id}: {str(self.state['members'])}\") def save_state(self): save_party(self.party_id, self.state) def load_state(self): self.state = load_party(self.party_id)", "return self.party_id def add_members(self, members) -> str: members_added = [] already_partying_members = []", "party. \" if len(already_partying_members): msg += f\"Couldn't add {StringUtil.readable_list(already_partying_members)} -- they're already in", "they're already in another party!\" return msg def remove_member(self, member_id: int): if self.state", "{StringUtil.readable_list(missing_letters, 'bold')}\" # Calculate points and money gained party_size = len(self.get_members()) word_size =", "= len(self.get_members()) word_size = len(word) word_points = (word_size - 3) * (word_size -", "list: return self.state[\"members\"] def get_members_as_string(self) -> str: player_names = [] for player_id in", "from .util.string_util import StringUtil from .util.datastore import save_party, load_party, disband_party class Party: def", "each letter in word player.add_letter_xp(letter, 1) player.remove_letters(letters) msg += f\"you formed the word", "self.save_state() logging.info(f\"initialized party {self.party_id}: {str(self.state['members'])}\") def save_state(self): save_party(self.party_id, self.state) def load_state(self): self.state =", "self.party_id def add_members(self, members) -> str: members_added = [] already_partying_members = [] for", "return f\"unable to spell the word {word}; you don't have the letter(s) {StringUtil.readable_list(missing_letters,", "with chance {word_event_chance} out of 1000\\n\" for player_id in self.get_members(): player = Player.load(player_id)", "formed the word '{word}'\\n{'everyone' if len(self.get_members()) > 1 else 'and'} scored {word_points} points", "self.state[\"members\"] def get_members_as_string(self) -> str: player_names = [] for player_id in self.get_members(): player", "class Party: def __init__(self, party_id: str = ''): if party_id: self.party_id = party_id", "my vocabulary!\" letters = list(word) missing_letters = [] for letter in letters: if", "+ party_size) if random.randint(1, 1000) < word_event_chance: msg += f\"You got a random", "add xp/score/points based on length of word and size of party word_money =", "player is in another party already_partying_members.append(player.get_mention_tag()) else: if player.get_id() not in self.state[\"members\"]: self.state[\"members\"].append(player.get_id())", "player_id in self.get_members(): player = 
Player.load(player_id) for letter in player.get_letters(): party_letters.add(letter) list_party_letters =", "return msg def remove_member(self, member_id: int): if self.state and \"members\" in self.state: self.state[\"members\"].remove(member_id)", "for each letter in word player.add_letter_xp(letter, 1) player.remove_letters(letters) msg += f\"you formed the", "and money gained party_size = len(self.get_members()) word_size = len(word) word_points = (word_size -", "member in members: player = Player(member) if player.get_party_id() and player.get_party_id() != self.get_id(): #", "-> list: party_letters = set() for player_id in self.get_members(): player = Player.load(player_id) for", "party word_money = party_size * party_size + word_size word_event_chance = (word_size * 2", "1: disband_party(self.get_id()) else: # this is a corrupted party; purge it disband_party(self.get_id()) def", "party_id self.state = load_party(self.party_id) else: self.state = {} self.state[\"members\"] = [] self.party_id =", "else: self.state = {} self.state[\"members\"] = [] self.party_id = uuid.uuid4().hex self.save_state() logging.info(f\"initialized party", "get_members(self) -> list: return self.state[\"members\"] def get_members_as_string(self) -> str: player_names = [] for", "-> str: msg = \"\" if not dictionary.check_word(word): logging.info(f\"Word '{word}' not found in", "Dictionary) -> str: msg = \"\" if not dictionary.check_word(word): logging.info(f\"Word '{word}' not found", "party_letters.add(letter) list_party_letters = list(party_letters) list_party_letters.sort() return list_party_letters def make_word(self, word: str, dictionary: Dictionary)", "''): if party_id: self.party_id = party_id self.state = load_party(self.party_id) else: self.state = {}", "def get_members(self) -> list: return self.state[\"members\"] def get_members_as_string(self) -> str: player_names = []", "len(self.get_members()) word_size = len(word) word_points = (word_size - 3) * (word_size - 2)", "= Player.load(player_id) player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names) def get_letters(self) -> list: party_letters = set() for", "scored {word_points} points and received {word_money} glyphs\\n\" if party_size <= 1: disband_party(self.party_id) return", "return self.state[\"members\"] def get_members_as_string(self) -> str: player_names = [] for player_id in self.get_members():", "word '{word}'\\n{'everyone' if len(self.get_members()) > 1 else 'and'} scored {word_points} points and received", "list(set(missing_letters)) missing_letters.sort() if len(missing_letters): return f\"unable to spell the word {word}; you don't", "player.remove_letters(letters) msg += f\"you formed the word '{word}'\\n{'everyone' if len(self.get_members()) > 1 else", "party_id: self.party_id = party_id self.state = load_party(self.party_id) else: self.state = {} self.state[\"members\"] =", "another party already_partying_members.append(player.get_mention_tag()) else: if player.get_id() not in self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state()", "self.state[\"members\"] = [] self.party_id = uuid.uuid4().hex self.save_state() logging.info(f\"initialized party {self.party_id}: {str(self.state['members'])}\") def save_state(self):", "len(word) word_points = (word_size - 3) * (word_size - 2) + party_size #", "already in another party!\" return msg def remove_member(self, member_id: int): if 
self.state and", "= Player(member) if player.get_party_id() and player.get_party_id() != self.get_id(): # player is in another", "self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state() msg = \"\" if len(members_added): msg += f\"Added {StringUtil.readable_list(members_added)}", "for letter in letters: # give each player xp for each letter in", "uuid.uuid4().hex self.save_state() logging.info(f\"initialized party {self.party_id}: {str(self.state['members'])}\") def save_state(self): save_party(self.party_id, self.state) def load_state(self): self.state", "{dictionary}\") return f\"Sorry, the word '{word}' isn't in my vocabulary!\" letters = list(word)", "\"\" if not dictionary.check_word(word): logging.info(f\"Word '{word}' not found in dictionary {dictionary}\") return f\"Sorry,", "for member in members: player = Player(member) if player.get_party_id() and player.get_party_id() != self.get_id():", "load_state(self): self.state = load_party(self.party_id) def get_id(self) -> int: return self.party_id def add_members(self, members)", "set() for player_id in self.get_members(): player = Player.load(player_id) for letter in player.get_letters(): party_letters.add(letter)", "points and money gained party_size = len(self.get_members()) word_size = len(word) word_points = (word_size", "and \"members\" in self.state: self.state[\"members\"].remove(member_id) self.save_state() if len(self.get_members()) <= 1: disband_party(self.get_id()) else: #", "player = Player.load(player_id) player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names) def get_letters(self) -> list: party_letters = set()", "from .dictionary import Dictionary from .player import Player from .util.string_util import StringUtil from", "list_party_letters def make_word(self, word: str, dictionary: Dictionary) -> str: msg = \"\" if", "load_party(self.party_id) else: self.state = {} self.state[\"members\"] = [] self.party_id = uuid.uuid4().hex self.save_state() logging.info(f\"initialized", "the word '{word}' isn't in my vocabulary!\" letters = list(word) missing_letters = []", "in letters: # give each player xp for each letter in word player.add_letter_xp(letter,", "[] for member in members: player = Player(member) if player.get_party_id() and player.get_party_id() !=", "is now level {player_leveled_up}! 
:rocket:\\n\" player.add_money(word_money) for letter in letters: # give each", "player_id in self.get_members(): player = Player.load(player_id) player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names) def get_letters(self) -> list:", "list(party_letters) list_party_letters.sort() return list_party_letters def make_word(self, word: str, dictionary: Dictionary) -> str: msg", "{StringUtil.readable_list(already_partying_members)} -- they're already in another party!\" return msg def remove_member(self, member_id: int):", "= party_size * party_size + word_size word_event_chance = (word_size * 2 + party_size)", "if party_size <= 1: disband_party(self.party_id) return msg def __str__(self): return f\"Party members: {self.get_members_as_string()}\"", "chance {word_event_chance} out of 1000\\n\" for player_id in self.get_members(): player = Player.load(player_id) player_leveled_up", "for player_id in self.get_members(): player = Player.load(player_id) for letter in player.get_letters(): party_letters.add(letter) list_party_letters", "= load_party(self.party_id) else: self.state = {} self.state[\"members\"] = [] self.party_id = uuid.uuid4().hex self.save_state()", "1) player.remove_letters(letters) msg += f\"you formed the word '{word}'\\n{'everyone' if len(self.get_members()) > 1", "logging.info(f\"Word '{word}' not found in dictionary {dictionary}\") return f\"Sorry, the word '{word}' isn't", "'{word}'\\n{'everyone' if len(self.get_members()) > 1 else 'and'} scored {word_points} points and received {word_money}", "found in dictionary {dictionary}\") return f\"Sorry, the word '{word}' isn't in my vocabulary!\"", "f\"Sorry, the word '{word}' isn't in my vocabulary!\" letters = list(word) missing_letters =", "len(self.get_members()) > 1 else 'and'} scored {word_points} points and received {word_money} glyphs\\n\" if", "int: return self.party_id def add_members(self, members) -> str: members_added = [] already_partying_members =", "out of 1000\\n\" for player_id in self.get_members(): player = Player.load(player_id) player_leveled_up = player.add_points_and_check_for_levelup(word_points)", "size of party word_money = party_size * party_size + word_size word_event_chance = (word_size", "word '{word}' isn't in my vocabulary!\" letters = list(word) missing_letters = [] for", "disband_party class Party: def __init__(self, party_id: str = ''): if party_id: self.party_id =", "# add xp/score/points based on length of word and size of party word_money", "self.state: self.state[\"members\"].remove(member_id) self.save_state() if len(self.get_members()) <= 1: disband_party(self.get_id()) else: # this is a", "= (word_size * 2 + party_size) if random.randint(1, 1000) < word_event_chance: msg +=", "import logging, uuid, random from .dictionary import Dictionary from .player import Player from", "int): if self.state and \"members\" in self.state: self.state[\"members\"].remove(member_id) self.save_state() if len(self.get_members()) <= 1:", "Player.load(player_id) player_leveled_up = player.add_points_and_check_for_levelup(word_points) if player_leveled_up: msg += f\"{player.get_mention_tag()} is now level {player_leveled_up}!", "dictionary: Dictionary) -> str: msg = \"\" if not dictionary.check_word(word): logging.info(f\"Word '{word}' not", "def get_id(self) -> int: return self.party_id def add_members(self, members) -> str: members_added =", "{word}; you don't have the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\" # Calculate points and money", "word {word}; 
you don't have the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\" # Calculate points and", "add_members(self, members) -> str: members_added = [] already_partying_members = [] for member in", "event, with chance {word_event_chance} out of 1000\\n\" for player_id in self.get_members(): player =", "msg += f\"you formed the word '{word}'\\n{'everyone' if len(self.get_members()) > 1 else 'and'}", "word and size of party word_money = party_size * party_size + word_size word_event_chance", "\" if len(already_partying_members): msg += f\"Couldn't add {StringUtil.readable_list(already_partying_members)} -- they're already in another", "{StringUtil.readable_list(members_added)} to your party. \" if len(already_partying_members): msg += f\"Couldn't add {StringUtil.readable_list(already_partying_members)} --", "[] self.party_id = uuid.uuid4().hex self.save_state() logging.info(f\"initialized party {self.party_id}: {str(self.state['members'])}\") def save_state(self): save_party(self.party_id, self.state)", "glyphs\\n\" if party_size <= 1: disband_party(self.party_id) return msg def __str__(self): return f\"Party members:", "party!\" return msg def remove_member(self, member_id: int): if self.state and \"members\" in self.state:", "letters: # give each player xp for each letter in word player.add_letter_xp(letter, 1)", "str: player_names = [] for player_id in self.get_members(): player = Player.load(player_id) player_names.append(player.get_mention_tag()) return", "StringUtil from .util.datastore import save_party, load_party, disband_party class Party: def __init__(self, party_id: str", "-> list: return self.state[\"members\"] def get_members_as_string(self) -> str: player_names = [] for player_id", "another party!\" return msg def remove_member(self, member_id: int): if self.state and \"members\" in", "f\"You got a random event, with chance {word_event_chance} out of 1000\\n\" for player_id", "level {player_leveled_up}! 
:rocket:\\n\" player.add_money(word_money) for letter in letters: # give each player xp", "add {StringUtil.readable_list(already_partying_members)} -- they're already in another party!\" return msg def remove_member(self, member_id:", "letter in player.get_letters(): party_letters.add(letter) list_party_letters = list(party_letters) list_party_letters.sort() return list_party_letters def make_word(self, word:", "import Player from .util.string_util import StringUtil from .util.datastore import save_party, load_party, disband_party class", "Dictionary from .player import Player from .util.string_util import StringUtil from .util.datastore import save_party,", "got a random event, with chance {word_event_chance} out of 1000\\n\" for player_id in", "of party word_money = party_size * party_size + word_size word_event_chance = (word_size *", ":rocket:\\n\" player.add_money(word_money) for letter in letters: # give each player xp for each", "in self.state: self.state[\"members\"].remove(member_id) self.save_state() if len(self.get_members()) <= 1: disband_party(self.get_id()) else: # this is", "= [] already_partying_members = [] for member in members: player = Player(member) if", "word_event_chance: msg += f\"You got a random event, with chance {word_event_chance} out of", "+= f\"Couldn't add {StringUtil.readable_list(already_partying_members)} -- they're already in another party!\" return msg def", "party_size * party_size + word_size word_event_chance = (word_size * 2 + party_size) if", "self.state = load_party(self.party_id) else: self.state = {} self.state[\"members\"] = [] self.party_id = uuid.uuid4().hex", "you don't have the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\" # Calculate points and money gained", "list: party_letters = set() for player_id in self.get_members(): player = Player.load(player_id) for letter", "self.state = {} self.state[\"members\"] = [] self.party_id = uuid.uuid4().hex self.save_state() logging.info(f\"initialized party {self.party_id}:", "else: if player.get_id() not in self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state() msg = \"\"", "msg += f\"Couldn't add {StringUtil.readable_list(already_partying_members)} -- they're already in another party!\" return msg", "to spell the word {word}; you don't have the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\" #", "= {} self.state[\"members\"] = [] self.party_id = uuid.uuid4().hex self.save_state() logging.info(f\"initialized party {self.party_id}: {str(self.state['members'])}\")", "= list(word) missing_letters = [] for letter in letters: if letter not in", "have the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\" # Calculate points and money gained party_size =", "if len(already_partying_members): msg += f\"Couldn't add {StringUtil.readable_list(already_partying_members)} -- they're already in another party!\"", "len(missing_letters): return f\"unable to spell the word {word}; you don't have the letter(s)", "3) * (word_size - 2) + party_size # add xp/score/points based on length", "dictionary {dictionary}\") return f\"Sorry, the word '{word}' isn't in my vocabulary!\" letters =", "= [] for player_id in self.get_members(): player = Player.load(player_id) player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names) def", "self.party_id = party_id self.state = load_party(self.party_id) else: self.state = {} 
self.state[\"members\"] = []", "if player_leveled_up: msg += f\"{player.get_mention_tag()} is now level {player_leveled_up}! :rocket:\\n\" player.add_money(word_money) for letter", "self.save_state() msg = \"\" if len(members_added): msg += f\"Added {StringUtil.readable_list(members_added)} to your party.", "player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names) def get_letters(self) -> list: party_letters = set() for player_id in", "def make_word(self, word: str, dictionary: Dictionary) -> str: msg = \"\" if not", "list(word) missing_letters = [] for letter in letters: if letter not in self.get_letters():", "in letters: if letter not in self.get_letters(): missing_letters.append(letter) missing_letters = list(set(missing_letters)) missing_letters.sort() if", "player.add_letter_xp(letter, 1) player.remove_letters(letters) msg += f\"you formed the word '{word}'\\n{'everyone' if len(self.get_members()) >", "= len(word) word_points = (word_size - 3) * (word_size - 2) + party_size", "1000) < word_event_chance: msg += f\"You got a random event, with chance {word_event_chance}", "in self.get_members(): player = Player.load(player_id) player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names) def get_letters(self) -> list: party_letters", "import StringUtil from .util.datastore import save_party, load_party, disband_party class Party: def __init__(self, party_id:", "player = Player(member) if player.get_party_id() and player.get_party_id() != self.get_id(): # player is in", "party_letters = set() for player_id in self.get_members(): player = Player.load(player_id) for letter in", "= \"\" if not dictionary.check_word(word): logging.info(f\"Word '{word}' not found in dictionary {dictionary}\") return", "dictionary.check_word(word): logging.info(f\"Word '{word}' not found in dictionary {dictionary}\") return f\"Sorry, the word '{word}'", "xp for each letter in word player.add_letter_xp(letter, 1) player.remove_letters(letters) msg += f\"you formed", "def __init__(self, party_id: str = ''): if party_id: self.party_id = party_id self.state =", "party_id: str = ''): if party_id: self.party_id = party_id self.state = load_party(self.party_id) else:", "'and'} scored {word_points} points and received {word_money} glyphs\\n\" if party_size <= 1: disband_party(self.party_id)", "self.state) def load_state(self): self.state = load_party(self.party_id) def get_id(self) -> int: return self.party_id def", "if len(self.get_members()) > 1 else 'and'} scored {word_points} points and received {word_money} glyphs\\n\"", "the word '{word}'\\n{'everyone' if len(self.get_members()) > 1 else 'and'} scored {word_points} points and", "disband_party(self.get_id()) else: # this is a corrupted party; purge it disband_party(self.get_id()) def get_members(self)", "missing_letters = [] for letter in letters: if letter not in self.get_letters(): missing_letters.append(letter)", "in self.get_letters(): missing_letters.append(letter) missing_letters = list(set(missing_letters)) missing_letters.sort() if len(missing_letters): return f\"unable to spell", "based on length of word and size of party word_money = party_size *", "if player.get_id() not in self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state() msg = \"\" if", "f\"Added {StringUtil.readable_list(members_added)} to your party. 
\" if len(already_partying_members): msg += f\"Couldn't add {StringUtil.readable_list(already_partying_members)}", "if letter not in self.get_letters(): missing_letters.append(letter) missing_letters = list(set(missing_letters)) missing_letters.sort() if len(missing_letters): return", "members: player = Player(member) if player.get_party_id() and player.get_party_id() != self.get_id(): # player is", "<= 1: disband_party(self.get_id()) else: # this is a corrupted party; purge it disband_party(self.get_id())", "already_partying_members = [] for member in members: player = Player(member) if player.get_party_id() and", "= [] for letter in letters: if letter not in self.get_letters(): missing_letters.append(letter) missing_letters", "str, dictionary: Dictionary) -> str: msg = \"\" if not dictionary.check_word(word): logging.info(f\"Word '{word}'", "player_id in self.get_members(): player = Player.load(player_id) player_leveled_up = player.add_points_and_check_for_levelup(word_points) if player_leveled_up: msg +=", "load_party, disband_party class Party: def __init__(self, party_id: str = ''): if party_id: self.party_id", "def get_members_as_string(self) -> str: player_names = [] for player_id in self.get_members(): player =", "get_members_as_string(self) -> str: player_names = [] for player_id in self.get_members(): player = Player.load(player_id)", "player = Player.load(player_id) for letter in player.get_letters(): party_letters.add(letter) list_party_letters = list(party_letters) list_party_letters.sort() return", "# Calculate points and money gained party_size = len(self.get_members()) word_size = len(word) word_points", "player = Player.load(player_id) player_leveled_up = player.add_points_and_check_for_levelup(word_points) if player_leveled_up: msg += f\"{player.get_mention_tag()} is now", "in dictionary {dictionary}\") return f\"Sorry, the word '{word}' isn't in my vocabulary!\" letters", "# give each player xp for each letter in word player.add_letter_xp(letter, 1) player.remove_letters(letters)", "player_names = [] for player_id in self.get_members(): player = Player.load(player_id) player_names.append(player.get_mention_tag()) return StringUtil.readable_list(player_names)", "in self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state() msg = \"\" if len(members_added): msg +=", "if player.get_party_id() and player.get_party_id() != self.get_id(): # player is in another party already_partying_members.append(player.get_mention_tag())", "+= f\"Added {StringUtil.readable_list(members_added)} to your party. \" if len(already_partying_members): msg += f\"Couldn't add", "letters = list(word) missing_letters = [] for letter in letters: if letter not", "isn't in my vocabulary!\" letters = list(word) missing_letters = [] for letter in", "def save_state(self): save_party(self.party_id, self.state) def load_state(self): self.state = load_party(self.party_id) def get_id(self) -> int:", "money gained party_size = len(self.get_members()) word_size = len(word) word_points = (word_size - 3)", "msg = \"\" if len(members_added): msg += f\"Added {StringUtil.readable_list(members_added)} to your party. 
\"", "missing_letters.append(letter) missing_letters = list(set(missing_letters)) missing_letters.sort() if len(missing_letters): return f\"unable to spell the word", "if len(self.get_members()) <= 1: disband_party(self.get_id()) else: # this is a corrupted party; purge", "(word_size * 2 + party_size) if random.randint(1, 1000) < word_event_chance: msg += f\"You", "msg += f\"{player.get_mention_tag()} is now level {player_leveled_up}! :rocket:\\n\" player.add_money(word_money) for letter in letters:", "* (word_size - 2) + party_size # add xp/score/points based on length of", "remove_member(self, member_id: int): if self.state and \"members\" in self.state: self.state[\"members\"].remove(member_id) self.save_state() if len(self.get_members())", "purge it disband_party(self.get_id()) def get_members(self) -> list: return self.state[\"members\"] def get_members_as_string(self) -> str:", "def add_members(self, members) -> str: members_added = [] already_partying_members = [] for member", "give each player xp for each letter in word player.add_letter_xp(letter, 1) player.remove_letters(letters) msg", "2 + party_size) if random.randint(1, 1000) < word_event_chance: msg += f\"You got a", "in another party already_partying_members.append(player.get_mention_tag()) else: if player.get_id() not in self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag())", ".util.string_util import StringUtil from .util.datastore import save_party, load_party, disband_party class Party: def __init__(self,", "for letter in player.get_letters(): party_letters.add(letter) list_party_letters = list(party_letters) list_party_letters.sort() return list_party_letters def make_word(self,", "Player from .util.string_util import StringUtil from .util.datastore import save_party, load_party, disband_party class Party:", "[] for letter in letters: if letter not in self.get_letters(): missing_letters.append(letter) missing_letters =", "player.get_letters(): party_letters.add(letter) list_party_letters = list(party_letters) list_party_letters.sort() return list_party_letters def make_word(self, word: str, dictionary:", ".util.datastore import save_party, load_party, disband_party class Party: def __init__(self, party_id: str = ''):", "to your party. 
\" if len(already_partying_members): msg += f\"Couldn't add {StringUtil.readable_list(already_partying_members)} -- they're", "a corrupted party; purge it disband_party(self.get_id()) def get_members(self) -> list: return self.state[\"members\"] def", "letters: if letter not in self.get_letters(): missing_letters.append(letter) missing_letters = list(set(missing_letters)) missing_letters.sort() if len(missing_letters):", "self.state and \"members\" in self.state: self.state[\"members\"].remove(member_id) self.save_state() if len(self.get_members()) <= 1: disband_party(self.get_id()) else:", "self.get_members(): player = Player.load(player_id) for letter in player.get_letters(): party_letters.add(letter) list_party_letters = list(party_letters) list_party_letters.sort()", "load_party(self.party_id) def get_id(self) -> int: return self.party_id def add_members(self, members) -> str: members_added", "letter(s) {StringUtil.readable_list(missing_letters, 'bold')}\" # Calculate points and money gained party_size = len(self.get_members()) word_size", "player.get_id() not in self.state[\"members\"]: self.state[\"members\"].append(player.get_id()) player.set_party_id(self.get_id()) members_added.append(player.get_mention_tag()) self.save_state() msg = \"\" if len(members_added):", "each player xp for each letter in word player.add_letter_xp(letter, 1) player.remove_letters(letters) msg +=", "self.get_id(): # player is in another party already_partying_members.append(player.get_mention_tag()) else: if player.get_id() not in", "= [] self.party_id = uuid.uuid4().hex self.save_state() logging.info(f\"initialized party {self.party_id}: {str(self.state['members'])}\") def save_state(self): save_party(self.party_id," ]
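# A quick worked example of the scoring arithmetic in Party.make_word above.
# This is a standalone illustration, not part of the class: for a four-member
# party spelling a five-letter word,
party_size = 4
word_size = 5
word_points = (word_size - 3) * (word_size - 2) + party_size  # 2 * 3 + 4 = 10
word_money = party_size * party_size + word_size              # 16 + 5 = 21
word_event_chance = word_size * 2 + party_size                # 10 + 4 = 14
# random.randint(1, 1000) < 14 succeeds on 13 of 1000 outcomes, about 1.3%
print(word_points, word_money, word_event_chance)             # 10 21 14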
[ "= 10 random_state = np.random.RandomState(1234567890) df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y':", "1, 2, 2]}) p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_jitter(size=10, color='red',", "= geom_point(position=position_jitter) assert isinstance(position.from_geom(geom), position_jitter) def test_dodge_empty_data(): empty_df = pd.DataFrame({'x': [], 'y': []})", "+ geom_point(size=10, color='red', position=position_nudge(.25, .25))) assert p + _theme == 'nudge' def test_stack():", "(ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot(position='dodge2', size=2)) assert p + _theme == 'dodge2'", "'y', color='c')) + geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True, size=2) ) assert p + _theme ==", "_theme = theme(subplots_adjust={'right': 0.85}) def test_jitter(): df1 = pd.DataFrame({'x': [1, 2, 1, 2],", "np.random.RandomState(1234567890) df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]})", "2, 1, 2], 'y': [1, 1, 2, 2]}) p = (ggplot(df1, aes('x', 'y'))", "n = 6 m = 10 random_state = np.random.RandomState(1234567890) df1 = pd.DataFrame({'x': [1,", "p = (ggplot(df, aes('x', 'y', fill='letters')) + geom_point(size=10, fill='black') + geom_point(size=10, position=position)) assert", "'stack' def test_stack_negative(): df = df1.copy() _loc = df.columns.get_loc df.iloc[0, _loc('y')] *= -1", "df.iloc[len(df)-1, _loc('y')] *= -1 p = (ggplot(df) + geom_col(aes('factor(x)', 'y', fill='factor(y)'), position='stack') +", "assert p + _theme == 'stack-negative' def test_fill(): p = (ggplot(df2, aes('factor(z)')) +", "def test_position_from_geom(): geom = geom_point(position='jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position='position_jitter') assert isinstance(position.from_geom(geom),", "(ggplot(df) + geom_col(aes('factor(x)', 'y', fill='factor(y)'), position='stack') + geom_text(aes('factor(x)', 'y', label='y'), position=position_stack(vjust=0.5)) ) assert", "'y')) + geom_point(size=10) + geom_jitter(size=10, color='red', random_state=random_state) + geom_jitter(size=10, color='blue', width=0.1, height=0.1, random_state=random_state))", "aes(y=after_stat('count'), label=after_stat('count')), stat='count', position=d, va='bottom') ) assert p + _theme == 'dodge_preserve_single_text' def", "fill='factor(y)'), position='stack') + geom_text(aes('factor(x)', 'y', label='y'), position=position_stack(vjust=0.5)) ) assert p + _theme ==", "+ geom_text(aes('factor(x)', 'y', label='y'), position=position_stack(vjust=0.5)) ) assert p + _theme == 'stack-negative' def", "isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position='position_jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom),", "def test_dodge_empty_data(): empty_df = pd.DataFrame({'x': [], 'y': []}) p = (ggplot(df1, aes('x', 'y'))", "assert p + _theme == 'jitterdodge' def test_position_from_geom(): geom = geom_point(position='jitter') assert isinstance(position.from_geom(geom),", "= position_dodge(preserve='single', width=0.9) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=d) + geom_text( aes(y=after_stat('count'),", "= (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot(position='dodge2', size=2)) assert p + _theme ==", "p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_jitter(size=10, color='red', random_state=random_state) + geom_jitter(size=10,", "size=2) ) assert p + _theme == 'dodge2_varwidth' def 
test_jitterdodge(): df = pd.DataFrame({", "color='c')) + geom_boxplot(position='dodge2', size=2)) assert p + _theme == 'dodge2' def test_dodge2_varwidth(): p", "+ _theme == 'dodge' def test_dodge_preserve_single(): df1 = pd.DataFrame({'x': ['a', 'b', 'b'], 'y':", "== 'dodge2_varwidth' def test_jitterdodge(): df = pd.DataFrame({ 'x': np.ones(n*2), 'y': np.repeat(np.arange(n), 2), 'letters':", "geom = geom_point(position=position_jitter) assert isinstance(position.from_geom(geom), position_jitter) def test_dodge_empty_data(): empty_df = pd.DataFrame({'x': [], 'y':", "[1, 1, 2, 2]}) df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)), 'z': np.repeat(range(n//2), range(3, n*2,", "pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}) df2 = pd.DataFrame({'x':", "geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme) from", "np.repeat(range(n//2), range(3, n*2, 4))}) df3 = pd.DataFrame({ 'x': random_state.choice(['A', 'B'], n*m), 'y': random_state.randint(0,", "position='stack') + geom_text(aes('factor(x)', 'y', label='y'), position=position_stack(vjust=0.5)) ) assert p + _theme == 'stack-negative'", "*= -1 p = (ggplot(df) + geom_col(aes('factor(x)', 'y', fill='factor(y)'), position='stack') + geom_text(aes('factor(x)', 'y',", "color='red', random_state=random_state) + geom_jitter(size=10, color='blue', width=0.1, height=0.1, random_state=random_state)) assert p + _theme ==", "_theme == 'dodge2_varwidth' def test_jitterdodge(): df = pd.DataFrame({ 'x': np.ones(n*2), 'y': np.repeat(np.arange(n), 2),", "p + _theme == 'stack-negative' def test_fill(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'),", "string import numpy as np import pandas as pd import pytest from plotnine", "False], n*m) }) random_state.seed(1234567890) _theme = theme(subplots_adjust={'right': 0.85}) def test_jitter(): df1 = pd.DataFrame({'x':", "= pd.DataFrame({ 'x': np.ones(n*2), 'y': np.repeat(np.arange(n), 2), 'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)}) position = position_jitterdodge(random_state=random_state)", "[1, 2, 1, 2], 'y': [1, 1, 2, 2]}) p = (ggplot(df1, aes('x',", "position_jitter, position_jitterdodge, position_nudge, position_stack, theme) from plotnine.positions.position import position from plotnine.exceptions import PlotnineError", "assert isinstance(position.from_geom(geom), position_jitter) def test_dodge_empty_data(): empty_df = pd.DataFrame({'x': [], 'y': []}) p =", "'fill' def test_dodge(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='dodge')) assert p +", "= pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)), 'z': np.repeat(range(n//2), range(3, n*2, 4))}) df3 = pd.DataFrame({ 'x':", "= (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='dodge')) assert p + _theme == 'dodge' def", "def test_jitterdodge(): df = pd.DataFrame({ 'x': np.ones(n*2), 'y': np.repeat(np.arange(n), 2), 'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)})", "position from plotnine.exceptions import PlotnineError n = 6 m = 10 random_state =", "d = position_dodge(preserve='single', width=0.9) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=d) + geom_text(", "position_jitterdodge(random_state=random_state) p = (ggplot(df, aes('x', 'y', fill='letters')) + geom_point(size=10, fill='black') + geom_point(size=10, position=position))", "aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='dodge')) assert 
p + _theme == 'dodge' def test_dodge_preserve_single(): df1", "va='bottom') ) assert p + _theme == 'dodge_preserve_single_text' def test_dodge2(): p = (ggplot(df3,", "df1 = pd.DataFrame({'x': ['a', 'b', 'b'], 'y': ['a', 'a', 'b']}) p = (ggplot(df1,", "aes('x', fill='y')) + geom_bar(position=position_dodge(preserve='single'))) assert p + _theme == 'dodge_preserve_single' def test_dodge_preserve_single_text(): df1", "'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)}) position = position_jitterdodge(random_state=random_state) p = (ggplot(df, aes('x', 'y', fill='letters')) +", "'b']}) d = position_dodge(preserve='single', width=0.9) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=d) +", "2]}) p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_jitter(size=10, color='red', random_state=random_state) +", "== 'dodge_preserve_single' def test_dodge_preserve_single_text(): df1 = pd.DataFrame({'x': ['a', 'b', 'b', 'b'], 'y': ['a',", "def test_stack(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='stack')) assert p + _theme", "geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme) from plotnine.positions.position import", "random_state.choice(['A', 'B'], n*m), 'y': random_state.randint(0, 20, n*m), 'c': random_state.choice([False, False, True, False], n*m)", "test_dodge_preserve_single(): df1 = pd.DataFrame({'x': ['a', 'b', 'b'], 'y': ['a', 'a', 'b']}) p =", "0.85}) def test_jitter(): df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1,", "geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme) from plotnine.positions.position import position", "size=2)) assert p + _theme == 'dodge2' def test_dodge2_varwidth(): p = (ggplot(df3, aes('x',", "random_state=random_state)) assert p + _theme == 'jitter' with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1) def test_nudge():", "def test_dodge(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='dodge')) assert p + _theme", "geom = geom_point(position='position_jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom), position_jitter) geom", "np.repeat(range(n+1), range(n+1)), 'z': np.repeat(range(n//2), range(3, n*2, 4))}) df3 = pd.DataFrame({ 'x': random_state.choice(['A', 'B'],", "p + _theme == 'dodge_preserve_single_text' def test_dodge2(): p = (ggplot(df3, aes('x', 'y', color='c'))", "+ geom_point(size=10, fill='black') + geom_point(size=10, position=position)) assert p + _theme == 'jitterdodge' def", "test_stack(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='stack')) assert p + _theme ==", "df = df1.copy() _loc = df.columns.get_loc df.iloc[0, _loc('y')] *= -1 df.iloc[len(df)-1, _loc('y')] *=", "def test_jitter(): df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1, 2,", "'b']}) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=position_dodge(preserve='single'))) assert p + _theme ==", "fill='y')) + geom_bar(position=position_dodge(preserve='single'))) assert p + _theme == 'dodge_preserve_single' def test_dodge_preserve_single_text(): df1 =", "+ _theme == 'dodge2' def test_dodge2_varwidth(): p = (ggplot(df3, aes('x', 'y', color='c')) +", "_theme == 'dodge_preserve_single' def test_dodge_preserve_single_text(): df1 = 
pd.DataFrame({'x': ['a', 'b', 'b', 'b'], 'y':", "empty_df = pd.DataFrame({'x': [], 'y': []}) p = (ggplot(df1, aes('x', 'y')) + geom_point()", "(ggplot(df, aes('x', 'y', fill='letters')) + geom_point(size=10, fill='black') + geom_point(size=10, position=position)) assert p +", "assert p + _theme == 'stack' def test_stack_negative(): df = df1.copy() _loc =", "p + _theme == 'stack' def test_stack_negative(): df = df1.copy() _loc = df.columns.get_loc", "p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=d) + geom_text( aes(y=after_stat('count'), label=after_stat('count')), stat='count', position=d,", "= (ggplot(df1, aes('x', fill='y')) + geom_bar(position=position_dodge(preserve='single'))) assert p + _theme == 'dodge_preserve_single' def", "import numpy as np import pandas as pd import pytest from plotnine import", "'dodge_preserve_single_text' def test_dodge2(): p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot(position='dodge2', size=2)) assert", "p + _theme == 'jitter' with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1) def test_nudge(): p =", "2, 2]}) df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)), 'z': np.repeat(range(n//2), range(3, n*2, 4))}) df3", "[1, 2, 1, 2], 'y': [1, 1, 2, 2]}) df2 = pd.DataFrame({'x': np.repeat(range(n+1),", "pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1) def test_nudge(): p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) +", "from plotnine.exceptions import PlotnineError n = 6 m = 10 random_state = np.random.RandomState(1234567890)", "random_state = np.random.RandomState(1234567890) df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1,", "+ geom_bar(aes(fill='factor(x)'), position='fill')) assert p + _theme == 'fill' def test_dodge(): p =", "+ geom_text( aes(y=after_stat('count'), label=after_stat('count')), stat='count', position=d, va='bottom') ) assert p + _theme ==", "position_jitter) geom = geom_point(position='position_jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom), position_jitter)", "2], 'y': [1, 1, 2, 2]}) df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)), 'z': np.repeat(range(n//2),", "-1 p = (ggplot(df) + geom_col(aes('factor(x)', 'y', fill='factor(y)'), position='stack') + geom_text(aes('factor(x)', 'y', label='y'),", "2, 1, 2], 'y': [1, 1, 2, 2]}) df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)),", "+ geom_col(aes('factor(x)', 'y', fill='factor(y)'), position='stack') + geom_text(aes('factor(x)', 'y', label='y'), position=position_stack(vjust=0.5)) ) assert p", "(ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='dodge')) assert p + _theme == 'dodge' def test_dodge_preserve_single():", "== 'stack' def test_stack_negative(): df = df1.copy() _loc = df.columns.get_loc df.iloc[0, _loc('y')] *=", "= pd.DataFrame({ 'x': random_state.choice(['A', 'B'], n*m), 'y': random_state.randint(0, 20, n*m), 'c': random_state.choice([False, False,", ") assert p + _theme == 'dodge_preserve_single_text' def test_dodge2(): p = (ggplot(df3, aes('x',", "geom_col(aes('factor(x)', 'y', fill='factor(y)'), position='stack') + geom_text(aes('factor(x)', 'y', label='y'), position=position_stack(vjust=0.5)) ) assert p +", "width=0.9) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=d) + geom_text( aes(y=after_stat('count'), label=after_stat('count')), stat='count',", "'a', 'b']}) p = (ggplot(df1, aes('x', fill='y')) + 
geom_bar(position=position_dodge(preserve='single'))) assert p + _theme", "test_jitter(): df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]})", "'b', 'b', 'b'], 'y': ['a', 'a', 'b', 'b']}) d = position_dodge(preserve='single', width=0.9) p", "np.repeat(np.arange(n), 2), 'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)}) position = position_jitterdodge(random_state=random_state) p = (ggplot(df, aes('x', 'y',", "True, False], n*m) }) random_state.seed(1234567890) _theme = theme(subplots_adjust={'right': 0.85}) def test_jitter(): df1 =", "'y': ['a', 'a', 'b']}) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=position_dodge(preserve='single'))) assert p", "['a', 'a', 'b']}) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=position_dodge(preserve='single'))) assert p +", "position_stack, theme) from plotnine.positions.position import position from plotnine.exceptions import PlotnineError n = 6", "test_dodge2_varwidth(): p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True, size=2) )", "_theme == 'stack' def test_stack_negative(): df = df1.copy() _loc = df.columns.get_loc df.iloc[0, _loc('y')]", "+ _theme == 'dodge_preserve_single' def test_dodge_preserve_single_text(): df1 = pd.DataFrame({'x': ['a', 'b', 'b', 'b'],", "geom_text(aes('factor(x)', 'y', label='y'), position=position_stack(vjust=0.5)) ) assert p + _theme == 'stack-negative' def test_fill():", "label=after_stat('count')), stat='count', position=d, va='bottom') ) assert p + _theme == 'dodge_preserve_single_text' def test_dodge2():", "== 'stack-negative' def test_fill(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='fill')) assert p", "assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter) assert isinstance(position.from_geom(geom), position_jitter) def test_dodge_empty_data(): empty_df =", "= (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='stack')) assert p + _theme == 'stack' def", "assert p + _theme == 'fill' def test_dodge(): p = (ggplot(df2, aes('factor(z)')) +", "+ geom_point(size=10, position=position)) assert p + _theme == 'jitterdodge' def test_position_from_geom(): geom =", "= (ggplot(df) + geom_col(aes('factor(x)', 'y', fill='factor(y)'), position='stack') + geom_text(aes('factor(x)', 'y', label='y'), position=position_stack(vjust=0.5)) )", "random_state=random_state) + geom_jitter(size=10, color='blue', width=0.1, height=0.1, random_state=random_state)) assert p + _theme == 'jitter'", "+ geom_jitter(size=10, color='blue', width=0.1, height=0.1, random_state=random_state)) assert p + _theme == 'jitter' with", "plotnine.positions.position import position from plotnine.exceptions import PlotnineError n = 6 m = 10", "range(n+1)), 'z': np.repeat(range(n//2), range(3, n*2, 4))}) df3 = pd.DataFrame({ 'x': random_state.choice(['A', 'B'], n*m),", "2), 'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)}) position = position_jitterdodge(random_state=random_state) p = (ggplot(df, aes('x', 'y', fill='letters'))", "_theme == 'nudge' def test_stack(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='stack')) assert", "_theme == 'dodge_preserve_single_text' def test_dodge2(): p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot(position='dodge2',", "geom_point(size=10) + geom_jitter(size=10, color='red', random_state=random_state) + geom_jitter(size=10, color='blue', width=0.1, height=0.1, 
random_state=random_state)) assert p", "'y': []}) p = (ggplot(df1, aes('x', 'y')) + geom_point() + geom_rect( empty_df, aes(xmin='x',", "= (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_point(size=10, color='red', position=position_nudge(.25, .25))) assert p", "n*m), 'y': random_state.randint(0, 20, n*m), 'c': random_state.choice([False, False, True, False], n*m) }) random_state.seed(1234567890)", "position=position_stack(vjust=0.5)) ) assert p + _theme == 'stack-negative' def test_fill(): p = (ggplot(df2,", "position=d, va='bottom') ) assert p + _theme == 'dodge_preserve_single_text' def test_dodge2(): p =", "p = (ggplot(df1, aes('x', 'y')) + geom_point() + geom_rect( empty_df, aes(xmin='x', xmax='x+1', ymin='y',", "p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='fill')) assert p + _theme == 'fill'", "['a', 'b', 'b'], 'y': ['a', 'a', 'b']}) p = (ggplot(df1, aes('x', fill='y')) +", "with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1) def test_nudge(): p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10)", "2]}) df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)), 'z': np.repeat(range(n//2), range(3, n*2, 4))}) df3 =", "height=0.1, random_state=random_state)) assert p + _theme == 'jitter' with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1) def", "= pd.DataFrame({'x': ['a', 'b', 'b'], 'y': ['a', 'a', 'b']}) p = (ggplot(df1, aes('x',", "pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)), 'z': np.repeat(range(n//2), range(3, n*2, 4))}) df3 = pd.DataFrame({ 'x': random_state.choice(['A',", "+ geom_point(size=10) + geom_jitter(size=10, color='red', random_state=random_state) + geom_jitter(size=10, color='blue', width=0.1, height=0.1, random_state=random_state)) assert", "position = position_jitterdodge(random_state=random_state) p = (ggplot(df, aes('x', 'y', fill='letters')) + geom_point(size=10, fill='black') +", "as pd import pytest from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col,", "4))}) df3 = pd.DataFrame({ 'x': random_state.choice(['A', 'B'], n*m), 'y': random_state.randint(0, 20, n*m), 'c':", "}) random_state.seed(1234567890) _theme = theme(subplots_adjust={'right': 0.85}) def test_jitter(): df1 = pd.DataFrame({'x': [1, 2,", "width=0.1, height=0.1, random_state=random_state)) assert p + _theme == 'jitter' with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1)", "theme(subplots_adjust={'right': 0.85}) def test_jitter(): df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1,", "2)}) position = position_jitterdodge(random_state=random_state) p = (ggplot(df, aes('x', 'y', fill='letters')) + geom_point(size=10, fill='black')", "from plotnine.positions.position import position from plotnine.exceptions import PlotnineError n = 6 m =", "position='stack')) assert p + _theme == 'stack' def test_stack_negative(): df = df1.copy() _loc", "test_dodge(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='dodge')) assert p + _theme ==", "'y', label='y'), position=position_stack(vjust=0.5)) ) assert p + _theme == 'stack-negative' def test_fill(): p", "plotnine.exceptions import PlotnineError n = 6 m = 10 random_state = np.random.RandomState(1234567890) df1", "'nudge' def test_stack(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='stack')) assert p +", "df.columns.get_loc df.iloc[0, _loc('y')] *= -1 df.iloc[len(df)-1, _loc('y')] *= -1 p = (ggplot(df) +", "color='blue', 
width=0.1, height=0.1, random_state=random_state)) assert p + _theme == 'jitter' with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(),", "10 random_state = np.random.RandomState(1234567890) df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1,", "varwidth=True, size=2) ) assert p + _theme == 'dodge2_varwidth' def test_jitterdodge(): df =", "= (ggplot(df, aes('x', 'y', fill='letters')) + geom_point(size=10, fill='black') + geom_point(size=10, position=position)) assert p", "geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge,", "6 m = 10 random_state = np.random.RandomState(1234567890) df1 = pd.DataFrame({'x': [1, 2, 1,", "geom_point(size=10) + geom_point(size=10, color='red', position=position_nudge(.25, .25))) assert p + _theme == 'nudge' def", "def test_dodge_preserve_single_text(): df1 = pd.DataFrame({'x': ['a', 'b', 'b', 'b'], 'y': ['a', 'a', 'b',", "'jitterdodge' def test_position_from_geom(): geom = geom_point(position='jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position='position_jitter') assert", "range(3, n*2, 4))}) df3 = pd.DataFrame({ 'x': random_state.choice(['A', 'B'], n*m), 'y': random_state.randint(0, 20,", ") assert p + _theme == 'stack-negative' def test_fill(): p = (ggplot(df2, aes('factor(z)'))", "_theme == 'stack-negative' def test_fill(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='fill')) assert", "'dodge_preserve_single' def test_dodge_preserve_single_text(): df1 = pd.DataFrame({'x': ['a', 'b', 'b', 'b'], 'y': ['a', 'a',", "def test_dodge2_varwidth(): p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True, size=2)", "geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme) from plotnine.positions.position", "test_position_from_geom(): geom = geom_point(position='jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position='position_jitter') assert isinstance(position.from_geom(geom), position_jitter)", "theme) from plotnine.positions.position import position from plotnine.exceptions import PlotnineError n = 6 m", "aes('x', fill='y')) + geom_bar(position=d) + geom_text( aes(y=after_stat('count'), label=after_stat('count')), stat='count', position=d, va='bottom') ) assert", "assert p + _theme == 'dodge' def test_dodge_preserve_single(): df1 = pd.DataFrame({'x': ['a', 'b',", "plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge,", "= pd.DataFrame({'x': [], 'y': []}) p = (ggplot(df1, aes('x', 'y')) + geom_point() +", "= geom_point(position='jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position='position_jitter') assert isinstance(position.from_geom(geom), position_jitter) geom =", "import pytest from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text,", "p + _theme == 'dodge2_varwidth' def test_jitterdodge(): df = pd.DataFrame({ 'x': np.ones(n*2), 'y':", "[1, 1, 2, 2]}) p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_jitter(size=10,", "geom_bar(aes(fill='factor(x)'), position='stack')) assert p + _theme == 'stack' def test_stack_negative(): df = df1.copy()", 
"aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='stack')) assert p + _theme == 'stack' def test_stack_negative(): df", "assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter) assert", "+ _theme == 'stack-negative' def test_fill(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='fill'))", "+ _theme == 'jitterdodge' def test_position_from_geom(): geom = geom_point(position='jitter') assert isinstance(position.from_geom(geom), position_jitter) geom", "geom_point(position='jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position='position_jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter())", "= pd.DataFrame({'x': ['a', 'b', 'b', 'b'], 'y': ['a', 'a', 'b', 'b']}) d =", "PlotnineError n = 6 m = 10 random_state = np.random.RandomState(1234567890) df1 = pd.DataFrame({'x':", "aes('x', 'y')) + geom_point(size=10) + geom_jitter(size=10, color='red', random_state=random_state) + geom_jitter(size=10, color='blue', width=0.1, height=0.1,", "random_state.seed(1234567890) _theme = theme(subplots_adjust={'right': 0.85}) def test_jitter(): df1 = pd.DataFrame({'x': [1, 2, 1,", "aes('x', 'y')) + geom_point(size=10) + geom_point(size=10, color='red', position=position_nudge(.25, .25))) assert p + _theme", "geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme)", "test_dodge2(): p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot(position='dodge2', size=2)) assert p +", "+ geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True, size=2) ) assert p + _theme == 'dodge2_varwidth' def", "= (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_jitter(size=10, color='red', random_state=random_state) + geom_jitter(size=10, color='blue',", "import PlotnineError n = 6 m = 10 random_state = np.random.RandomState(1234567890) df1 =", "geom_bar(aes(fill='factor(x)'), position='dodge')) assert p + _theme == 'dodge' def test_dodge_preserve_single(): df1 = pd.DataFrame({'x':", "'z': np.repeat(range(n//2), range(3, n*2, 4))}) df3 = pd.DataFrame({ 'x': random_state.choice(['A', 'B'], n*m), 'y':", "p + _theme == 'dodge' def test_dodge_preserve_single(): df1 = pd.DataFrame({'x': ['a', 'b', 'b'],", "isinstance(position.from_geom(geom), position_jitter) def test_dodge_empty_data(): empty_df = pd.DataFrame({'x': [], 'y': []}) p = (ggplot(df1,", "'b'], 'y': ['a', 'a', 'b']}) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=position_dodge(preserve='single'))) assert", "'B'], n*m), 'y': random_state.randint(0, 20, n*m), 'c': random_state.choice([False, False, True, False], n*m) })", "== 'fill' def test_dodge(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='dodge')) assert p", "= (ggplot(df1, aes('x', fill='y')) + geom_bar(position=d) + geom_text( aes(y=after_stat('count'), label=after_stat('count')), stat='count', position=d, va='bottom')", "'stack-negative' def test_fill(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='fill')) assert p +", "position_jitter) geom = geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter) assert 
isinstance(position.from_geom(geom), position_jitter)", "== 'dodge_preserve_single_text' def test_dodge2(): p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot(position='dodge2', size=2))", "numpy as np import pandas as pd import pytest from plotnine import (ggplot,", "position='fill')) assert p + _theme == 'fill' def test_dodge(): p = (ggplot(df2, aes('factor(z)'))", "_theme == 'fill' def test_dodge(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='dodge')) assert", "position_jitter) geom = geom_point(position=position_jitter) assert isinstance(position.from_geom(geom), position_jitter) def test_dodge_empty_data(): empty_df = pd.DataFrame({'x': [],", "+ geom_jitter(size=10, color='red', random_state=random_state) + geom_jitter(size=10, color='blue', width=0.1, height=0.1, random_state=random_state)) assert p +", "pytest from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect,", "'b', 'b'], 'y': ['a', 'a', 'b', 'b']}) d = position_dodge(preserve='single', width=0.9) p =", "assert p + _theme == 'nudge' def test_stack(): p = (ggplot(df2, aes('factor(z)')) +", "np import pandas as pd import pytest from plotnine import (ggplot, aes, geom_point,", "-1 df.iloc[len(df)-1, _loc('y')] *= -1 p = (ggplot(df) + geom_col(aes('factor(x)', 'y', fill='factor(y)'), position='stack')", "fill='letters')) + geom_point(size=10, fill='black') + geom_point(size=10, position=position)) assert p + _theme == 'jitterdodge'", "import (ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2,", "== 'dodge2' def test_dodge2_varwidth(): p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot( position=position_dodge2(preserve='single'),", "aes('x', 'y')) + geom_point() + geom_rect( empty_df, aes(xmin='x', xmax='x+1', ymin='y', ymax='y+1'), position='dodge') )", "'y')) + geom_point(size=10) + geom_point(size=10, color='red', position=position_nudge(.25, .25))) assert p + _theme ==", "aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge,", "+ geom_bar(aes(fill='factor(x)'), position='stack')) assert p + _theme == 'stack' def test_stack_negative(): df =", "aes('x', 'y', color='c')) + geom_boxplot(position='dodge2', size=2)) assert p + _theme == 'dodge2' def", "df3 = pd.DataFrame({ 'x': random_state.choice(['A', 'B'], n*m), 'y': random_state.randint(0, 20, n*m), 'c': random_state.choice([False,", "aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='fill')) assert p + _theme == 'fill' def test_dodge(): p", "pd.DataFrame({'x': [], 'y': []}) p = (ggplot(df1, aes('x', 'y')) + geom_point() + geom_rect(", "geom_point(size=10, position=position)) assert p + _theme == 'jitterdodge' def test_position_from_geom(): geom = geom_point(position='jitter')", "isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter) assert isinstance(position.from_geom(geom),", "df = pd.DataFrame({ 'x': np.ones(n*2), 'y': np.repeat(np.arange(n), 2), 'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)}) position =", "(ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='fill')) assert p + _theme == 'fill' def test_dodge():", "pd.DataFrame({ 'x': random_state.choice(['A', 'B'], n*m), 
'y': random_state.randint(0, 20, n*m), 'c': random_state.choice([False, False, True,", "position='dodge')) assert p + _theme == 'dodge' def test_dodge_preserve_single(): df1 = pd.DataFrame({'x': ['a',", "geom_text( aes(y=after_stat('count'), label=after_stat('count')), stat='count', position=d, va='bottom') ) assert p + _theme == 'dodge_preserve_single_text'", "'c': random_state.choice([False, False, True, False], n*m) }) random_state.seed(1234567890) _theme = theme(subplots_adjust={'right': 0.85}) def", "= df1.copy() _loc = df.columns.get_loc df.iloc[0, _loc('y')] *= -1 df.iloc[len(df)-1, _loc('y')] *= -1", "np.ones(n*2), 'y': np.repeat(np.arange(n), 2), 'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)}) position = position_jitterdodge(random_state=random_state) p = (ggplot(df,", "assert p + _theme == 'dodge_preserve_single_text' def test_dodge2(): p = (ggplot(df3, aes('x', 'y',", "'y', color='c')) + geom_boxplot(position='dodge2', size=2)) assert p + _theme == 'dodge2' def test_dodge2_varwidth():", "(ggplot(df1, aes('x', 'y')) + geom_point() + geom_rect( empty_df, aes(xmin='x', xmax='x+1', ymin='y', ymax='y+1'), position='dodge')", "geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack,", "p + _theme == 'dodge2' def test_dodge2_varwidth(): p = (ggplot(df3, aes('x', 'y', color='c'))", "(ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_jitter(size=10, color='red', random_state=random_state) + geom_jitter(size=10, color='blue', width=0.1,", "geom_jitter(size=10, color='red', random_state=random_state) + geom_jitter(size=10, color='blue', width=0.1, height=0.1, random_state=random_state)) assert p + _theme", "p + _theme == 'fill' def test_dodge(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'),", "aes('x', 'y', color='c')) + geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True, size=2) ) assert p + _theme", "'dodge2' def test_dodge2_varwidth(): p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True,", "= (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True, size=2) ) assert p", "test_dodge_preserve_single_text(): df1 = pd.DataFrame({'x': ['a', 'b', 'b', 'b'], 'y': ['a', 'a', 'b', 'b']})", "import position from plotnine.exceptions import PlotnineError n = 6 m = 10 random_state", "n*m) }) random_state.seed(1234567890) _theme = theme(subplots_adjust={'right': 0.85}) def test_jitter(): df1 = pd.DataFrame({'x': [1,", "'y')) + geom_point() + geom_rect( empty_df, aes(xmin='x', xmax='x+1', ymin='y', ymax='y+1'), position='dodge') ) p.draw_test()", "import string import numpy as np import pandas as pd import pytest from", "+ geom_bar(aes(fill='factor(x)'), position='dodge')) assert p + _theme == 'dodge' def test_dodge_preserve_single(): df1 =", "position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme) from plotnine.positions.position import position from plotnine.exceptions", "'b', 'b']}) d = position_dodge(preserve='single', width=0.9) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=d)", "20, n*m), 'c': random_state.choice([False, False, True, False], n*m) }) random_state.seed(1234567890) _theme = theme(subplots_adjust={'right':", "_theme == 'dodge' def test_dodge_preserve_single(): df1 = pd.DataFrame({'x': ['a', 'b', 'b'], 'y': 
['a',", "= df.columns.get_loc df.iloc[0, _loc('y')] *= -1 df.iloc[len(df)-1, _loc('y')] *= -1 p = (ggplot(df)", "n*m), 'c': random_state.choice([False, False, True, False], n*m) }) random_state.seed(1234567890) _theme = theme(subplots_adjust={'right': 0.85})", "+ _theme == 'dodge2_varwidth' def test_jitterdodge(): df = pd.DataFrame({ 'x': np.ones(n*2), 'y': np.repeat(np.arange(n),", "'dodge2_varwidth' def test_jitterdodge(): df = pd.DataFrame({ 'x': np.ones(n*2), 'y': np.repeat(np.arange(n), 2), 'letters': np.repeat(list(string.ascii_lowercase[:n]),", "_theme == 'dodge2' def test_dodge2_varwidth(): p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot(", "assert p + _theme == 'dodge2_varwidth' def test_jitterdodge(): df = pd.DataFrame({ 'x': np.ones(n*2),", "position=position_nudge(.25, .25))) assert p + _theme == 'nudge' def test_stack(): p = (ggplot(df2,", "n*2, 4))}) df3 = pd.DataFrame({ 'x': random_state.choice(['A', 'B'], n*m), 'y': random_state.randint(0, 20, n*m),", "df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}) p", "p + _theme == 'nudge' def test_stack(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'),", "import pandas as pd import pytest from plotnine import (ggplot, aes, geom_point, geom_jitter,", "def test_stack_negative(): df = df1.copy() _loc = df.columns.get_loc df.iloc[0, _loc('y')] *= -1 df.iloc[len(df)-1,", ") assert p + _theme == 'dodge2_varwidth' def test_jitterdodge(): df = pd.DataFrame({ 'x':", "geom_bar(position=d) + geom_text( aes(y=after_stat('count'), label=after_stat('count')), stat='count', position=d, va='bottom') ) assert p + _theme", "'y', fill='letters')) + geom_point(size=10, fill='black') + geom_point(size=10, position=position)) assert p + _theme ==", "assert p + _theme == 'dodge_preserve_single' def test_dodge_preserve_single_text(): df1 = pd.DataFrame({'x': ['a', 'b',", "df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)), 'z': np.repeat(range(n//2), range(3, n*2, 4))}) df3 = pd.DataFrame({", "def test_nudge(): p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_point(size=10, color='red', position=position_nudge(.25,", "'b', 'b'], 'y': ['a', 'a', 'b']}) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=position_dodge(preserve='single')))", "color='c')) + geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True, size=2) ) assert p + _theme == 'dodge2_varwidth'", "geom_bar(position=position_dodge(preserve='single'))) assert p + _theme == 'dodge_preserve_single' def test_dodge_preserve_single_text(): df1 = pd.DataFrame({'x': ['a',", "np.repeat(list(string.ascii_lowercase[:n]), 2)}) position = position_jitterdodge(random_state=random_state) p = (ggplot(df, aes('x', 'y', fill='letters')) + geom_point(size=10,", "pd.DataFrame({'x': ['a', 'b', 'b', 'b'], 'y': ['a', 'a', 'b', 'b']}) d = position_dodge(preserve='single',", "(ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True, size=2) ) assert p +", "p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='stack')) assert p + _theme == 'stack'", "[]}) p = (ggplot(df1, aes('x', 'y')) + geom_point() + geom_rect( empty_df, aes(xmin='x', xmax='x+1',", "pd.DataFrame({ 'x': np.ones(n*2), 'y': np.repeat(np.arange(n), 2), 'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)}) position = position_jitterdodge(random_state=random_state) p", "'x': np.ones(n*2), 'y': np.repeat(np.arange(n), 2), 'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)}) position 
= position_jitterdodge(random_state=random_state) p =", "p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='dodge')) assert p + _theme == 'dodge'", "'y': [1, 1, 2, 2]}) df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)), 'z': np.repeat(range(n//2), range(3,", "'y', fill='factor(y)'), position='stack') + geom_text(aes('factor(x)', 'y', label='y'), position=position_stack(vjust=0.5)) ) assert p + _theme", "p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True, size=2) ) assert", "+ _theme == 'stack' def test_stack_negative(): df = df1.copy() _loc = df.columns.get_loc df.iloc[0,", "position_nudge, position_stack, theme) from plotnine.positions.position import position from plotnine.exceptions import PlotnineError n =", "p + _theme == 'dodge_preserve_single' def test_dodge_preserve_single_text(): df1 = pd.DataFrame({'x': ['a', 'b', 'b',", "pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}) p = (ggplot(df1,", "test_nudge(): p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_point(size=10, color='red', position=position_nudge(.25, .25)))", "position_dodge(preserve='single', width=0.9) p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=d) + geom_text( aes(y=after_stat('count'), label=after_stat('count')),", "(ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='stack')) assert p + _theme == 'stack' def test_stack_negative():", "+ _theme == 'fill' def test_dodge(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='dodge'))", "position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme) from plotnine.positions.position import position from plotnine.exceptions import", "as np import pandas as pd import pytest from plotnine import (ggplot, aes,", "geom_point(size=10, fill='black') + geom_point(size=10, position=position)) assert p + _theme == 'jitterdodge' def test_position_from_geom():", "False, True, False], n*m) }) random_state.seed(1234567890) _theme = theme(subplots_adjust={'right': 0.85}) def test_jitter(): df1", "assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position='position_jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter()) assert", "= theme(subplots_adjust={'right': 0.85}) def test_jitter(): df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y':", "[], 'y': []}) p = (ggplot(df1, aes('x', 'y')) + geom_point() + geom_rect( empty_df,", "= geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter) assert isinstance(position.from_geom(geom), position_jitter) def test_dodge_empty_data():", "'jitter' with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1) def test_nudge(): p = (ggplot(df1, aes('x', 'y')) +", "df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}) df2", "+ geom_point(size=10) + geom_point(size=10, color='red', position=position_nudge(.25, .25))) assert p + _theme == 'nudge'", "+ geom_bar(position=position_dodge(preserve='single'))) assert p + _theme == 'dodge_preserve_single' def test_dodge_preserve_single_text(): df1 = pd.DataFrame({'x':", "+ _theme == 'jitter' with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1) def test_nudge(): p = (ggplot(df1,", "pandas as pd import pytest from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar,", 
"'dodge' def test_dodge_preserve_single(): df1 = pd.DataFrame({'x': ['a', 'b', 'b'], 'y': ['a', 'a', 'b']})", "'a', 'b', 'b']}) d = position_dodge(preserve='single', width=0.9) p = (ggplot(df1, aes('x', fill='y')) +", "['a', 'a', 'b', 'b']}) d = position_dodge(preserve='single', width=0.9) p = (ggplot(df1, aes('x', fill='y'))", "== 'jitter' with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1) def test_nudge(): p = (ggplot(df1, aes('x', 'y'))", "geom_jitter(position=position_jitter(), width=0.1) def test_nudge(): p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_point(size=10,", "= (ggplot(df1, aes('x', 'y')) + geom_point() + geom_rect( empty_df, aes(xmin='x', xmax='x+1', ymin='y', ymax='y+1'),", "*= -1 df.iloc[len(df)-1, _loc('y')] *= -1 p = (ggplot(df) + geom_col(aes('factor(x)', 'y', fill='factor(y)'),", "position_jitter) def test_dodge_empty_data(): empty_df = pd.DataFrame({'x': [], 'y': []}) p = (ggplot(df1, aes('x',", "geom = geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter) assert isinstance(position.from_geom(geom), position_jitter) def", "geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter) assert isinstance(position.from_geom(geom), position_jitter) def test_dodge_empty_data(): empty_df", "2], 'y': [1, 1, 2, 2]}) p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10)", "_loc('y')] *= -1 p = (ggplot(df) + geom_col(aes('factor(x)', 'y', fill='factor(y)'), position='stack') + geom_text(aes('factor(x)',", "'b'], 'y': ['a', 'a', 'b', 'b']}) d = position_dodge(preserve='single', width=0.9) p = (ggplot(df1,", "+ geom_bar(position=d) + geom_text( aes(y=after_stat('count'), label=after_stat('count')), stat='count', position=d, va='bottom') ) assert p +", "assert p + _theme == 'jitter' with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1) def test_nudge(): p", "= np.random.RandomState(1234567890) df1 = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1, 2,", "test_jitterdodge(): df = pd.DataFrame({ 'x': np.ones(n*2), 'y': np.repeat(np.arange(n), 2), 'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)}) position", "'y': np.repeat(np.arange(n), 2), 'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)}) position = position_jitterdodge(random_state=random_state) p = (ggplot(df, aes('x',", "'y': [1, 1, 2, 2]}) p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) +", "_loc('y')] *= -1 df.iloc[len(df)-1, _loc('y')] *= -1 p = (ggplot(df) + geom_col(aes('factor(x)', 'y',", "geom_point(position=position_jitter) assert isinstance(position.from_geom(geom), position_jitter) def test_dodge_empty_data(): empty_df = pd.DataFrame({'x': [], 'y': []}) p", "geom_point(position='position_jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter)", "p = (ggplot(df1, aes('x', fill='y')) + geom_bar(position=position_dodge(preserve='single'))) assert p + _theme == 'dodge_preserve_single'", "geom_point(size=10, color='red', position=position_nudge(.25, .25))) assert p + _theme == 'nudge' def test_stack(): p", "(ggplot(df1, aes('x', fill='y')) + geom_bar(position=position_dodge(preserve='single'))) assert p + _theme == 'dodge_preserve_single' def test_dodge_preserve_single_text():", "= (ggplot(df2, 
aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='fill')) assert p + _theme == 'fill' def", "df1.copy() _loc = df.columns.get_loc df.iloc[0, _loc('y')] *= -1 df.iloc[len(df)-1, _loc('y')] *= -1 p", "test_stack_negative(): df = df1.copy() _loc = df.columns.get_loc df.iloc[0, _loc('y')] *= -1 df.iloc[len(df)-1, _loc('y')]", "test_fill(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='fill')) assert p + _theme ==", "p + _theme == 'jitterdodge' def test_position_from_geom(): geom = geom_point(position='jitter') assert isinstance(position.from_geom(geom), position_jitter)", "1, 2], 'y': [1, 1, 2, 2]}) p = (ggplot(df1, aes('x', 'y')) +", "geom_jitter(size=10, color='blue', width=0.1, height=0.1, random_state=random_state)) assert p + _theme == 'jitter' with pytest.raises(PlotnineError):", "= 6 m = 10 random_state = np.random.RandomState(1234567890) df1 = pd.DataFrame({'x': [1, 2,", "assert p + _theme == 'dodge2' def test_dodge2_varwidth(): p = (ggplot(df3, aes('x', 'y',", "random_state.choice([False, False, True, False], n*m) }) random_state.seed(1234567890) _theme = theme(subplots_adjust={'right': 0.85}) def test_jitter():", "== 'dodge' def test_dodge_preserve_single(): df1 = pd.DataFrame({'x': ['a', 'b', 'b'], 'y': ['a', 'a',", "= position_jitterdodge(random_state=random_state) p = (ggplot(df, aes('x', 'y', fill='letters')) + geom_point(size=10, fill='black') + geom_point(size=10,", "label='y'), position=position_stack(vjust=0.5)) ) assert p + _theme == 'stack-negative' def test_fill(): p =", "pd.DataFrame({'x': ['a', 'b', 'b'], 'y': ['a', 'a', 'b']}) p = (ggplot(df1, aes('x', fill='y'))", "['a', 'b', 'b', 'b'], 'y': ['a', 'a', 'b', 'b']}) d = position_dodge(preserve='single', width=0.9)", "+ _theme == 'nudge' def test_stack(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='stack'))", "df1 = pd.DataFrame({'x': ['a', 'b', 'b', 'b'], 'y': ['a', 'a', 'b', 'b']}) d", "= geom_point(position='position_jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter()) assert isinstance(position.from_geom(geom), position_jitter) geom =", "geom = geom_point(position='jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position='position_jitter') assert isinstance(position.from_geom(geom), position_jitter) geom", "(ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_point(size=10, color='red', position=position_nudge(.25, .25))) assert p +", "from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat,", "def test_dodge2(): p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot(position='dodge2', size=2)) assert p", "p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_point(size=10, color='red', position=position_nudge(.25, .25))) assert", "_theme == 'jitter' with pytest.raises(PlotnineError): geom_jitter(position=position_jitter(), width=0.1) def test_nudge(): p = (ggplot(df1, aes('x',", "fill='y')) + geom_bar(position=d) + geom_text( aes(y=after_stat('count'), label=after_stat('count')), stat='count', position=d, va='bottom') ) assert p", "== 'nudge' def test_stack(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='stack')) assert p", "geom_boxplot( position=position_dodge2(preserve='single'), varwidth=True, size=2) ) assert p + _theme == 'dodge2_varwidth' def test_jitterdodge():", "+ _theme == 'dodge_preserve_single_text' def 
test_dodge2(): p = (ggplot(df3, aes('x', 'y', color='c')) +", "isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position=position_jitter) assert isinstance(position.from_geom(geom), position_jitter) def test_dodge_empty_data(): empty_df = pd.DataFrame({'x':", "after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme) from plotnine.positions.position import position from", ".25))) assert p + _theme == 'nudge' def test_stack(): p = (ggplot(df2, aes('factor(z)'))", "'y': random_state.randint(0, 20, n*m), 'c': random_state.choice([False, False, True, False], n*m) }) random_state.seed(1234567890) _theme", "m = 10 random_state = np.random.RandomState(1234567890) df1 = pd.DataFrame({'x': [1, 2, 1, 2],", "_loc = df.columns.get_loc df.iloc[0, _loc('y')] *= -1 df.iloc[len(df)-1, _loc('y')] *= -1 p =", "+ geom_boxplot(position='dodge2', size=2)) assert p + _theme == 'dodge2' def test_dodge2_varwidth(): p =", "stat='count', position=d, va='bottom') ) assert p + _theme == 'dodge_preserve_single_text' def test_dodge2(): p", "def test_dodge_preserve_single(): df1 = pd.DataFrame({'x': ['a', 'b', 'b'], 'y': ['a', 'a', 'b']}) p", "color='red', position=position_nudge(.25, .25))) assert p + _theme == 'nudge' def test_stack(): p =", "aes('x', 'y', fill='letters')) + geom_point(size=10, fill='black') + geom_point(size=10, position=position)) assert p + _theme", "position_jitterdodge, position_nudge, position_stack, theme) from plotnine.positions.position import position from plotnine.exceptions import PlotnineError n", "geom_bar(aes(fill='factor(x)'), position='fill')) assert p + _theme == 'fill' def test_dodge(): p = (ggplot(df2,", "def test_fill(): p = (ggplot(df2, aes('factor(z)')) + geom_bar(aes(fill='factor(x)'), position='fill')) assert p + _theme", "(ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter,", "fill='black') + geom_point(size=10, position=position)) assert p + _theme == 'jitterdodge' def test_position_from_geom(): geom", "test_dodge_empty_data(): empty_df = pd.DataFrame({'x': [], 'y': []}) p = (ggplot(df1, aes('x', 'y')) +", "pd import pytest from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot,", "width=0.1) def test_nudge(): p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_point(size=10, color='red',", "'x': random_state.choice(['A', 'B'], n*m), 'y': random_state.randint(0, 20, n*m), 'c': random_state.choice([False, False, True, False],", "p = (ggplot(df3, aes('x', 'y', color='c')) + geom_boxplot(position='dodge2', size=2)) assert p + _theme", "1, 2, 2]}) df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)), 'z': np.repeat(range(n//2), range(3, n*2, 4))})", "2, 2]}) p = (ggplot(df1, aes('x', 'y')) + geom_point(size=10) + geom_jitter(size=10, color='red', random_state=random_state)", "p = (ggplot(df) + geom_col(aes('factor(x)', 'y', fill='factor(y)'), position='stack') + geom_text(aes('factor(x)', 'y', label='y'), position=position_stack(vjust=0.5))", "= pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}) df2 =", "'y': ['a', 'a', 'b', 'b']}) d = position_dodge(preserve='single', width=0.9) p = (ggplot(df1, aes('x',", "random_state.randint(0, 20, n*m), 'c': random_state.choice([False, False, True, False], n*m) }) random_state.seed(1234567890) _theme =", "(ggplot(df1, aes('x', fill='y')) + geom_bar(position=d) + geom_text( 
aes(y=after_stat('count'), label=after_stat('count')), stat='count', position=d, va='bottom') )", "= pd.DataFrame({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}) p =", "position=position)) assert p + _theme == 'jitterdodge' def test_position_from_geom(): geom = geom_point(position='jitter') assert", "1, 2], 'y': [1, 1, 2, 2]}) df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)), 'z':", "df.iloc[0, _loc('y')] *= -1 df.iloc[len(df)-1, _loc('y')] *= -1 p = (ggplot(df) + geom_col(aes('factor(x)',", "_theme == 'jitterdodge' def test_position_from_geom(): geom = geom_point(position='jitter') assert isinstance(position.from_geom(geom), position_jitter) geom =", "position=position_dodge2(preserve='single'), varwidth=True, size=2) ) assert p + _theme == 'dodge2_varwidth' def test_jitterdodge(): df", "geom_boxplot(position='dodge2', size=2)) assert p + _theme == 'dodge2' def test_dodge2_varwidth(): p = (ggplot(df3,", "== 'jitterdodge' def test_position_from_geom(): geom = geom_point(position='jitter') assert isinstance(position.from_geom(geom), position_jitter) geom = geom_point(position='position_jitter')" ]
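The tests above exercise the position adjustments through plotnine's image-comparison harness. As a standalone illustration (a minimal sketch, not part of the test suite; it assumes only that plotnine and pandas are installed, and the data and output filename are invented for the example), the same position objects compose with geoms in ordinary plotting code:

import pandas as pd
from plotnine import ggplot, aes, geom_bar, position_dodge

# toy data: counts of 'x' split by group 'g'
df = pd.DataFrame({'x': ['a', 'a', 'b'], 'g': ['u', 'v', 'u']})

# preserve='single' keeps a lone bar at single-bar width instead of
# letting it expand to fill the whole dodge slot
p = (ggplot(df, aes('x', fill='g'))
     + geom_bar(position=position_dodge(preserve='single')))
p.save('dodged.png')  # writes the rendered figure to disk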
"""
:mod:`bibstuff.bibfile`: High level BibTeX file interface
---------------------------------------------------------

Provides two classes, BibFile and BibEntry for accessing the parts
of a bibtex database.  BibFile inherits from
``simpleparse.dispatchprocessor``.  To fill a BibFile instance, bfi,
call bibgrammar.Parse(src, bfi).

:copyright: <NAME> and <NAME>, see AUTHORS
:license: MIT (see LICENSE)
:requires: Python 2.4+
:TODO: make this framework more general, perhaps along the lines of
  the btparse library in `btOOL <http://www.gerg.ca/software/btOOL/>`_
:TODO: add support for bibtexparser (BSD)
  https://github.com/sciunto-org/python-bibtexparser
"""
__docformat__ = "restructuredtext en"
__authors__ = ["<NAME>", "<NAME>"]
__version__ = '1.13'
__needs__ = '2.4'

# options:
# __strict__ = False allows empty citekeys
__strict__ = False  # should we be strict with bibtex format?

####################### IMPORTS #####################################
# import from standard library
import re, logging
bibfile_logger = logging.getLogger('bibstuff_logger')

# import dependencies
from simpleparse import dispatchprocessor as spdp
from simpleparse.dispatchprocessor import dispatch, DispatchProcessor, getString, lines

#bibstuff imports
from . import bibgrammar
#####################################################################

############### GLOBAL VARIABLES ##################################
months_en = ('January','February','March','April','May','June',
             'July','August','September','October','November','December')
monthslower_en = [m.lower() for m in months_en]
monthmacros_en = [m[:3] for m in monthslower_en]
MONTH_DICT = dict( zip(monthmacros_en, months_en) )
#####################################################################


class BibEntry(dict):
    """
    Stores a single bibliographic entry.
    Provides a dictionary interface to the fields: field keys are
    case-insensitive and fields are stored in the order added.

    :note: 2006-08-10 use 'citekey' instead of 'key' since BibTeX allows a 'key' field
    :note: 2008-03-29 'entry_type' instead of 'type' since BibTeX allows a 'type' field
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self._fields = []

    def __repr__(self):
        """return string representation of entry"""
        stringrep = '@%s{%s,\n' % (self.entry_type.upper(), self.citekey)
        try:
            mlen = max( len(key_str) for key_str in self._fields )  # for pretty format
        except ValueError:  # no fields (not a true entry)
            mlen = 0
            bibfile_logger.warning("Entry apparently has no fields.")
        field_list = []
        for key in self._fields:
            addbraces = True
            addquotes = False
            #spacer = ' '*(mlen - len(key) )
            val = self[key]
            # handle crossref
            if key == 'crossref':
                try:
                    val = val['citekey']  # might be an entry
                except TypeError:
                    pass  # -> must be a string
            elif key == 'journal':
                if val.isalpha() and val.islower():  #:TODO: allow punctuation!!
                    addbraces = False  # i.e., assume it is a macro
            elif key == 'month':
                # always use month macros if possible
                if val.lower() in monthslower_en + monthmacros_en:
                    val = val[:3].lower()
                    addbraces = False
            elif key in ("year","number","volume","chapter"):
                try:
                    addbraces = not int(val)
                except:
                    pass
            if '@' in val:  # need to protect '@'
                addquotes = True
            if addquotes:
                val = '"' + val + '"'
            elif addbraces:
                val = "{" + val + "}"
            field_list.append("  %-*s = %s" % (mlen, key, val))
        stringrep += ",\n".join(field_list)
        stringrep += '\n}\n'
        return stringrep

    def __setitem__(self, key, val):
        key = key.lower()
        dict.__setitem__(self, key, val)
        if key == "key":
            bibfile_logger.info(
                "Setting 'key' as an entry *field*. (Recall 'citekey' holds the entry id.)")
        if key not in self._fields and key not in ["citekey","entry_type"] and val:
            self._fields.append(key)

    def __getitem__(self, field):  # field is usually a BibTeX field but can be a citekey
        field = field.lower()
        if field == "key":
            bibfile_logger.info(
                "Seeking 'key' as an entry *field*. (Recall 'citekey' holds the entry id.)")
        #:TODO: rethink this decision (but it is used for formatting)
        #:note: 20080331 changed KeyError to return '' instead of None
        try:
            result = dict.__getitem__(self, field)
        except KeyError:
            crossref = self.get('crossref', '')
            if isinstance(crossref, self.__class__):
                result = crossref[field]
            else:
                result = ''
        #:note: 20080331 add handling of month macros
        if field == 'month' and result in monthmacros_en:
            result = MONTH_DICT[result]
        return result

    def __delitem__(self, key):
        key = key.lower()
        try:
            dict.__delitem__(self, key)
        except KeyError:
            pass
        try:
            self._fields.remove(key)
        except ValueError:
            pass

    def set_entry_type(self, val):
        self["entry_type"] = val.lower()  #:note: entry_type stored as lowercase
    def get_entry_type(self):
        return self["entry_type"]
    entry_type = property(get_entry_type, set_entry_type, None, "property: 'entry_type'")

    def set_citekey(self, val):
        self["citekey"] = val
    def get_citekey(self):
        return self["citekey"]
    citekey = property(get_citekey, set_citekey, None, "property: 'citekey'")

    def get_fields(self):
        return self._fields
    def set_fields(self, lst):
        self._fields = lst
    fields = property(get_fields, set_fields, None, "property: 'fields'")

    def search_fields(self, string_or_compiled, field='', ignore_case=True):
        """Find regular expression in entry.
        Return MatchObject if string_or_compiled found in entry else None.
        If field is omitted, search is through all fields.

        :note: used by BibFile's find_re method, which is used in turn by bibsearch.py
        :Parameters:
          `string_or_compiled` : string to compile or compiled regex
            pattern for searching
          `field` : string
            field to search in self (default: search all fields)
        """
        if isinstance(string_or_compiled, str):
            if ignore_case:
                reo = re.compile(string_or_compiled, re.MULTILINE | re.IGNORECASE)
            else:
                reo = re.compile(string_or_compiled, re.MULTILINE)
        else:  # -> must have a compiled regular expression
            reo = string_or_compiled
        if not field:  # -> try all fields (but not citekey)
            for f in self.get_fields():
                found = reo.search( self[f] )
                if found: break  # no need to check more fields
        # :note: CAN test 'field in self' (even though an entry will not raise
        # KeyError! see TODO above) BUT do not test 'field in self' bc want test
        # for empty fields below
        elif self[field]:
            found = reo.search( self[field] )
        else:
            if field in self:
                bibfile_logger.info("Empty field %s in entry\n%s.\n." % (field, self))
            found = None
        return found

    def format_names(self, names_formatter):
        """return formatted BibName-object if possible else raw name

        :type `names_formatter`: NamesFormatter
        :note: called by CitationManager in format_citation
        :note: 2006-08-08 no longer sets a `_names` attribute
        :TODO: add default name_template useful for .bib files?
        """
        bibfile_logger.debug("BibEntry.format_names: arg is:" + str(names_formatter))
        names = self.get_names()  # get a BibName instance (or possibly, a string)
        # keep string if stuck with it
        if isinstance(names, str):
            result = names
        else:  # assume a BibName instance
            # ask BibName instance to format itself (and it asks a NamesFormatter to do it)
            result = names.format(names_formatter)
        bibfile_logger.debug("BibEntry.format_names result = " + str(result))
        return result

    def get_names(self, entry_formatter=None, try_fields=None):
        """return (BibName-object if possible else string)

        :note: 2006-08-09 matching change to `make_names`, no longer sets `self._names`
        """
        if entry_formatter is None:
            if not try_fields:
                try_fields = ['author','editor','organization']
        return self.make_names(entry_formatter, try_fields=try_fields)

    def make_names(self, entry_formatter=None, try_fields=None):
        """return (BibName-object if possible else string) (from "raw" names).

        :change: 2006-08-02 altered to return BibName instance and not set _names
        :note: self returns None if field missing (-> no KeyError)
        :note: this method introduces the only dependence on simpleparse (via bibname)
        :TODO: return BibName instance for each available name field??
        :Parameters:
          - `entry_formatter`: EntryFormatter instance to provide style information
          - `try_fields`: list of field names to try sequentially;
            the first non-empty field provides the name
        """
        # importing bibname here to avoid recursive import
        from bibstuff import bibname  #ai: shd move all bibname into here? possibly
        if entry_formatter is None:
            for field in try_fields:
                raw_names = self[field]
                if raw_names:
                    break
        else:
            raw_names, field = entry_formatter.pick_raw_names(self, try_fields)
        return bibname.BibName(raw_names, from_field=field)  # names are in a BibName object

    def format_with(self, entry_formatter):
        bibfile_logger.debug("BibEntry.format_with: arg is:" + str(entry_formatter))
        # ask the EntryFormatter to do it
        return entry_formatter.format_entry(self)

    # A default label style for citekeys created by make_citekey()
    # first max_names names included, then etal
    citekey_label_style1 = dict(
        name_template = 'v{_}_|l{}',  # "van_der_Meer" or "van_DerStadt"
        max_names = 2,
        name_name_sep = '+',
        etal = 'etal',
        anonymous = 'anon',
        lower_name = False,
        article = "%(names)s-%(year)s",
        book = "%(names)s-%(year)s",
        misc = "%(names)s-%(year)s",
        default_type = "%(names)s-%(year)s",
        )

    #style2 shd be rst compatible
    # citekey_label_style2 = dict(
    #     name_first = 'l{_}',
    #     name_other = 'l{_}',
    #     max_names = 2,
    #     use_max_names = False,
    #     name_name_sep = ('.','.'),
    #     etal = '',
    #     lower_name = True,
    #     anonymous = 'anon',
    #     article = "%(names)s-%(year)s-%(jrnl)s",
    #     book = "%(names)s-%(year)s",
    #     misc = "%(names)s-%(year)s",
    #     default_type = "%(names)s-%(year)s",
    #     )

    def make_citekey(self, used_citekeys = [], style = citekey_label_style1):
        """Create and return a new citekey based on the entry's data.
        This is for creating predictable and useful citekey (labels) for
        BibEntry objects.  This is not integrated with the citation styles
        in bibstuff.bibstyles; it serves a very different purpose: to create
        consistent citation keys that are easy to type and guess and that
        are valid BibTeX citation keys.

        :Parameters:
          - used_citekeys : list
            a list of the already taken citation keys so that the function
            can avoid duplicates (by adding a,b,c,d... etc)
          - style : dict
            the format of the citekey is determined by a `label_style` (see below)
        :Returns: string
          the citation key (label)

        Example: The label style is a dict with the following fields::

            citekey_label_style1 = dict(
                name_template = 'v{_}_|l{}',  # see NameFormatter class
                max_names = 2,
                name_name_sep = "+",
                etal = 'etal',
                anonymous = 'anon',
                lower_name = False,
                article = "%(names)s-%(year)s",
                book = "%(names)s-%(year)s",
                misc = "%(names)s-%(year)s",
                default_type = "%(names)s-%(year)s")

        :TODO: Strip LaTeX accent characters from names when making label
        """
        from .bibstyles.shared import NameFormatter
        from string import ascii_lowercase

        format_dict = {}
        entry_type = self.entry_type.lower()
        try:
            label_template = style[entry_type]
        except KeyError:
            label_template = style['default_type']

        name_template = style['name_template']
        max_names = style['max_names']
        name_name_sep = style['name_name_sep']
        lower_name = style['lower_name']
        etal = style['etal']

        # first, make names
        name_formatter = NameFormatter(template = name_template)
        names_dicts = self.get_names().get_names_dicts()
        # make list of 'v_|l' last names, which can possibly have multiple
        # tokens (e.g., two piece last names)
        ls = [name_formatter.format_name(name_dict) for name_dict in names_dicts]
        if len(ls) > max_names:
            ls = ls[:max_names] + [etal]
        names = name_name_sep.join(ls)
        if lower_name:
            names = names.lower()
        format_dict['names'] = names
        year = self['year'] or '????'
        format_dict['year'] = year
        if entry_type == "article":
            jrnl = self['journal']
            jrnl = ''.join(jrnl.split()).lower()  # keep macro
            jrnl = jrnl.replace("journal","j",1)
            format_dict['jrnl'] = jrnl  # short form, no spaces

        # make unique result: if needed, append suffix b or c or d... to year
        sfx = ''; c = 1
        # while result+sfx in used_citekeys:
        while label_template % format_dict in used_citekeys:
            #:note: lowercase since BibTeX does not distinguish case
            sfx = ascii_lowercase[c % 26] * (1 + c // 26)
            format_dict['year'] = year + sfx
            c += 1
        result = label_template % format_dict
        return result
\"\"\" (tag,start,stop,subtags) = tuple4 entry = BibEntry() entry.entry_type = dispatch(self, subtags[0], buffer)", "Bibfile # ------- # Data storage for bibtex file # ---------------------------------------------------------- class BibFile(", "tuple4 return getString((tag,start,stop,subtags), buffer) def citekey( self, tuple4, buffer ): \"\"\"Return the entry's", "(lineno,the_type)) else : bibfile_logger.warning(\"Preamble entry on line %d:\" % lineno + \"\\n\" +", "expression reo = string_or_compiled if not field: #->try all fields (but not citekey)", "book = \"%(names)s-%(year)s\", misc = \"%(names)s-%(year)s\", default_type = \"%(names)s-%(year)s\") :TODO: Strip LaTeX accent", "storage for bibtex file # ---------------------------------------------------------- class BibFile( DispatchProcessor ): \"\"\"Stores parsed bibtex", "key, val): key = key.lower() dict.__setitem__(self, key, val) if key == \"key\": bibfile_logger.info(", "a very different purpose. This is to create consistent citation keys that are", "shd move all bibname into here? possibly if entry_formatter is None: for field", "in the order added. :note: 2006-08-10 use 'citekey' instead of 'key' since BibTeX", "None, \"property: 'fields'\") def search_fields(self, string_or_compiled, field='', ignore_case=True): \"\"\"Find regular expression in entry.", "`names_formatter`: NamesFormatter :note: called by CitationManager in format_citation :note: 2006-08-08 no longer sets", "keys provided; returning empty cited-entry list.\") return [] temp = [ (key,self.get_entry_by_citekey(key)) for", "as an entry *field*. (Recall 'citekey' holds the entry id.)\") if key not", "2, name_name_sep = \"+\", etal = 'etal', anonymous = 'anon', lower_name = False,", "= BibEntry() entry.entry_type = dispatch(self, subtags[0], buffer) entry.citekey = dispatch(self, subtags[1], buffer) for", "*field*. (Recall 'citekey' holds the entry id.)\") if key not in self._fields and", "NameFormatter class max_names = 2, name_name_sep = \"+\", etal = 'etal', anonymous =", "bibsearch.py :Parameters: `string_or_compiled` : string to compile or compiled regex pattern for searching", "is through all fields. :note: used by bibsearch.py :Parameters: - `string_or_compiled` : string", "= \"+\", etal = 'etal', anonymous = 'anon', lower_name = False, article =", "field :note: 2008-03-29 'entry_type' instead of 'type' since BibTeX allows a 'type' field", "regex pattern for searching `field` : string field to search in self (default:", "string_or_compiled, field='', ignore_case=True): \"\"\"Return list of matching entries. Search for regular expression in", "order added. 
:note: 2006-08-10 use 'citekey' instead of 'key' since BibTeX allows a", "def __init__(self) : self.entries = [] self._macroMap = {} def get_entrylist(self, citekeys, discard=True):", "%s\" % (lineno , the_type)) if not __strict__: # we can add a", "pair[1]] if bad_keys and discard: bibfile_logger.warning(\"Database entries not found for the following keys:\\n\"+\"\\n\".join(bad_keys))", ":TODO: make this framework more general, perhaps along the lines of the btparse", "and treat this entry as a regular entry entry = BibEntry() entry.entry_type =", "options: # __strict__ = False allows empty citekeys __strict__ = False # should", "tuple4, buffer ): \"\"\"return a number as a string\"\"\" (tag,start,stop,subtags) = tuple4 return", "__repr__(self): \"\"\"return string representation of entry \"\"\" stringrep = '@%s{%s,\\n' % (self.entry_type.upper() ,", "used_citekeys: sfx = ascii_lowercase[c%26]*(1+c//26) # :note: lowercase since # BibTeX does not #", "def name(self, tuple4, buffer ): \"\"\"Return lookup on name or name if not", "self[field]: found = reo.search( self[field] ) else: if field in self: bibfile_logger.info(\"Empty field", "2006-08-09 matching change to `make_names`, no longer sets `self._names` \"\"\" if entry_formatter is", "val: self._fields.append(key) def __getitem__(self, field): #field is usually a BibTeX field but can", "instead of 'key' since BibTeX allows a 'key' field :note: 2008-03-29 'entry_type' instead", "\",\\n\".join(field_list) stringrep += '\\n}\\n' return stringrep def __setitem__(self, key, val): key = key.lower()", "#ask the EntryFormatter to do it return entry_formatter.format_entry(self) # A default label style", "check more fields # :note: CAN test 'field in self' (even though an", "discard=True): \"\"\"Return list, the BibEntry instances that were found (and None for entries", "\"%(names)s-%(year)s\", book = \"%(names)s-%(year)s\", misc = \"%(names)s-%(year)s\", default_type = \"%(names)s-%(year)s\") :TODO: Strip LaTeX", "\"\"\" self._macroMap[name] = str def preamble( self, tuple4, buffer ): \"\"\"Process the given", "+ getString(subtags[1], buffer)) def search_entries(self, string_or_compiled, field='', ignore_case=True): \"\"\"Return list of matching entries.", "default_type = \"%(names)s-%(year)s\", # ) def make_citekey(self, used_citekeys = [], style = citekey_label_style1):", "inherits from ``simpleparse.dispatchprocessor``. To fill a BibFile instance, bfi, call bibgrammar.Parse(src, bfi). :copyright:", "append suffix b or c or d... 
to year sfx = ''; c", "name_name_sep = '+', etal = 'etal', anonymous = 'anon', lower_name = False, article", "\"\"\" from .bibstyles.shared import NameFormatter from string import ascii_lowercase format_dict = {} entry_type", "if ignore_case: reo = re.compile(string_or_compiled, re.MULTILINE | re.IGNORECASE) else: reo = re.compile(string_or_compiled, re.MULTILINE)", "[pair[0] for pair in temp if not pair[1]] if bad_keys and discard: bibfile_logger.warning(\"Database", "buffer ): \"\"\"Return the entry type\"\"\" (tag,start,stop,subtags) = tuple4 return getString((tag,start,stop,subtags), buffer) def", "ls # self test # ------------------------- # usage: bibfile.py DATABASE_FILE # if __name__", "result = names.format(names_formatter) bibfile_logger.debug(\"BibEntry.format_names result = \"+str(result)) return result def get_names(self, entry_formatter=None, try_fields=None):", "anonymous = 'anon', # article = \"%(names)s-%(year)s-%(jrnl)s\", # book = \"%(names)s-%(year)s\", # misc", "for field in subtags[1][3] : k,v = dispatch(self, field, buffer) #:note: entry will", "label_template%format_dict return result # ---------------------------------------------------------- # Bibfile # ------- # Data storage for", "in temp] #attach cross references for entry in result: if entry: crossref =", "entry[k] = v self.entries.append(entry) def macro( self, tuple4, buffer ): \"\"\"Process a macro", "try sequentially; none empty filed -> name \"\"\" # importing bibname here to", "names = self.get_names() #get a BibName instance (or possibly, a string) #keep string", "empty citekeys __strict__ = False # should we be strict with bibtex format?", "add default name_template useful for .bib files? \"\"\" bibfile_logger.debug(\"BibEntry.format_names: arg is:\"+str(names_formatter)) names =", "#keep None when occurs in entry list result = [pair[1] for pair in", "field = entry_formatter.pick_raw_names(self,try_fields) return bibname.BibName(raw_names,from_field=field) #names are in a BibName object def format_with(self,", "): \"\"\"Return a string, stripping leading and trailing markers\"\"\" (tag,start,stop,subtags) = tuple4 return", "\"\"\"return (BibName-object if possible else string) (from \"raw\" names). 
:change: 2006-08-02 altered to", "if entry.citekey == citekey: return entry \"\"\"PRODUCTION FUNCTIONS: for parsing, must provide a", "no longer sets a `_names` attribute :TODO: add default name_template useful for .bib", "ascii_lowercase[c%26]*(1+c//26) # :note: lowercase since # BibTeX does not # distinguish case format_dict['year']", "= key.lower() try: dict.__delitem__(self, key) except KeyError: pass try: self._fields.remove(key) except ValueError: pass", "def __delitem__(self,key) : key = key.lower() try: dict.__delitem__(self, key) except KeyError: pass try:", "or \"van_DerStadt\" max_names = 2, name_name_sep = '+', etal = 'etal', anonymous =", "bibfile_logger.warning(\"\"\"Entry at line %d has comment syntax but entry_type is %s: Details: %s\"\"\"", "self, tuple4, buffer ): \"\"\"Return the entry type\"\"\" (tag,start,stop,subtags) = tuple4 return getString((tag,start,stop,subtags),", "monthslower_en + monthmacros_en: val = val[:3].lower() addbraces = False elif key in (\"year\",\"number\",\"volume\",\"chapter\"):", "# keep macro jrnl = jrnl.replace(\"journal\",\"j\",1) format_dict['jrnl'] = jrnl # short form, no", "since BibTeX allows a 'key' field :note: 2008-03-29 'entry_type' instead of 'type' since", "tuple4, buffer ): \"\"\"Return the entry type\"\"\" (tag,start,stop,subtags) = tuple4 return getString((tag,start,stop,subtags), buffer)", "in self._fields ) # for pretty format except ValueError: #no fields (not a", "bibfile_logger.info(\"Empty field %s in entry\\n%s.\\n.\"%(self,field)) found = None return found def format_names(self, names_formatter):", "+ \"}\" field_list.append(\" %-*s = %s\" % (mlen, key, val)) stringrep += \",\\n\".join(field_list)", "= 2, name_name_sep = \"+\", etal = 'etal', anonymous = 'anon', lower_name =", "): \"\"\"Stores parsed bibtex file. Access entries by key. :note: a BibFile object", "expression in the fields of each entry. If field is omitted, search is", "ignore_case=True): \"\"\"Return list of matching entries. Search for regular expression in the fields", "pattern for searching - `field` : string field to search in self (default:", "do not test 'field in self' bc want test #for empty fields below", "# \"van_der_Meer\" or \"van_DerStadt\" max_names = 2, name_name_sep = '+', etal = 'etal',", "\"\"\" Stores a single bibliographic entry. Provides a dictionary interface to the fields:", "name_template useful for .bib files? \"\"\" bibfile_logger.debug(\"BibEntry.format_names: arg is:\"+str(names_formatter)) names = self.get_names() #get", "provide style information - `try_fields`: list of field names to try sequentially; none", ": bibfile_logger.info(\"Comment entry on line %d:\" % lineno + \" \" + getString(subtags[1],", "buffer), str) def entry( self, tuple4, buffer ): \"\"\"Process the bibentry and its", "it is used for formatting) #:note: 20080331 changed KeyError to return '' instead", "(not a true entry) mlen = 0 bibfile_logger.warn(\"Entry apparently has no fields.\") field_list", "else : bibfile_logger.warning(\"Preamble entry on line %d:\" % lineno + \"\\n\" + buffer[start:stop])", "if entry_formatter is None: for field in try_fields: raw_names = self[field] if raw_names:", "zip(monthmacros_en, months_en) ) ##################################################################### class BibEntry(dict): \"\"\" Stores a single bibliographic entry. 
Provides", "% (lineno,the_type)) else : bibfile_logger.warning(\"Preamble entry on line %d:\" % lineno + \"\\n\"", "if raw_names: break else: raw_names, field = entry_formatter.pick_raw_names(self,try_fields) return bibname.BibName(raw_names,from_field=field) #names are in", "name_first = 'l{_}', # name_other = 'l{_}', # max_names = 2, # use_max_names", "= \"%(names)s-%(year)s\", # misc = \"%(names)s-%(year)s\", # default_type = \"%(names)s-%(year)s\", # ) def", "names = names.lower() format_dict['names'] = names year = self['year'] or '????' format_dict['year'] =", "method introduces the only dependence on simpleparse (via bibname) :TODO: return BibName instance", "= tuple4 entry = BibEntry() entry.entry_type = dispatch(self, subtags[0], buffer) entry.citekey = dispatch(self,", "##################################################################### class BibEntry(dict): \"\"\" Stores a single bibliographic entry. Provides a dictionary interface", "pass if '@' in val: # need to protect '@' addquotes = True", "= label_template%format_dict return result # ---------------------------------------------------------- # Bibfile # ------- # Data storage", "characters from names when making label \"\"\" from .bibstyles.shared import NameFormatter from string", "field names to try sequentially; none empty filed -> name \"\"\" # importing", "= names.format(names_formatter) bibfile_logger.debug(\"BibEntry.format_names result = \"+str(result)) return result def get_names(self, entry_formatter=None, try_fields=None): \"\"\"return", "logging bibfile_logger = logging.getLogger('bibstuff_logger') # import dependencies from simpleparse import dispatchprocessor as spdp", "\"Setting 'key' as an entry *field*. (Recall 'citekey' holds the entry id.)\") if", "dispatch(self, subtags[0], buffer) entry.citekey = dispatch(self, subtags[1], buffer) for field in subtags[2][3] :", "names = name_name_sep.join(ls) if lower_name: names = names.lower() format_dict['names'] = names year =", "monthmacros_en: result = MONTH_DICT[result] return result def __delitem__(self,key) : key = key.lower() try:", "no spaces # make unique result: if needed, append suffix b or c", "= [name_formatter.format_name(name_dict) for name_dict in names_dicts] if len(ls) > max_names: ls = ls[:max_names]", "found in entry else None. If field is omitted, search is through all", "= self.entry_type.lower() try: label_template = style[entry_type] except KeyError: label_template = style['default_type'] name_template =", "= \"%(names)s-%(year)s\", book = \"%(names)s-%(year)s\", misc = \"%(names)s-%(year)s\", default_type = \"%(names)s-%(year)s\", ) #style2", "a dict with the following fields:: citekey_label_style1 = dict( name_template = 'v{_}_|l{}', #", "bfile) # for entry in bfile.entries : # print entry # else :", "unique result: if needed, append suffix b or c or d... to year", "jrnl # short form, no spaces # make unique result: if needed, append", "# should we be strict with bibtex format? ####################### IMPORTS ##################################### # import", "bib_entry. If field is omitted, search is through all fields. :note: used by", "we be strict with bibtex format? 
<filename>bibstuff/bibfile.py
"""
:mod:`bibstuff.bibfile`: High level BibTeX file interface
---------------------------------------------------------

Provides two classes, BibFile and BibEntry, for accessing the parts of a
bibtex database.  BibFile inherits from ``simpleparse.dispatchprocessor``.
To fill a BibFile instance, bfi, call bibgrammar.Parse(src, bfi).

:copyright: <NAME> and <NAME>, see AUTHORS
:license: MIT (see LICENSE)
:requires: Python 2.4+
:TODO: make this framework more general, perhaps along the lines of the
       btparse library in `btOOL <http://www.gerg.ca/software/btOOL/>`_
:TODO: add support for bibtexparser (BSD)
       https://github.com/sciunto-org/python-bibtexparser
"""
__docformat__ = "restructuredtext en"
__authors__ = ["<NAME>", "<NAME>"]
__version__ = '1.13'
__needs__ = '2.4'

# options:
# __strict__ = False allows empty citekeys
__strict__ = False  # should we be strict with bibtex format?

####################### IMPORTS #####################################
# import from standard library
import re, logging
bibfile_logger = logging.getLogger('bibstuff_logger')

# import dependencies
from simpleparse import dispatchprocessor as spdp
from simpleparse.dispatchprocessor import dispatch, DispatchProcessor, getString, lines

# bibstuff imports
# from . import bibgrammar
#####################################################################

############### GLOBAL VARIABLES ##################################
months_en = ('January', 'February', 'March', 'April', 'May', 'June',
             'July', 'August', 'September', 'October', 'November', 'December')
monthslower_en = [m.lower() for m in months_en]
monthmacros_en = [m[:3] for m in monthslower_en]
MONTH_DICT = dict(zip(monthmacros_en, months_en))
#####################################################################
class BibEntry(dict):
    """
    Stores a single bibliographic entry.
    Provides a dictionary interface to the fields:
    field keys are case-insensitive and fields are stored in the order added.

    :note: 2006-08-10 use 'citekey' instead of 'key' since BibTeX allows a 'key' field
    :note: 2008-03-29 'entry_type' instead of 'type' since BibTeX allows a 'type' field
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self._fields = []

    def __repr__(self):
        """return string representation of entry"""
        stringrep = '@%s{%s,\n' % (self.entry_type.upper(), self.citekey)
        try:
            mlen = max(len(key_str) for key_str in self._fields)  # for pretty format
        except ValueError:  # no fields (not a true entry)
            mlen = 0
            bibfile_logger.warn("Entry apparently has no fields.")
        field_list = []
        for key in self._fields:
            addbraces = True
            addquotes = False
            val = self[key]
            # handle crossref
            if key == 'crossref':
                try: val = val['citekey']   # might be an entry
                except TypeError: pass      # ->must be a string
            elif key == 'journal':
                if val.isalpha() and val.islower():  #:TODO: allow punctuation!!
                    addbraces = False  # i.e., assume it is a macro
            elif key == 'month':
                # always use month macros if possible
                if val.lower() in monthslower_en + monthmacros_en:
                    val = val[:3].lower()
                    addbraces = False
            elif key in ("year", "number", "volume", "chapter"):
                try: addbraces = not int(val)
                except: pass
            if '@' in val:  # need to protect '@'
                addquotes = True
            if addquotes:
                val = '"' + val + '"'
            elif addbraces:
                val = "{" + val + "}"
            field_list.append("  %-*s = %s" % (mlen, key, val))
        stringrep += ",\n".join(field_list)
        stringrep += '\n}\n'
        return stringrep

    def __setitem__(self, key, val):
        key = key.lower()
        dict.__setitem__(self, key, val)
        if key == "key":
            bibfile_logger.info(
                "Setting 'key' as an entry *field*. (Recall 'citekey' holds the entry id.)")
        if key not in self._fields and key not in ["citekey", "entry_type"] and val:
            self._fields.append(key)

    def __getitem__(self, field):  # field is usually a BibTeX field but can be a citekey
        field = field.lower()
        if field == "key":
            bibfile_logger.info(
                "Seeking 'key' as an entry *field*. (Recall 'citekey' holds the entry id.)")
        try:
            result = dict.__getitem__(self, field)
        #:TODO: rethink this decision (but it is used for formatting)
        #:note: 20080331 changed KeyError to return '' instead of None
        except KeyError:
            crossref = self.get('crossref', '')
            if isinstance(crossref, self.__class__):
                result = crossref[field]
            else:
                result = ''
        #:note: 20080331 add handling of month macros
        if field == 'month' and result in monthmacros_en:
            result = MONTH_DICT[result]
        return result

    def __delitem__(self, key):
        key = key.lower()
        try:
            dict.__delitem__(self, key)
        except KeyError:
            pass
        try:
            self._fields.remove(key)
        except ValueError:
            pass

    def set_entry_type(self, val):
        self["entry_type"] = val.lower()  #:note: entry_type stored as lowercase
    def get_entry_type(self):
        return self["entry_type"]
    entry_type = property(get_entry_type, set_entry_type, None, "property: 'entry_type'")

    def set_citekey(self, val):
        self["citekey"] = val
    def get_citekey(self):
        return self["citekey"]
    citekey = property(get_citekey, set_citekey, None, "property: 'citekey'")

    def get_fields(self):
        return self._fields
    def set_fields(self, lst):
        self._fields = lst
    fields = property(get_fields, set_fields, None, "property: 'fields'")
    def search_fields(self, string_or_compiled, field='', ignore_case=True):
        """Find regular expression in entry.
        Return MatchObject if string_or_compiled found in entry else None.
        If field is omitted, search is through all fields.

        :note: used by BibFile's find_re method, which is used in turn by bibsearch.py
        :Parameters:
          `string_or_compiled` : string to compile or compiled regex
            pattern for searching
          `field` : string
            field to search in self (default: search all fields)
        """
        if isinstance(string_or_compiled, str):
            if ignore_case:
                reo = re.compile(string_or_compiled, re.MULTILINE | re.IGNORECASE)
            else:
                reo = re.compile(string_or_compiled, re.MULTILINE)
        else:  # ->must have a compiled regular expression
            reo = string_or_compiled
        found = None  # stays None if there are no fields to search
        if not field:  # ->try all fields (but not citekey)
            for f in self.get_fields():
                found = reo.search(self[f])
                if found: break  # no need to check more fields
        #:note: CAN test 'field in self' (even though an entry will not raise
        #       KeyError! see TODO above) BUT do not test 'field in self'
        #       bc want test for empty fields below
        elif self[field]:
            found = reo.search(self[field])
        else:
            if field in self:
                bibfile_logger.info("Empty field %s in entry\n%s.\n." % (field, self))
        return found

    def format_names(self, names_formatter):
        """return formatted BibName-object if possible else raw name

        :type `names_formatter`: NamesFormatter
        :note: called by CitationManager in format_citation
        :note: 2006-08-08 no longer sets a `_names` attribute
        :TODO: add default name_template useful for .bib files?
        """
        bibfile_logger.debug("BibEntry.format_names: arg is:" + str(names_formatter))
        names = self.get_names()  # get a BibName instance (or possibly, a string)
        # keep string if stuck with it
        if isinstance(names, str):
            result = names
        else:  # assume a BibName instance
            # ask BibName instance to format itself (and it asks a NamesFormatter to do it)
            result = names.format(names_formatter)
        bibfile_logger.debug("BibEntry.format_names result = " + str(result))
        return result

    def get_names(self, entry_formatter=None, try_fields=None):
        """return (BibName-object if possible else string)

        :note: 2006-08-09 matching change to `make_names`, no longer sets `self._names`
        """
        if entry_formatter is None:
            if not try_fields:
                try_fields = ['author', 'editor', 'organization']
        return self.make_names(entry_formatter, try_fields=try_fields)

    def make_names(self, entry_formatter=None, try_fields=None):
        """return (BibName-object if possible else string) (from "raw" names).

        :change: 2006-08-02 altered to return BibName instance and not set _names
        :note: self returns None if field missing (-> no KeyError)
        :note: this method introduces the only dependence on simpleparse (via bibname)
        :TODO: return BibName instance for each available name field??
        :Parameters:
          - `entry_formatter`: EntryFormatter instance to provide style information
          - `try_fields`: list of field names to try sequentially;
            the first non-empty field -> name
        """
        # importing bibname here to avoid recursive import
        from bibstuff import bibname  # ai: shd move all bibname into here? possibly
        if entry_formatter is None:
            for field in try_fields:
                raw_names = self[field]
                if raw_names:
                    break
        else:
            raw_names, field = entry_formatter.pick_raw_names(self, try_fields)
        return bibname.BibName(raw_names, from_field=field)  # names are in a BibName object

    def format_with(self, entry_formatter):
        bibfile_logger.debug("BibEntry.format_with: arg is:" + str(entry_formatter))
        # ask the EntryFormatter to do it
        return entry_formatter.format_entry(self)
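    # Usage sketch (commented out; 'e' is the hand-built entry from the sketch
    # above, and the patterns are illustrative): searching fields by regex.
    #
    #   m = e.search_fields('smith', field='author')  # case-insensitive by default
    #   if m: print(m.group(0))                       # -> 'Smith'
    #   m = e.search_fields(r'19\d\d|20\d\d')         # no field -> search all fields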
    # A default label style for citekeys created by make_citekey()
    # first max_names names included, then etal
    citekey_label_style1 = dict(
        name_template = 'v{_}_|l{}',  # "van_der_Meer" or "van_DerStadt"
        max_names = 2,
        name_name_sep = '+',
        etal = 'etal',
        anonymous = 'anon',
        lower_name = False,
        article = "%(names)s-%(year)s",
        book = "%(names)s-%(year)s",
        misc = "%(names)s-%(year)s",
        default_type = "%(names)s-%(year)s",
        )

    # style2 shd be rst compatible
    # citekey_label_style2 = dict(
    #     name_first = 'l{_}',
    #     name_other = 'l{_}',
    #     max_names = 2,
    #     use_max_names = False,
    #     name_name_sep = ('.', '.'),
    #     etal = '',
    #     lower_name = True,
    #     anonymous = 'anon',
    #     article = "%(names)s-%(year)s-%(jrnl)s",
    #     book = "%(names)s-%(year)s",
    #     misc = "%(names)s-%(year)s",
    #     default_type = "%(names)s-%(year)s",
    #     )

    def make_citekey(self, used_citekeys=[], style=citekey_label_style1):
        """Create and return a new citekey based on the entry's data.
        This is for creating predictable and useful citekeys (labels) for
        BibEntry objects.  It is not integrated with the citation styles in
        bibstuff.bibstyles, which serve a very different purpose: the goal
        here is consistent citation keys that are easy to type and guess
        and that are valid BibTeX citation keys.

        :Parameters:
          - used_citekeys : list
            a list of the already taken citation keys,
            so that the function can avoid duplicates (by adding a,b,c,d... etc)
          - style : dict
            the format of the citekey is determined by a `label_style` (see below)

        :Returns: string
          the citation key (label)

        Example:
        The label style is a dict with the following fields::

            citekey_label_style1 = dict(
                name_template = 'v{_}_|l{}',  # see NameFormatter class
                max_names = 2,
                name_name_sep = "+",
                etal = 'etal',
                anonymous = 'anon',
                lower_name = False,
                article = "%(names)s-%(year)s",
                book = "%(names)s-%(year)s",
                misc = "%(names)s-%(year)s",
                default_type = "%(names)s-%(year)s")

        :TODO: Strip LaTeX accent characters from names when making label
        """
        from .bibstyles.shared import NameFormatter
        from string import ascii_lowercase
        format_dict = {}
        entry_type = self.entry_type.lower()
        try:
            label_template = style[entry_type]
        except KeyError:
            label_template = style['default_type']

        name_template = style['name_template']
        max_names = style['max_names']
        name_name_sep = style['name_name_sep']
        lower_name = style['lower_name']
        etal = style['etal']

        # first, make names
        name_formatter = NameFormatter(template=name_template)
        names_dicts = self.get_names().get_names_dicts()
        # make list of 'v_|l' last names, which can possibly have
        # multiple tokens (e.g., two-piece last names)
        ls = [name_formatter.format_name(name_dict) for name_dict in names_dicts]
        if len(ls) > max_names:
            ls = ls[:max_names] + [etal]
        names = name_name_sep.join(ls)
        if lower_name:
            names = names.lower()
        format_dict['names'] = names
        year = self['year'] or '????'
        format_dict['year'] = year
        if entry_type == "article":
            jrnl = self['journal']
            jrnl = ''.join(jrnl.split()).lower()  # keep macro
            jrnl = jrnl.replace("journal", "j", 1)
            format_dict['jrnl'] = jrnl  # short form, no spaces

        # make unique result: if needed, append suffix b or c or d... to year
        sfx = ''
        c = 1
        while label_template % format_dict in used_citekeys:
            sfx = ascii_lowercase[c % 26]  #:note: lowercase since BibTeX
                                           #       does not distinguish case
            format_dict['year'] = year + sfx
            c += 1
        result = label_template % format_dict
        return result
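    # Usage sketch (commented out; the entry 'e' and the taken-key list are
    # illustrative assumptions, and the exact label depends on NameFormatter):
    # generating a collision-free citekey.
    #
    #   taken = ['Smith-2000']
    #   key = e.make_citekey(used_citekeys=taken)
    #   # the first candidate 'Smith-2000' is taken, so the year gets a
    #   # lowercase suffix: -> 'Smith-2000b'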
# ----------------------------------------------------------
# Bibfile
# -------
# Data storage for bibtex file
# ----------------------------------------------------------
class BibFile(DispatchProcessor):
    """Stores parsed bibtex file.  Access entries by key.

    :note: a BibFile object should simply *store* .bib file parts
           (a list of entries and a macro map) and provide access
           to these parts
    """
    def __init__(self):
        self.entries = []
        self._macroMap = {}

    def get_entrylist(self, citekeys, discard=True):
        """Return list, the BibEntry instances that were found
        (and None for entries not found, unless discarded).
        """
        if not citekeys:
            bibfile_logger.warning("get_entrylist: No keys provided; returning empty cited-entry list.")
            return []
        temp = [(key, self.get_entry_by_citekey(key)) for key in citekeys]
        bad_keys = [pair[0] for pair in temp if not pair[1]]
        if bad_keys and discard:
            bibfile_logger.warning("Database entries not found for the following keys:\n" + "\n".join(bad_keys))
        if discard:
            result = [pair[1] for pair in temp if pair[1]]
        else:  # keep None when it occurs in the entry list
            result = [pair[1] for pair in temp]
        # attach cross references
        for entry in result:
            if entry:
                crossref = entry.get('crossref', None)
                if isinstance(crossref, str):
                    crossref = self.get_entry_by_citekey(crossref)
                    if crossref:
                        entry['crossref'] = crossref
        return result

    def get_entry_by_citekey(self, citekey):
        """Return entry or None."""
        for entry in self.entries:
            if entry.citekey == citekey:
                return entry
    """PRODUCTION FUNCTIONS:
    for parsing, must provide a function for each production name.
    """

    def string(self, tuple4, buffer):
        """Return a string, stripping leading and trailing markers"""
        (tag, start, stop, subtags) = tuple4
        return buffer[start + 1:stop - 1]

    def number(self, tuple4, buffer):
        """return a number as a string"""
        (tag, start, stop, subtags) = tuple4
        return buffer[start:stop]

    def entry_type(self, tuple4, buffer):
        """Return the entry type"""
        (tag, start, stop, subtags) = tuple4
        return getString((tag, start, stop, subtags), buffer)

    def citekey(self, tuple4, buffer):
        """Return the entry's citekey"""
        (tag, start, stop, subtags) = tuple4
        return getString((tag, start, stop, subtags), buffer)

    # macro name
    def name(self, tuple4, buffer):
        """Return lookup on name or name if not in map."""
        (tag, start, stop, subtags) = tuple4
        return self._macroMap.get(buffer[start:stop], buffer[start:stop])

    def field(self, tuple4, buffer):
        """Process a bibentry field and return tuple of name, value."""
        (tag, start, stop, subtags) = tuple4
        str = ''
        for t in subtags[1][3]:
            if t:
                str += dispatch(self, t, buffer)  # concatenate hashed together strings
        return (dispatch(self, subtags[0], buffer), str)

    def entry(self, tuple4, buffer):
        """Process the bibentry and its children."""
        (tag, start, stop, subtags) = tuple4
        entry = BibEntry()
        entry.entry_type = dispatch(self, subtags[0], buffer)
        entry.citekey = dispatch(self, subtags[1], buffer)
        for field in subtags[2][3]:
            # bibfile_logger.debug("entry: ready to add field: "+str(dispatch(self, field, buffer)))
            k, v = dispatch(self, field, buffer)
            #:note: entry will force k to lowercase
            entry[k] = v
        self.entries.append(entry)

    def macro(self, tuple4, buffer):
        """Process a macro entry and add macros to macro map"""
        (tag, start, stop, subtags) = tuple4
        name, str = dispatch(self, subtags[0], buffer)
        # Earlier revision, kept for reference: tolerate entries that have
        # macro syntax but a non-STRING entry_type by adding a dummy citekey.
        #
        #   the_type = getString(subtags[0], buffer)
        #   if the_type.upper() != 'STRING':
        #       # it looks like a macro, but is not: could be a regular entry with no key
        #       lineno = lines(0, start, buffer) + 1
        #       bibfile_logger.warning("Entry at line %d has macro syntax, but entry_type is %s" % (lineno, the_type))
        #       if not __strict__:  # we can add a dummy key and treat this entry as a regular entry
        #           entry = BibEntry()
        #           entry.entry_type = dispatch(self, subtags[0], buffer)
        #           entry.citekey = 'KEY'  # dummy key -- or should we be strict?
        #           for field in subtags[1][3]:
        #               k, v = dispatch(self, field, buffer)  #:note: entry will force k to lowercase
        #               entry[k] = v
        #           self.entries.append(entry)
        #           bibfile_logger.warning("Dummy key added to entry at line %d" % lineno)
        #   else:  # otherwise it is really a macro entry
        #       for field in subtags[1][3]:
        #           name, str = dispatch(self, field, buffer)
        #           self._macroMap[name] = str
        self._macroMap[name] = str

    def preamble(self, tuple4, buffer):
        """Process the given production and its children"""
        (tag, start, stop, subtags) = tuple4
        the_type = getString(subtags[0], buffer)
        lineno = lines(0, start, buffer) + 1
        if the_type.upper() != 'PREAMBLE':
            bibfile_logger.warning("Entry at line %d has preamble syntax but entry_type is %s" % (lineno, the_type))
        else:
            bibfile_logger.warning("Preamble entry on line %d:" % lineno + "\n" + buffer[start:stop])

    def comment_entry(self, tuple4, buffer):
        """Process the given production and its children"""
        (tag, start, stop, subtags) = tuple4
        the_type = getString(subtags[0], buffer)
        lineno = spdp.lines(0, start, buffer) + 1
        if the_type.upper() != 'COMMENT':
            bibfile_logger.warning("""Entry at line %d has comment syntax but entry_type is %s:
Details: %s""" % (lineno, the_type, getString(subtags[1], buffer)))
        else:
            bibfile_logger.info("Comment entry on line %d:" % lineno + " " + getString(subtags[1], buffer))
\"\"\" def string(self, tuple4, buffer ): \"\"\"Return a string,", "'anon', # article = \"%(names)s-%(year)s-%(jrnl)s\", # book = \"%(names)s-%(year)s\", # misc = \"%(names)s-%(year)s\",", "see AUTHORS :license: MIT (see LICENSE) :requires: Python 2.4+ :TODO: make this framework", "first max_names names included, then etal citekey_label_style1 = dict( name_template = 'v{_}_|l{}', #", "citekey) for f in self.get_fields(): found = reo.search( self[f] ) if found: break", "2006-08-08 no longer sets a `_names` attribute :TODO: add default name_template useful for", "set_fields, None, \"property: 'fields'\") def search_fields(self, string_or_compiled, field='', ignore_case=True): \"\"\"Find regular expression in", "that are valid BibTeX citation keys. :Parameters: - used_citekeys : list a list", "year if entry_type == \"article\": jrnl = self['journal'] jrnl = ''.join(jrnl.split()).lower() # keep", "citation key (label) Example: The label style is a dict with the following", "BibEntry instances that were found (and None for entries not found, unless discarded).", "label_template = style['default_type'] name_template = style['name_template'] max_names = style['max_names'] name_name_sep = style['name_name_sep'] lower_name", "addbraces = False #i.e., assume it is a macro elif key == 'month':", "return result def get_names(self, entry_formatter=None, try_fields=None): \"\"\"return (BibName-object if possible else string) :note:", "citekey (labels) for BibEntry objects. This is not integrated with the citation styles", "references for entry in result: if entry: crossref = entry.get('crossref', None) if isinstance(crossref,", "citation keys. :Parameters: - used_citekeys : list a list of the already taken", "re.MULTILINE) else: #must have a compiled regular expression reo = string_or_compiled if not", "result def get_entry_by_citekey(self, citekey): \"\"\"Return entry or None.\"\"\" for entry in self.entries: if", "easy to type and guess and that are valid BibTeX citation keys. :Parameters:", "result = crossref[field] else: result = '' #:note: 20080331 add handling of month", "style['max_names'] name_name_sep = style['name_name_sep'] lower_name = style['lower_name'] etal = style['etal'] # first, make", "try: addbraces = not int(val) except: pass if '@' in val: # need", ": bibfile_logger.warning(\"Preamble entry on line %d:\" % lineno + \"\\n\" + buffer[start:stop]) def", "return self._fields def set_fields(self, lst): self._fields = lst fields = property(get_fields, set_fields, None,", "since BibTeX allows a 'type' field \"\"\" def __init__(self,*args,**kwargs): dict.__init__(self,*args,**kwargs) self._fields = []", "except KeyError: pass try: self._fields.remove(key) except ValueError: pass def set_entry_type(self, val): self[\"entry_type\"] =", "False #i.e., assume it is a macro elif key == 'month': # always", "entry. Provides a dictionary interface to the fields: field keys are case-insensitive and", "= val[:3].lower() addbraces = False elif key in (\"year\",\"number\",\"volume\",\"chapter\"): try: addbraces = not", "first, make names name_formatter = NameFormatter(template = name_template) names_dicts = self.get_names().get_names_dicts() # make", "field is omitted, search is through all fields. 
:note: used by BibFile's find_re", "= re.compile(string_or_compiled, re.MULTILINE) else: #must have a compiled regular expression reo = string_or_compiled", "VARIABLES ################################## months_en = ('January','February','March','April','May','June', 'July','August','September','October','November','December') monthslower_en = [m.lower() for m in months_en]", "= style['default_type'] name_template = style['name_template'] max_names = style['max_names'] name_name_sep = style['name_name_sep'] lower_name =", "= ('January','February','March','April','May','June', 'July','August','September','October','November','December') monthslower_en = [m.lower() for m in months_en] monthmacros_en = [m[:3]", "not __strict__: # we can add a dummy key and treat this entry", "re.MULTILINE | re.IGNORECASE) else: reo = re.compile(string_or_compiled, re.MULTILINE) else: #->must have a compiled", "buffer) entry.citekey = 'KEY' # dummy key -- or should we be strict?", "Example: The label style is a dict with the following fields:: citekey_label_style1 =", "markers\"\"\" (tag,start,stop,subtags) = tuple4 return buffer[start+1:stop-1] def number(self, tuple4, buffer ): \"\"\"return a", "str = dispatch(self, field, buffer) self._macroMap[name] = str \"\"\" self._macroMap[name] = str def", "if possible else string) (from \"raw\" names). :change: 2006-08-02 altered to return BibName", "= dispatch(self, subtags[0], buffer) entry.citekey = 'KEY' # dummy key -- or should", "search is through all fields. :note: used by bibsearch.py \"\"\" ls = [entry", "('.','.'), # etal = '', # lower_name = True, # anonymous = 'anon',", "name_name_sep = style['name_name_sep'] lower_name = style['lower_name'] etal = style['etal'] # first, make names", "- len(key) ) val = self[key] #handle crossref if key == 'crossref': try:", "(mlen, key, val)) stringrep += \",\\n\".join(field_list) stringrep += '\\n}\\n' return stringrep def __setitem__(self,", "entry. Return MatchObject if string_or_compiled found in entry else None. If field is", "force k to lowercase entry[k] = v self.entries.append(entry) bibfile_logger.warning(\"Dummy key added to entry", "a true entry) mlen = 0 bibfile_logger.warn(\"Entry apparently has no fields.\") field_list =", "(tag,start,stop,subtags) = tuple4 the_type = getString(subtags[0], buffer) lineno = spdp.lines(0, start, buffer) +", "line %d:\" % lineno + \"\\n\" + buffer[start:stop]) def comment_entry(self, tuple4, buffer): \"\"\"Process", "val = \"{\" + val + \"}\" field_list.append(\" %-*s = %s\" % (mlen,", "\"van_DerStadt\" max_names = 2, name_name_sep = '+', etal = 'etal', anonymous = 'anon',", "predictable and useful citekey (labels) for BibEntry objects. This is not integrated with", "= ['author','editor','organization'] return self.make_names(entry_formatter, try_fields=try_fields) def make_names(self, entry_formatter=None, try_fields=None): \"\"\"return (BibName-object if possible", "key_str in self._fields ) # for pretty format except ValueError: #no fields (not", "omitted, search is through all fields. 
:note: used by bibsearch.py \"\"\" ls =", "subtags[1][3]: if(t) : str += dispatch(self, t, buffer) # concatenate hashed together strings", "instance to provide style information - `try_fields`: list of field names to try", "found = None return found def format_names(self, names_formatter): \"\"\"return formatted BibName-object if possible", "#keep string if stuck with it if isinstance(names,str): result = names else: #assume", "#style2 shd be rst compatible # citekey_label_style2 = dict( # name_first = 'l{_}',", "of entry \"\"\" stringrep = '@%s{%s,\\n' % (self.entry_type.upper() , self.citekey) try: mlen =", "- style : str The format of the citetekey is determined by a", "NameFormatter(template = name_template) names_dicts = self.get_names().get_names_dicts() # make list of 'v_|l' last names,", "monthslower_en = [m.lower() for m in months_en] monthmacros_en = [m[:3] for m in", "bibfile_logger.warning(\"Dummy key added to entry at line %d\" % lineno) else : #", "fields below elif self[field]: found = reo.search( self[field] ) else: if field in", "= val.lower() #:note: entry_type stored as lowercase def get_entry_type(self): return self[\"entry_type\"] entry_type =", "instance for each available name field?? :Parameters: - `entry_formatter`: EntryFormatter instance to provide", "if key == 'crossref': try: val = val['citekey'] #might be an entry except", "# BibTeX does not # distinguish case format_dict['year'] = year+sfx c += 1", "BibFile inherits from ``simpleparse.dispatchprocessor``. To fill a BibFile instance, bfi, call bibgrammar.Parse(src, bfi).", "> max_names: ls = ls[:max_names] + [etal] names = name_name_sep.join(ls) if lower_name: names", "else: raw_names, field = entry_formatter.pick_raw_names(self,try_fields) return bibname.BibName(raw_names,from_field=field) #names are in a BibName object", "= jrnl.replace(\"journal\",\"j\",1) format_dict['jrnl'] = jrnl # short form, no spaces # make unique", "taken citation keys so that the function can avoid duplicates (by adding a,b,c,d...", ", self.citekey) try: mlen = max( len(key_str) for key_str in self._fields ) #", "entry for field in subtags[1][3]: name, str = dispatch(self, field, buffer) self._macroMap[name] =", "macro map) and provide access to these parts \"\"\" def __init__(self) : self.entries", "found = reo.search( self[f] ) if found: break # no need to check", "key == \"key\": bibfile_logger.info( \"Setting 'key' as an entry *field*. (Recall 'citekey' holds", "lines(0, start, buffer)+1 bibfile_logger.warning(\"Entry at line %d has macro syntax, but entry_type is", "self._macroMap = {} def get_entrylist(self, citekeys, discard=True): \"\"\"Return list, the BibEntry instances that", "the fields of each entry. If field is omitted, search is through all", "break else: raw_names, field = entry_formatter.pick_raw_names(self,try_fields) return bibname.BibName(raw_names,from_field=field) #names are in a BibName", "def set_fields(self, lst): self._fields = lst fields = property(get_fields, set_fields, None, \"property: 'fields'\")", "# lower_name = True, # anonymous = 'anon', # article = \"%(names)s-%(year)s-%(jrnl)s\", #", "and discard: bibfile_logger.warning(\"Database entries not found for the following keys:\\n\"+\"\\n\".join(bad_keys)) if discard: result", "result = label_template%format_dict return result # ---------------------------------------------------------- # Bibfile # ------- # Data", "available name field?? 
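    # Usage sketch (the .bib content below is hypothetical): fill a BibFile
    # with bibgrammar.Parse, then pull entries out by citekey:
    #
    #   bfile = BibFile()
    #   bibgrammar.Parse('@book{doe-1999, author={Doe, J.}, year=1999}', bfile)
    #   bfile.get_entry_by_citekey('doe-1999')        # -> BibEntry (or None if absent)
    #   bfile.get_entrylist(['doe-1999', 'missing'])  # missing keys are logged and discarded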
    """PRODUCTION FUNCTIONS:
    for parsing, must provide a function for each production name.
    """

    def string(self, tuple4, buffer):
        """Return a string, stripping leading and trailing markers"""
        (tag, start, stop, subtags) = tuple4
        return buffer[start + 1:stop - 1]

    def number(self, tuple4, buffer):
        """Return a number as a string"""
        (tag, start, stop, subtags) = tuple4
        return buffer[start:stop]

    def entry_type(self, tuple4, buffer):
        """Return the entry type"""
        (tag, start, stop, subtags) = tuple4
        return getString((tag, start, stop, subtags), buffer)

    def citekey(self, tuple4, buffer):
        """Return the entry's citekey"""
        (tag, start, stop, subtags) = tuple4
        return getString((tag, start, stop, subtags), buffer)

    # macro name
    def name(self, tuple4, buffer):
        """Return lookup on name, or name itself if not in the macro map."""
        (tag, start, stop, subtags) = tuple4
        return self._macroMap.get(buffer[start:stop], buffer[start:stop])
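    # Note on the tuple4 convention used by the handlers here (it comes from
    # simpleparse's DispatchProcessor, whose results these methods receive):
    # tuple4 == (tag, start, stop, subtags), so buffer[start:stop] is the
    # matched text and subtags holds the child productions, which the handlers
    # dispatch recursively.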
    def field(self, tuple4, buffer):
        """Process a bibentry field and return tuple of name, value."""
        (tag, start, stop, subtags) = tuple4
        str = ''
        for t in subtags[1][3]:
            if t:
                str += dispatch(self, t, buffer)  # concatenate hashed-together strings
        return (dispatch(self, subtags[0], buffer), str)

    def entry(self, tuple4, buffer):
        """Process the bibentry and its children."""
        (tag, start, stop, subtags) = tuple4
        entry = BibEntry()
        entry.entry_type = dispatch(self, subtags[0], buffer)
        entry.citekey = dispatch(self, subtags[1], buffer)
        for field in subtags[2][3]:
            # bibfile_logger.debug("entry: ready to add field: " + str(dispatch(self, field, buffer)))
            k, v = dispatch(self, field, buffer)
            # :note: entry will force k to lowercase
            entry[k] = v
        self.entries.append(entry)
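    # Sketch of what field() yields for a BibTeX value built with the `#`
    # concatenation operator (the field and macro values are hypothetical):
    #
    #   title = "Vol. " # vol # " of " # series
    #   # each piece is dispatched (macros resolved via name()) and concatenated,
    #   # so field() returns something like ('title', 'Vol. 12 of Handbook')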
    def macro(self, tuple4, buffer):
        """Process a macro entry and add macros to the macro map"""
        (tag, start, stop, subtags) = tuple4
        the_type = getString(subtags[0], buffer)
        if the_type.upper() != 'STRING':
            # it looks like a macro, but is not: could be a regular entry with no key
            lineno = lines(0, start, buffer) + 1
            bibfile_logger.warning("Entry at line %d has macro syntax, but entry_type is %s" % (lineno, the_type))
            if not __strict__:
                # we can add a dummy key and treat this entry as a regular entry
                entry = BibEntry()
                entry.entry_type = dispatch(self, subtags[0], buffer)
                entry.citekey = 'KEY'  # dummy key -- or should we be strict?
                for field in subtags[1][3]:
                    k, v = dispatch(self, field, buffer)
                    # :note: entry will force k to lowercase
                    entry[k] = v
                self.entries.append(entry)
                bibfile_logger.warning("Dummy key added to entry at line %d" % lineno)
        else:
            # otherwise it really is a macro entry
            for field in subtags[1][3]:
                name, str = dispatch(self, field, buffer)
                self._macroMap[name] = str
    def preamble(self, tuple4, buffer):
        """Process the given production and its children"""
        (tag, start, stop, subtags) = tuple4
        the_type = getString(subtags[0], buffer)
        lineno = lines(0, start, buffer) + 1
        if the_type.upper() != 'PREAMBLE':
            bibfile_logger.warning("Entry at line %d has preamble syntax but entry_type is %s" % (lineno, the_type))
        else:
            bibfile_logger.warning("Preamble entry on line %d:" % lineno + "\n" + buffer[start:stop])

    def comment_entry(self, tuple4, buffer):
        """Process the given production and its children"""
        (tag, start, stop, subtags) = tuple4
        the_type = getString(subtags[0], buffer)
        lineno = spdp.lines(0, start, buffer) + 1
        if the_type.upper() != 'COMMENT':
            bibfile_logger.warning("""Entry at line %d has comment syntax but entry_type is %s: Details: %s""" % (lineno, the_type, getString(subtags[1], buffer)))
        else:
            bibfile_logger.info("Comment entry on line %d:" % lineno + " " + getString(subtags[1], buffer))
    def search_entries(self, string_or_compiled, field='', ignore_case=True):
        """Return list of matching entries.
        Search for regular expression in the fields of each entry.
        If field is omitted, search is through all fields.

        :note: used by bibsearch.py
        :Parameters:
          `string_or_compiled` : string to compile or compiled regex
            pattern for searching
          `field` : string
            field to search in self (default: search all fields)
        """
        if isinstance(string_or_compiled, str):
            if ignore_case:
                reo = re.compile(string_or_compiled, re.MULTILINE | re.IGNORECASE)
            else:
                reo = re.compile(string_or_compiled, re.MULTILINE)
        else:  # must have a compiled regular expression
            reo = string_or_compiled
        ls = [entry for entry in self.entries
              if entry.search_fields(string_or_compiled=reo, field=field, ignore_case=ignore_case)]
        return ls
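    # Usage sketch (the .bib source is made up): both a pattern string and a
    # precompiled regex are accepted, per the isinstance check above:
    #
    #   bfile = BibFile()
    #   bibgrammar.Parse('@article{s01, author={Smith, J.}, journal={Oceanography}, year=2001}', bfile)
    #   bfile.search_entries('ocean', field='journal')       # case-insensitive by default
    #   bfile.search_entries(re.compile('Smith'), field='author')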
Provides a", "return entry_formatter.format_entry(self) # A default label style for citekeys created by make_citekey() #", "label_template = style[entry_type] except KeyError: label_template = style['default_type'] name_template = style['name_template'] max_names =", "buffer ): \"\"\"Process the given production and it's children\"\"\" (tag,start,stop,subtags) = tuple4 the_type", "of None except KeyError: crossref = self.get('crossref', '') if isinstance(crossref, self.__class__): result =", "or compiled regex pattern for searching - `field` : string field to search", "list of entries and a macro map) and provide access to these parts", ":copyright: <NAME> and <NAME>, see AUTHORS :license: MIT (see LICENSE) :requires: Python 2.4+", "'etal', anonymous = 'anon', lower_name = False, article = \"%(names)s-%(year)s\", book = \"%(names)s-%(year)s\",", "'????' format_dict['year'] = year if entry_type == \"article\": jrnl = self['journal'] jrnl =", "'', # lower_name = True, # anonymous = 'anon', # article = \"%(names)s-%(year)s-%(jrnl)s\",", "stringrep += \",\\n\".join(field_list) stringrep += '\\n}\\n' return stringrep def __setitem__(self, key, val): key", "field_list = [] for key in self._fields: addbraces = True addquotes = False", "field: #->try all fields (but not citekey) for f in self.get_fields(): found =", "[] self._macroMap = {} def get_entrylist(self, citekeys, discard=True): \"\"\"Return list, the BibEntry instances", "as an entry *field*. (Recall 'citekey' holds the entry id.)\") try: result =", "\"\"\"return (BibName-object if possible else string) :note: 2006-08-09 matching change to `make_names`, no", "#get a BibName instance (or possibly, a string) #keep string if stuck with", "def __setitem__(self, key, val): key = key.lower() dict.__setitem__(self, key, val) if key ==", "(tag,start,stop,subtags) = tuple4 entry = BibEntry() entry.entry_type = dispatch(self, subtags[0], buffer) entry.citekey =", "val def get_citekey(self): return self[\"citekey\"] citekey = property(get_citekey,set_citekey,None,\"property: 'citekey'\") def get_fields(self): return self._fields", "the citation styles in bibstuff.bibstyles; but it serves a very different purpose. This", "'l{_}', # name_other = 'l{_}', # max_names = 2, # use_max_names = False,", "to protect '@' addquotes = True if addquotes: val = '\"' + val", "entries not found for the following keys:\\n\"+\"\\n\".join(bad_keys)) if discard: result = [pair[1] for", "not integrated with the citation styles in bibstuff.bibstyles; but it serves a very", "def make_citekey(self, used_citekeys = [], style = citekey_label_style1): \"\"\"Create and return a new", "case format_dict['year'] = year+sfx c += 1 result = label_template%format_dict return result #", "] bad_keys = [pair[0] for pair in temp if not pair[1]] if bad_keys", "= 'etal', anonymous = 'anon', lower_name = False, article = \"%(names)s-%(year)s\", book =", "with no key lineno = lines(0, start, buffer)+1 bibfile_logger.warning(\"Entry at line %d has", "only dependence on simpleparse (via bibname) :TODO: return BibName instance for each available", "reo = string_or_compiled \"\"\" Find regex in bib_entry. If field is omitted, search", "spaces # make unique result: if needed, append suffix b or c or", "not test 'field in self' bc want test #for empty fields below elif", "fields (but not citekey) for f in self.get_fields(): found = reo.search( self[f] )", "the_type.upper() != 'STRING' : # it looks like a macro, but is not:", "and val.islower(): #:TODO: allow punctuation!! 
addbraces = False #i.e., assume it is a", "for m in monthslower_en] MONTH_DICT = dict( zip(monthmacros_en, months_en) ) ##################################################################### class BibEntry(dict):", ": string to compile or compiled regex pattern for searching `field` : string", "bibfile_logger = logging.getLogger('bibstuff_logger') # import dependencies from simpleparse import dispatchprocessor as spdp from", "asks a NamesFormatter to do it) result = names.format(names_formatter) bibfile_logger.debug(\"BibEntry.format_names result = \"+str(result))", "while label_template%format_dict in used_citekeys: sfx = ascii_lowercase[c%26]*(1+c//26) # :note: lowercase since # BibTeX", "its children. \"\"\" (tag,start,stop,subtags) = tuple4 entry = BibEntry() entry.entry_type = dispatch(self, subtags[0],", "used by bibsearch.py :Parameters: - `string_or_compiled` : string to compile or compiled regex", "# misc = \"%(names)s-%(year)s\", # default_type = \"%(names)s-%(year)s\", # ) def make_citekey(self, used_citekeys", "and that are valid BibTeX citation keys. :Parameters: - used_citekeys : list a", "monthmacros_en: val = val[:3].lower() addbraces = False elif key in (\"year\",\"number\",\"volume\",\"chapter\"): try: addbraces", "to avoid recursive import from bibstuff import bibname #ai: shd move all bibname", "in self._fields and key not in [\"citekey\",\"entry_type\"] and val: self._fields.append(key) def __getitem__(self, field):", "if possible else raw name :type `names_formatter`: NamesFormatter :note: called by CitationManager in", "and result in monthmacros_en: result = MONTH_DICT[result] return result def __delitem__(self,key) : key", "field == 'month' and result in monthmacros_en: result = MONTH_DICT[result] return result def", "citekey = property(get_citekey,set_citekey,None,\"property: 'citekey'\") def get_fields(self): return self._fields def set_fields(self, lst): self._fields =", "is not: could be a regular entry with no key lineno = lines(0,", ") ##################################################################### class BibEntry(dict): \"\"\" Stores a single bibliographic entry. Provides a dictionary", "str def preamble( self, tuple4, buffer ): \"\"\"Process the given production and it's", "allow punctuation!! addbraces = False #i.e., assume it is a macro elif key", "# short form, no spaces # make unique result: if needed, append suffix", "reo.search( self[f] ) if found: break # no need to check more fields", "which is used in turn by bibsearch.py :Parameters: `string_or_compiled` : string to compile", "self' (even though an entry will not raise #KeyError! see TODO above) BUT", "protect '@' addquotes = True if addquotes: val = '\"' + val +", "label_template%format_dict in used_citekeys: sfx = ascii_lowercase[c%26]*(1+c//26) # :note: lowercase since # BibTeX does", "citekey based on the entry's data. 
This is for creating predictable and useful", "result def get_names(self, entry_formatter=None, try_fields=None): \"\"\"return (BibName-object if possible else string) :note: 2006-08-09", "in subtags[1][3]: name, str = dispatch(self, field, buffer) self._macroMap[name] = str \"\"\" self._macroMap[name]", "spdp.lines(0, start, buffer) + 1 if the_type.upper() != 'COMMENT' : bibfile_logger.warning(\"\"\"Entry at line", "= ls[:max_names] + [etal] names = name_name_sep.join(ls) if lower_name: names = names.lower() format_dict['names']", "entry_type is %s\" % (lineno , the_type)) if not __strict__: # we can", "used by BibFile's find_re method, which is used in turn by bibsearch.py :Parameters:", "Provides a dictionary interface to the fields: field keys are case-insensitive and fields", "None. If field is omitted, search is through all fields. :note: used by", "for parsing, must provide a function for each production name. \"\"\" def string(self,", "# import dependencies from simpleparse import dispatchprocessor as spdp from simpleparse.dispatchprocessor import dispatch,", "entry.citekey = dispatch(self, subtags[1], buffer) for field in subtags[2][3] : #bibfile_logger.debug(\"entry: ready to", "from string import ascii_lowercase format_dict = {} entry_type = self.entry_type.lower() try: label_template =", "TypeError: pass #->must be a string elif key == 'journal': if val.isalpha() and", "str The format of the citetekey is determined by a `label_style` (see below)", "field \"\"\" def __init__(self,*args,**kwargs): dict.__init__(self,*args,**kwargs) self._fields = [] def __repr__(self): \"\"\"return string representation", "buffer): \"\"\"Process the given production and it's children\"\"\" (tag,start,stop,subtags) = tuple4 the_type =", "= ascii_lowercase[c%26]*(1+c//26) # :note: lowercase since # BibTeX does not # distinguish case", "field?? :Parameters: - `entry_formatter`: EntryFormatter instance to provide style information - `try_fields`: list", "a bibtex database. BibFile inherits from ``simpleparse.dispatchprocessor``. To fill a BibFile instance, bfi,", "key in citekeys ] bad_keys = [pair[0] for pair in temp if not", "label style is a dict with the following fields:: citekey_label_style1 = dict( name_template", "= \"%(names)s-%(year)s\", book = \"%(names)s-%(year)s\", misc = \"%(names)s-%(year)s\", default_type = \"%(names)s-%(year)s\") :TODO: Strip", "so that the function can avoid duplicates (by adding a,b,c,d... etc) - style", "will force k to lowercase entry[k] = v self.entries.append(entry) bibfile_logger.warning(\"Dummy key added to", "self' bc want test #for empty fields below elif self[field]: found = reo.search(", "in [\"citekey\",\"entry_type\"] and val: self._fields.append(key) def __getitem__(self, field): #field is usually a BibTeX", "and trailing markers\"\"\" (tag,start,stop,subtags) = tuple4 return buffer[start+1:stop-1] def number(self, tuple4, buffer ):", "in bib_entry. If field is omitted, search is through all fields. :note: used", "result def __delitem__(self,key) : key = key.lower() try: dict.__delitem__(self, key) except KeyError: pass", "= self.get_entry_by_citekey(crossref) if crossref: entry['crossref'] = crossref return result def get_entry_by_citekey(self, citekey): \"\"\"Return", "buffer ): \"\"\"return a number as a string\"\"\" (tag,start,stop,subtags) = tuple4 return buffer[start:stop]", "'+', etal = 'etal', anonymous = 'anon', lower_name = False, article = \"%(names)s-%(year)s\",", "MatchObject if string_or_compiled found in entry else None. 
        format_dict['year'] = year

        if entry_type == "article":
            jrnl = self['journal']
            jrnl = ''.join(jrnl.split()).lower()  # keep macro
            jrnl = jrnl.replace("journal", "j", 1)
            format_dict['jrnl'] = jrnl  # short form, no spaces

        # make the result unique: if needed, append suffix b, c, d, ... to the year
        sfx = ''
        c = 1
        # while result+sfx in used_citekeys:
        while label_template % format_dict in used_citekeys:
            sfx = ascii_lowercase[c % 26] * (1 + c // 26)
            # :note: lowercase suffix, since BibTeX does not distinguish case
            format_dict['year'] = year + sfx
            c += 1

        result = label_template % format_dict
        return result
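    # Usage sketch (assumes an entry whose author field is
    # "Schwilk, Dylan and Isaac, Alan" with year 2006): under the default
    # citekey_label_style1, make_citekey() returns "Schwilk+Isaac-2006"; if
    # that key is already in used_citekeys, the year gets a lowercase suffix
    # and the result becomes "Schwilk+Isaac-2006b".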
\"\"\"", "val): self[\"citekey\"] = val def get_citekey(self): return self[\"citekey\"] citekey = property(get_citekey,set_citekey,None,\"property: 'citekey'\") def", "#attach cross references for entry in result: if entry: crossref = entry.get('crossref', None)", "\"\"\"Return lookup on name or name if not in map.\"\"\" (tag,start,stop,subtags) = tuple4", ": #bibfile_logger.debug(\"entry: ready to add field: \"+str(dispatch(self, field, buffer))) k,v = dispatch(self, field,", "key, val) if key == \"key\": bibfile_logger.info( \"Setting 'key' as an entry *field*.", ":type `names_formatter`: NamesFormatter :note: called by CitationManager in format_citation :note: 2006-08-08 no longer", "bibgrammar ##################################################################### ############### GLOBAL VARIABLES ################################## months_en = ('January','February','March','April','May','June', 'July','August','September','October','November','December') monthslower_en = [m.lower()", "BibName instance #ask BibName instance to format itself (and it asks a NamesFormatter", "the following keys:\\n\"+\"\\n\".join(bad_keys)) if discard: result = [pair[1] for pair in temp if", "= lines(0, start, buffer)+1 bibfile_logger.warning(\"Entry at line %d has macro syntax, but entry_type", "\"raw\" names). :change: 2006-08-02 altered to return BibName instance and not set _names", "these parts \"\"\" def __init__(self) : self.entries = [] self._macroMap = {} def", "here? possibly if entry_formatter is None: for field in try_fields: raw_names = self[field]", "def citekey( self, tuple4, buffer ): \"\"\"Return the entry's citekey\"\"\" (tag,start,stop,subtags) = tuple4", ") if found: break # no need to check more fields # :note:", "bibfile_logger.debug(\"BibEntry.format_names result = \"+str(result)) return result def get_names(self, entry_formatter=None, try_fields=None): \"\"\"return (BibName-object if", "elif key == 'journal': if val.isalpha() and val.islower(): #:TODO: allow punctuation!! addbraces =", ":note: called by CitationManager in format_citation :note: 2006-08-08 no longer sets a `_names`", "stringrep += '\\n}\\n' return stringrep def __setitem__(self, key, val): key = key.lower() dict.__setitem__(self,", "\"article\": jrnl = self['journal'] jrnl = ''.join(jrnl.split()).lower() # keep macro jrnl = jrnl.replace(\"journal\",\"j\",1)", "for bibtex file # ---------------------------------------------------------- class BibFile( DispatchProcessor ): \"\"\"Stores parsed bibtex file.", "citekey_label_style1): \"\"\"Create and return a new citekey based on the entry's data. This", "bibname.BibName(raw_names,from_field=field) #names are in a BibName object def format_with(self, entry_formatter): bibfile_logger.debug(\"BibEntry.format_with: arg is:\"+str(entry_formatter))", "def format_with(self, entry_formatter): bibfile_logger.debug(\"BibEntry.format_with: arg is:\"+str(entry_formatter)) #ask the EntryFormatter to do it return", "BibEntry objects. 
    def get_entrylist(self, citekeys, discard=True):
        """Return list, the BibEntry instances that were found
        (and None for entries not found, unless discarded).
        """
        if not citekeys:
            bibfile_logger.warning("get_entrylist: No keys provided; returning empty cited-entry list.")
            return []
        temp = [(key, self.get_entry_by_citekey(key)) for key in citekeys]
        bad_keys = [pair[0] for pair in temp if not pair[1]]
        if bad_keys and discard:
            bibfile_logger.warning("Database entries not found for the following keys:\n" + "\n".join(bad_keys))
        if discard:
            result = [pair[1] for pair in temp if pair[1]]
        else:  # keep None where it occurs in the entry list
            result = [pair[1] for pair in temp]
        # attach cross references
        for entry in result:
            if entry:
                crossref = entry.get('crossref', None)
                if isinstance(crossref, str):
                    crossref = self.get_entry_by_citekey(crossref)
                    if crossref:
                        entry['crossref'] = crossref
        return result
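    # Usage sketch (hypothetical citekeys): with a parsed BibFile `bib`,
    #   entries = bib.get_entrylist(["smith2000", "missing-key"])
    # logs the missing key and returns only the found entry; passing
    # discard=False instead keeps a None placeholder so that results stay
    # aligned with the requested citekeys.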
    def get_entry_by_citekey(self, citekey):
        """Return entry or None."""
        for entry in self.entries:
            if entry.citekey == citekey:
                return entry

    """PRODUCTION FUNCTIONS:
    for parsing, must provide a function for each production name.
    """
    def string(self, tuple4, buffer):
        """Return a string, stripping leading and trailing markers"""
        (tag, start, stop, subtags) = tuple4
        return buffer[start+1:stop-1]

    def number(self, tuple4, buffer):
        """Return a number as a string"""
        (tag, start, stop, subtags) = tuple4
        return buffer[start:stop]

    def entry_type(self, tuple4, buffer):
        """Return the entry type"""
        (tag, start, stop, subtags) = tuple4
        return getString((tag, start, stop, subtags), buffer)

    def citekey(self, tuple4, buffer):
        """Return the entry's citekey"""
        (tag, start, stop, subtags) = tuple4
        return getString((tag, start, stop, subtags), buffer)

    # macro name
    def name(self, tuple4, buffer):
        """Return lookup on name, or the name itself if not in the macro map."""
        (tag, start, stop, subtags) = tuple4
        return self._macroMap.get(buffer[start:stop], buffer[start:stop])

    def field(self, tuple4, buffer):
        """Process a bibentry field and return a (name, value) tuple."""
        (tag, start, stop, subtags) = tuple4
        str = ''
        for t in subtags[1][3]:
            if t:
                str += dispatch(self, t, buffer)  # concatenate hash-joined strings
        return (dispatch(self, subtags[0], buffer), str)
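    # Each production function receives a simpleparse result tuple4 of the
    # form (tag, start, stop, subtags), where start and stop index into
    # `buffer`. For a field production, for example, subtags[0] spans the
    # field name and subtags[1] holds the (possibly '#'-concatenated) value
    # parts.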
    def entry(self, tuple4, buffer):
        """Process the bibentry and its children."""
        (tag, start, stop, subtags) = tuple4
        entry = BibEntry()
        entry.entry_type = dispatch(self, subtags[0], buffer)
        entry.citekey = dispatch(self, subtags[1], buffer)
        for field in subtags[2][3]:
            # bibfile_logger.debug("entry: ready to add field: " + str(dispatch(self, field, buffer)))
            k, v = dispatch(self, field, buffer)
            # :note: entry will force k to lowercase
            entry[k] = v
        self.entries.append(entry)
    def macro(self, tuple4, buffer):
        """Process a macro entry and add macros to the macro map"""
        (tag, start, stop, subtags) = tuple4
        name, str = dispatch(self, subtags[0], buffer)
        # the triple-quoted block below is disabled code kept from an earlier
        # revision; it handled non-@string entries that merely look like macros
        """
        the_type = getString(subtags[0], buffer)
        if the_type.upper() != 'STRING' :
            # it looks like a macro, but is not: could be a regular entry with no key
            lineno = lines(0, start, buffer) + 1
            bibfile_logger.warning("Entry at line %d has macro syntax, but entry_type is %s" % (lineno, the_type))
            if not __strict__: # we can add a dummy key and treat this entry as a regular entry
                entry = BibEntry()
                entry.entry_type = dispatch(self, subtags[0], buffer)
                entry.citekey = 'KEY'  # dummy key -- or should we be strict?
                for field in subtags[1][3]:
                    k, v = dispatch(self, field, buffer)
                    # :note: entry will force k to lowercase
                    entry[k] = v
                self.entries.append(entry)
                bibfile_logger.warning("Dummy key added to entry at line %d" % lineno)
        else : # otherwise it is really a macro entry
            for field in subtags[1][3]:
                name, str = dispatch(self, field, buffer)
                self._macroMap[name] = str
        """
        self._macroMap[name] = str
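    # Sketch: parsing `@string{jae = "Journal of Applied Econometrics"}`
    # reaches macro(), which stores
    # _macroMap["jae"] = "Journal of Applied Econometrics"; name() then
    # resolves bare `jae` tokens in later field values via this map.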
    def preamble(self, tuple4, buffer):
        """Process the given production and its children"""
        (tag, start, stop, subtags) = tuple4
        the_type = getString(subtags[0], buffer)
        lineno = lines(0, start, buffer) + 1
        if the_type.upper() != 'PREAMBLE':
            bibfile_logger.warning("Entry at line %d has preamble syntax but entry_type is %s" % (lineno, the_type))
        else:
            bibfile_logger.info("Preamble entry on line %d:" % lineno + "\n" + buffer[start:stop])

    def comment_entry(self, tuple4, buffer):
        """Process the given production and its children"""
        (tag, start, stop, subtags) = tuple4
        the_type = getString(subtags[0], buffer)
        lineno = spdp.lines(0, start, buffer) + 1
        if the_type.upper() != 'COMMENT':
            bibfile_logger.warning("""Entry at line %d has comment syntax but entry_type is %s:
            Details: %s""" % (lineno, the_type, getString(subtags[1], buffer)))
        else:
            bibfile_logger.info("Comment entry on line %d:" % lineno + " " + getString(subtags[1], buffer))
If field is", "(\"year\",\"number\",\"volume\",\"chapter\"): try: addbraces = not int(val) except: pass if '@' in val: #", "tuple4 name, str = dispatch(self, subtags[0], buffer) \"\"\" the_type = getString(subtags[0], buffer) if", "KeyError: pass try: self._fields.remove(key) except ValueError: pass def set_entry_type(self, val): self[\"entry_type\"] = val.lower()", "parts of a bibtex database. BibFile inherits from ``simpleparse.dispatchprocessor``. To fill a BibFile", "#->must be a string elif key == 'journal': if val.isalpha() and val.islower(): #:TODO:", "= dispatch(self, subtags[1], buffer) for field in subtags[2][3] : #bibfile_logger.debug(\"entry: ready to add", "support for bibtexparser (BSD) https://github.com/sciunto-org/python-bibtexparser \"\"\" __docformat__ = \"restructuredtext en\" __authors__ = [\"<NAME>\",", "if entry: crossref = entry.get('crossref', None) if isinstance(crossref, str): crossref = self.get_entry_by_citekey(crossref) if", "\"\"\"Return list of matching entries. Search for regular expression in the fields of", "#:note: 20080331 add handling of month macros if field == 'month' and result", "LICENSE) :requires: Python 2.4+ :TODO: make this framework more general, perhaps along the", "c = 1 # while result+sfx in used_citekeys: while label_template%format_dict in used_citekeys: sfx", "= 'anon', # article = \"%(names)s-%(year)s-%(jrnl)s\", # book = \"%(names)s-%(year)s\", # misc =", "entry.get('crossref', None) if isinstance(crossref, str): crossref = self.get_entry_by_citekey(crossref) if crossref: entry['crossref'] = crossref", "instance and not set _names :note: self returns None if field missing (->", "buffer) lineno = spdp.lines(0, start, buffer) + 1 if the_type.upper() != 'COMMENT' :", "self._fields = [] def __repr__(self): \"\"\"return string representation of entry \"\"\" stringrep =", "self[f] ) if found: break # no need to check more fields #", "all fields. :note: used by bibsearch.py :Parameters: - `string_or_compiled` : string to compile", "= 'v{_}_|l{}', # see NameFormatter class max_names = 2, name_name_sep = \"+\", etal", "\"}\" field_list.append(\" %-*s = %s\" % (mlen, key, val)) stringrep += \",\\n\".join(field_list) stringrep", "should we be strict with bibtex format? ####################### IMPORTS ##################################### # import from", "dispatch, DispatchProcessor, getString, lines #bibstuff imports # from . import bibgrammar ##################################################################### ###############", "and BibEntry for accessing the parts of a bibtex database. BibFile inherits from", "a NamesFormatter to do it) result = names.format(names_formatter) bibfile_logger.debug(\"BibEntry.format_names result = \"+str(result)) return", "# A default label style for citekeys created by make_citekey() # first max_names", "k,v = dispatch(self, field, buffer) #:note: entry will force k to lowercase entry[k]", "= 'KEY' # dummy key -- or should we be strict? for field", "add a dummy key and treat this entry as a regular entry entry", "try: val = val['citekey'] #might be an entry except TypeError: pass #->must be", "= [m.lower() for m in months_en] monthmacros_en = [m[:3] for m in monthslower_en]", "key. :note: a BibFile object should simply *store* .bib file parts (a list", "to type and guess and that are valid BibTeX citation keys. :Parameters: -", "c or d... 
to year sfx = ''; c = 1 # while", "= '+', etal = 'etal', anonymous = 'anon', lower_name = False, article =", "= \"%(names)s-%(year)s\", misc = \"%(names)s-%(year)s\", default_type = \"%(names)s-%(year)s\") :TODO: Strip LaTeX accent characters", "lower_name = True, # anonymous = 'anon', # article = \"%(names)s-%(year)s-%(jrnl)s\", # book", "has macro syntax, but entry_type is %s\" % (lineno , the_type)) if not", "# otherwise it is really a macro entry for field in subtags[1][3]: name,", "- `string_or_compiled` : string to compile or compiled regex pattern for searching -", "t in subtags[1][3]: if(t) : str += dispatch(self, t, buffer) # concatenate hashed", "name. \"\"\" def string(self, tuple4, buffer ): \"\"\"Return a string, stripping leading and", "'') if isinstance(crossref, self.__class__): result = crossref[field] else: result = '' #:note: 20080331", "bibname) :TODO: return BibName instance for each available name field?? :Parameters: - `entry_formatter`:", "assume it is a macro elif key == 'month': # always use month", "a compiled regular expression reo = string_or_compiled \"\"\" Find regex in bib_entry. If", "lines #bibstuff imports # from . import bibgrammar ##################################################################### ############### GLOBAL VARIABLES ##################################", "*field*. (Recall 'citekey' holds the entry id.)\") try: result = dict.__getitem__(self, field) #:TODO:", "dummy key and treat this entry as a regular entry entry = BibEntry()", "have a compiled regular expression reo = string_or_compiled if not field: #->try all", "with the following fields:: citekey_label_style1 = dict( name_template = 'v{_}_|l{}', # see NameFormatter", "from bibstuff import bibname #ai: shd move all bibname into here? possibly if", "bibfile_logger.warning(\"Entry at line %d has preamble syntax but entry_type is %s\" % (lineno,the_type))", "lines(0,start,buffer)+1 if the_type.upper() != 'PREAMBLE' : bibfile_logger.warning(\"Entry at line %d has preamble syntax", "search_fields(self, string_or_compiled, field='', ignore_case=True): \"\"\"Find regular expression in entry. Return MatchObject if string_or_compiled", "in self (default: search all fields) \"\"\" if isinstance(string_or_compiled, str): if ignore_case: reo", "if possible if val.lower() in monthslower_en + monthmacros_en: val = val[:3].lower() addbraces =", "= 'v{_}_|l{}', # \"van_der_Meer\" or \"van_DerStadt\" max_names = 2, name_name_sep = '+', etal", "citekey): \"\"\"Return entry or None.\"\"\" for entry in self.entries: if entry.citekey == citekey:", "no KeyError) :note: this method introduces the only dependence on simpleparse (via bibname)", "= [] self._macroMap = {} def get_entrylist(self, citekeys, discard=True): \"\"\"Return list, the BibEntry", "subtags[1][3] : k,v = dispatch(self, field, buffer) #:note: entry will force k to", "= property(get_citekey,set_citekey,None,\"property: 'citekey'\") def get_fields(self): return self._fields def set_fields(self, lst): self._fields = lst", "# name_first = 'l{_}', # name_other = 'l{_}', # max_names = 2, #", "is None: if not try_fields: try_fields = ['author','editor','organization'] return self.make_names(entry_formatter, try_fields=try_fields) def make_names(self,", "This is not integrated with the citation styles in bibstuff.bibstyles; but it serves", "\"\"\" Find regex in bib_entry. If field is omitted, search is through all", "possible else string) (from \"raw\" names). 
:change: 2006-08-02 altered to return BibName instance", "if found: break # no need to check more fields # :note: CAN", "none empty filed -> name \"\"\" # importing bibname here to avoid recursive", "format_with(self, entry_formatter): bibfile_logger.debug(\"BibEntry.format_with: arg is:\"+str(entry_formatter)) #ask the EntryFormatter to do it return entry_formatter.format_entry(self)", "stored in the order added. :note: 2006-08-10 use 'citekey' instead of 'key' since", "bibentry and its children. \"\"\" (tag,start,stop,subtags) = tuple4 entry = BibEntry() entry.entry_type =", "a compiled regular expression reo = string_or_compiled if not field: #->try all fields", "Stores a single bibliographic entry. Provides a dictionary interface to the fields: field", "that were found (and None for entries not found, unless discarded). \"\"\" if", "crossref = entry.get('crossref', None) if isinstance(crossref, str): crossref = self.get_entry_by_citekey(crossref) if crossref: entry['crossref']", "string_or_compiled, field='', ignore_case=True): \"\"\"Find regular expression in entry. Return MatchObject if string_or_compiled found", "str += dispatch(self, t, buffer) # concatenate hashed together strings return (dispatch(self, subtags[0],", "# src = open(sys.argv[1]).read() # bfile = BibFile() # bibgrammar.Parse(src, bfile) # for", "name def name(self, tuple4, buffer ): \"\"\"Return lookup on name or name if", "BibName object def format_with(self, entry_formatter): bibfile_logger.debug(\"BibEntry.format_with: arg is:\"+str(entry_formatter)) #ask the EntryFormatter to do", "reo = re.compile(string_or_compiled, re.MULTILINE) else: #->must have a compiled regular expression reo =", "introduces the only dependence on simpleparse (via bibname) :TODO: return BibName instance for", "getString(subtags[0], buffer) lineno = lines(0,start,buffer)+1 if the_type.upper() != 'PREAMBLE' : bibfile_logger.warning(\"Entry at line", "instance (or possibly, a string) #keep string if stuck with it if isinstance(names,str):", "objects. This is not integrated with the citation styles in bibstuff.bibstyles; but it", "is used for formatting) #:note: 20080331 changed KeyError to return '' instead of", "shd be rst compatible # citekey_label_style2 = dict( # name_first = 'l{_}', #", "elif key == 'month': # always use month macros if possible if val.lower()", "= dispatch(self, subtags[0], buffer) \"\"\" the_type = getString(subtags[0], buffer) if the_type.upper() != 'STRING'", "%s\"\"\" % (lineno, the_type, getString(subtags[1], buffer))) else : bibfile_logger.info(\"Comment entry on line %d:\"", "self['journal'] jrnl = ''.join(jrnl.split()).lower() # keep macro jrnl = jrnl.replace(\"journal\",\"j\",1) format_dict['jrnl'] = jrnl", "no need to check more fields # :note: CAN test 'field in self'", "bibname here to avoid recursive import from bibstuff import bibname #ai: shd move", "type\"\"\" (tag,start,stop,subtags) = tuple4 return getString((tag,start,stop,subtags), buffer) def citekey( self, tuple4, buffer ):", "field: \"+str(dispatch(self, field, buffer))) k,v = dispatch(self, field, buffer) #:note: entry will force", "information - `try_fields`: list of field names to try sequentially; none empty filed", "bfi). :copyright: <NAME> and <NAME>, see AUTHORS :license: MIT (see LICENSE) :requires: Python", "bibfile_logger.info( \"Setting 'key' as an entry *field*. 
(Recall 'citekey' holds the entry id.)\")", "bibfile_logger.info(\"Comment entry on line %d:\" % lineno + \" \" + getString(subtags[1], buffer))", "else: #->must have a compiled regular expression reo = string_or_compiled \"\"\" Find regex", "citekey\"\"\" (tag,start,stop,subtags) = tuple4 return getString((tag,start,stop,subtags), buffer) # macro name def name(self, tuple4,", "= reo.search( self[f] ) if found: break # no need to check more", "val = val[:3].lower() addbraces = False elif key in (\"year\",\"number\",\"volume\",\"chapter\"): try: addbraces =", "= style['lower_name'] etal = style['etal'] # first, make names name_formatter = NameFormatter(template =", "is through all fields. :note: used by BibFile's find_re method, which is used", "on simpleparse (via bibname) :TODO: return BibName instance for each available name field??", "the EntryFormatter to do it return entry_formatter.format_entry(self) # A default label style for", "(lineno , the_type)) if not __strict__: # we can add a dummy key", "return bibname.BibName(raw_names,from_field=field) #names are in a BibName object def format_with(self, entry_formatter): bibfile_logger.debug(\"BibEntry.format_with: arg", "None for entries not found, unless discarded). \"\"\" if not citekeys: bibfile_logger.warning(\"get_entrylist: No", "fields. :note: used by BibFile's find_re method, which is used in turn by", "to the fields: field keys are case-insensitive and fields are stored in the", "result = names else: #assume a BibName instance #ask BibName instance to format", "tuple4 the_type = getString(subtags[0], buffer) lineno = spdp.lines(0, start, buffer) + 1 if", "if the_type.upper() != 'COMMENT' : bibfile_logger.warning(\"\"\"Entry at line %d has comment syntax but", "= '@%s{%s,\\n' % (self.entry_type.upper() , self.citekey) try: mlen = max( len(key_str) for key_str", ":note: this method introduces the only dependence on simpleparse (via bibname) :TODO: return", "and guess and that are valid BibTeX citation keys. :Parameters: - used_citekeys :", "%s\" % (lineno,the_type)) else : bibfile_logger.warning(\"Preamble entry on line %d:\" % lineno +", ": str The format of the citetekey is determined by a `label_style` (see", "database. BibFile inherits from ``simpleparse.dispatchprocessor``. To fill a BibFile instance, bfi, call bibgrammar.Parse(src,", "# it looks like a macro, but is not: could be a regular", "file. Access entries by key. :note: a BibFile object should simply *store* .bib", "for field in try_fields: raw_names = self[field] if raw_names: break else: raw_names, field", "if isinstance(crossref, str): crossref = self.get_entry_by_citekey(crossref) if crossref: entry['crossref'] = crossref return result", "production name. \"\"\" def string(self, tuple4, buffer ): \"\"\"Return a string, stripping leading", "\"\"\"Process the bibentry and its children. 
\"\"\" (tag,start,stop,subtags) = tuple4 entry = BibEntry()", "provided; returning empty cited-entry list.\") return [] temp = [ (key,self.get_entry_by_citekey(key)) for key", "is a dict with the following fields:: citekey_label_style1 = dict( name_template = 'v{_}_|l{}',", "1 : # src = open(sys.argv[1]).read() # bfile = BibFile() # bibgrammar.Parse(src, bfile)", "= tuple4 return buffer[start+1:stop-1] def number(self, tuple4, buffer ): \"\"\"return a number as", "''.join(jrnl.split()).lower() # keep macro jrnl = jrnl.replace(\"journal\",\"j\",1) format_dict['jrnl'] = jrnl # short form,", "import dispatchprocessor as spdp from simpleparse.dispatchprocessor import dispatch, DispatchProcessor, getString, lines #bibstuff imports", "is usually a BibTeX field but can be a citekey field = field.lower()", "None: for field in try_fields: raw_names = self[field] if raw_names: break else: raw_names,", "citekeys, discard=True): \"\"\"Return list, the BibEntry instances that were found (and None for", "searching `field` : string field to search in self (default: search all fields)", "for bibtexparser (BSD) https://github.com/sciunto-org/python-bibtexparser \"\"\" __docformat__ = \"restructuredtext en\" __authors__ = [\"<NAME>\", \"<NAME>\"]", "test # ------------------------- # usage: bibfile.py DATABASE_FILE # if __name__ == \"__main__\": #", "val = '\"' + val + '\"' elif addbraces: val = \"{\" +", "a BibTeX field but can be a citekey field = field.lower() if field", "\"key\": bibfile_logger.info( \"Seeking 'key' as an entry *field*. (Recall 'citekey' holds the entry", "KeyError to return '' instead of None except KeyError: crossref = self.get('crossref', '')", "from .bibstyles.shared import NameFormatter from string import ascii_lowercase format_dict = {} entry_type =", "raw name :type `names_formatter`: NamesFormatter :note: called by CitationManager in format_citation :note: 2006-08-08", "each production name. \"\"\" def string(self, tuple4, buffer ): \"\"\"Return a string, stripping", "macro name def name(self, tuple4, buffer ): \"\"\"Return lookup on name or name", "name_name_sep = \"+\", etal = 'etal', anonymous = 'anon', lower_name = False, article", "buffer) entry.citekey = dispatch(self, subtags[1], buffer) for field in subtags[2][3] : #bibfile_logger.debug(\"entry: ready", "############### GLOBAL VARIABLES ################################## months_en = ('January','February','March','April','May','June', 'July','August','September','October','November','December') monthslower_en = [m.lower() for m", "%s: Details: %s\"\"\" % (lineno, the_type, getString(subtags[1], buffer))) else : bibfile_logger.info(\"Comment entry on", "lowercase entry[k] = v self.entries.append(entry) def macro( self, tuple4, buffer ): \"\"\"Process a", "field, buffer))) k,v = dispatch(self, field, buffer) #:note: entry will force k to", "[entry for entry in self.entries if entry.search_fields(string_or_compiled=reo, field=field, ignore_case=ignore_case)] return ls # self", "to `make_names`, no longer sets `self._names` \"\"\" if entry_formatter is None: if not", "# if __name__ == \"__main__\": # import sys # if len(sys.argv) > 1", "= False # should we be strict with bibtex format? 
####################### IMPORTS #####################################", "(tag,start,stop,subtags) = tuple4 return buffer[start+1:stop-1] def number(self, tuple4, buffer ): \"\"\"return a number", "default label style for citekeys created by make_citekey() # first max_names names included,", "result = [pair[1] for pair in temp] #attach cross references for entry in", "= BibEntry() entry.entry_type = dispatch(self, subtags[0], buffer) entry.citekey = 'KEY' # dummy key", "stringrep def __setitem__(self, key, val): key = key.lower() dict.__setitem__(self, key, val) if key", "%d:\" % lineno + \"\\n\" + buffer[start:stop]) def comment_entry(self, tuple4, buffer): \"\"\"Process the", "set_citekey(self, val): self[\"citekey\"] = val def get_citekey(self): return self[\"citekey\"] citekey = property(get_citekey,set_citekey,None,\"property: 'citekey'\")" ]
[ "frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}): test_record = { \"doctype\": \"Labour Information\", \"title\": \"_Test Labour Information\",", "test_record = { \"doctype\": \"Labour Information\", \"title\": \"_Test Labour Information\", \"category\": \"STAFF\", \"name1\":\"abc\",", "import unicode_literals import frappe import unittest test_records = frappe.get_test_records('Labour Information') class TestLabourInformation(unittest.TestCase): #from", "frappe import unittest test_records = frappe.get_test_records('Labour Information') class TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details import get_item_details", "import get_item_details #from frappe import MandatoryError def make_employee(self,emp_id): if not frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}):", "# Copyright (c) 2015, Frappe and Contributors # See license.txt from __future__ import", "make_employee(self,emp_id): if not frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}): test_record = { \"doctype\": \"Labour Information\", \"title\":", "Information\", \"title\": \"_Test Labour Information\", \"category\": \"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\",", "erpnext.stock.get_item_details import get_item_details #from frappe import MandatoryError def make_employee(self,emp_id): if not frappe.db.get_value(\"Labour Information\",{\"emp_id\":", "frappe.get_test_records('Labour Information') class TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details import get_item_details #from frappe import MandatoryError def", "class TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details import get_item_details #from frappe import MandatoryError def make_employee(self,emp_id): if", "coding: utf-8 -*- # Copyright (c) 2015, Frappe and Contributors # See license.txt", "Contributors # See license.txt from __future__ import unicode_literals import frappe import unittest test_records", "employee_name(self): employee = self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\") res = frappe.get_list(\"Labour Information\", filters=[[\"Labour Information\", \"name1\",", "__future__ import unicode_literals import frappe import unittest test_records = frappe.get_test_records('Labour Information') class TestLabourInformation(unittest.TestCase):", "Copyright (c) 2015, Frappe and Contributors # See license.txt from __future__ import unicode_literals", "get_item_details #from frappe import MandatoryError def make_employee(self,emp_id): if not frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}): test_record", "#from erpnext.stock.get_item_details import get_item_details #from frappe import MandatoryError def make_employee(self,emp_id): if not frappe.db.get_value(\"Labour", "\"_Test Labour Information\", \"category\": \"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\":", "and Contributors # See license.txt from __future__ import unicode_literals import frappe import unittest", "from __future__ import unicode_literals import frappe import unittest test_records = frappe.get_test_records('Labour Information') class", "\"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\": \"Ghatia Tea Estate\" } frappe.get_doc(test_record).insert() def employee_name(self): 
employee =", "Information') class TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details import get_item_details #from frappe import MandatoryError def make_employee(self,emp_id):", "\"doctype\": \"Labour Information\", \"title\": \"_Test Labour Information\", \"category\": \"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\",", "unittest test_records = frappe.get_test_records('Labour Information') class TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details import get_item_details #from frappe", "unicode_literals import frappe import unittest test_records = frappe.get_test_records('Labour Information') class TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details", "import unittest test_records = frappe.get_test_records('Labour Information') class TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details import get_item_details #from", "\"Labour Information\", \"title\": \"_Test Labour Information\", \"category\": \"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\",", "Information\",{\"emp_id\": emp_id}): test_record = { \"doctype\": \"Labour Information\", \"title\": \"_Test Labour Information\", \"category\":", "\"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\": \"Ghatia Tea Estate\" } frappe.get_doc(test_record).insert() def employee_name(self):", "\"Ghatia Tea Estate\" } frappe.get_doc(test_record).insert() def employee_name(self): employee = self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\") res", "\"emp_id\":\"001\", \"garden\": \"Ghatia Tea Estate\" } frappe.get_doc(test_record).insert() def employee_name(self): employee = self.make_employee(\"001\") self.assertEqual(employee.name1,", "= self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\") res = frappe.get_list(\"Labour Information\", filters=[[\"Labour Information\", \"name1\", \"like\", \"abc%\"]],", "def make_employee(self,emp_id): if not frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}): test_record = { \"doctype\": \"Labour Information\",", "frappe.get_doc(test_record).insert() def employee_name(self): employee = self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\") res = frappe.get_list(\"Labour Information\", filters=[[\"Labour", "\"title\": \"_Test Labour Information\", \"category\": \"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\",", "-*- # Copyright (c) 2015, Frappe and Contributors # See license.txt from __future__", "test_records = frappe.get_test_records('Labour Information') class TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details import get_item_details #from frappe import", "import MandatoryError def make_employee(self,emp_id): if not frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}): test_record = { \"doctype\":", "employee = self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\") res = frappe.get_list(\"Labour Information\", filters=[[\"Labour Information\", \"name1\", \"like\",", "\"category\": \"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\": \"Ghatia Tea Estate\"", 
"Frappe and Contributors # See license.txt from __future__ import unicode_literals import frappe import", "(c) 2015, Frappe and Contributors # See license.txt from __future__ import unicode_literals import", "\"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\": \"Ghatia Tea Estate\" } frappe.get_doc(test_record).insert() def employee_name(self): employee", "# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe and Contributors #", "self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\") res = frappe.get_list(\"Labour Information\", filters=[[\"Labour Information\", \"name1\", \"like\", \"abc%\"]], fields=[\"emp_id\",", "\"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\": \"Ghatia Tea Estate\" }", "if not frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}): test_record = { \"doctype\": \"Labour Information\", \"title\": \"_Test", "\"abc\") res = frappe.get_list(\"Labour Information\", filters=[[\"Labour Information\", \"name1\", \"like\", \"abc%\"]], fields=[\"emp_id\", \"name1\"]) self.assertEquals(len(res),", "frappe import MandatoryError def make_employee(self,emp_id): if not frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}): test_record = {", "Labour Information\", \"category\": \"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\": \"Ghatia", "\"garden\": \"Ghatia Tea Estate\" } frappe.get_doc(test_record).insert() def employee_name(self): employee = self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\")", "See license.txt from __future__ import unicode_literals import frappe import unittest test_records = frappe.get_test_records('Labour", "emp_id}): test_record = { \"doctype\": \"Labour Information\", \"title\": \"_Test Labour Information\", \"category\": \"STAFF\",", "# See license.txt from __future__ import unicode_literals import frappe import unittest test_records =", "= { \"doctype\": \"Labour Information\", \"title\": \"_Test Labour Information\", \"category\": \"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\",", "\"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\": \"Ghatia Tea Estate\" } frappe.get_doc(test_record).insert() def employee_name(self): employee = self.make_employee(\"001\")", "-*- coding: utf-8 -*- # Copyright (c) 2015, Frappe and Contributors # See", "Tea Estate\" } frappe.get_doc(test_record).insert() def employee_name(self): employee = self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\") res =", "= frappe.get_test_records('Labour Information') class TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details import get_item_details #from frappe import MandatoryError", "not frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}): test_record = { \"doctype\": \"Labour Information\", \"title\": \"_Test Labour", "Estate\" } frappe.get_doc(test_record).insert() def employee_name(self): employee = self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\") res = frappe.get_list(\"Labour", "MandatoryError def make_employee(self,emp_id): if not frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}): test_record = { \"doctype\": \"Labour", "res = frappe.get_list(\"Labour Information\", filters=[[\"Labour Information\", \"name1\", \"like\", \"abc%\"]], 
fields=[\"emp_id\", \"name1\"]) self.assertEquals(len(res), 1)", "utf-8 -*- # Copyright (c) 2015, Frappe and Contributors # See license.txt from", "Information\", \"category\": \"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\": \"Ghatia Tea", "def employee_name(self): employee = self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\") res = frappe.get_list(\"Labour Information\", filters=[[\"Labour Information\",", "<reponame>nivedita05/Tea-Admin<gh_stars>0 # -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe and Contributors", "import frappe import unittest test_records = frappe.get_test_records('Labour Information') class TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details import", "\"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\": \"Ghatia Tea Estate\" } frappe.get_doc(test_record).insert() def", "license.txt from __future__ import unicode_literals import frappe import unittest test_records = frappe.get_test_records('Labour Information')", "self.assertEqual(employee.name1, \"abc\") res = frappe.get_list(\"Labour Information\", filters=[[\"Labour Information\", \"name1\", \"like\", \"abc%\"]], fields=[\"emp_id\", \"name1\"])", "2015, Frappe and Contributors # See license.txt from __future__ import unicode_literals import frappe", "TestLabourInformation(unittest.TestCase): #from erpnext.stock.get_item_details import get_item_details #from frappe import MandatoryError def make_employee(self,emp_id): if not", "#from frappe import MandatoryError def make_employee(self,emp_id): if not frappe.db.get_value(\"Labour Information\",{\"emp_id\": emp_id}): test_record =", "} frappe.get_doc(test_record).insert() def employee_name(self): employee = self.make_employee(\"001\") self.assertEqual(employee.name1, \"abc\") res = frappe.get_list(\"Labour Information\",", "\"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\", \"gender1\":\"Male\", \"doj\":\"2016-08-19\", \"book_code\":\"STAFF\", \"emp_id\":\"001\", \"garden\": \"Ghatia Tea Estate\" } frappe.get_doc(test_record).insert()", "{ \"doctype\": \"Labour Information\", \"title\": \"_Test Labour Information\", \"category\": \"STAFF\", \"name1\":\"abc\", \"sirdar\":\"aaa\", \"status\":\"Permanent\"," ]
[ "author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE, packages=find_packages(), keywords='Solr GOlr golr-schema', classifiers=[ 'License", "= 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR = '<NAME>' EMAIL = '<EMAIL>' REQUIRES_PYTHON = '>=3.7.0' VERSION =", "'golr-schema-generator' DESCRIPTION = 'GOlr Schema Generator' URL = 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR = '<NAME>' EMAIL", "setup, find_packages NAME = 'golr-schema-generator' DESCRIPTION = 'GOlr Schema Generator' URL = 'https://github.com/deepakunni3/golr-schema-generator'", "packages=find_packages(), keywords='Solr GOlr golr-schema', classifiers=[ 'License :: OSI Approved :: BSD License', 'Programming", "URL = 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR = '<NAME>' EMAIL = '<EMAIL>' REQUIRES_PYTHON = '>=3.7.0' VERSION", "'<NAME>' EMAIL = '<EMAIL>' REQUIRES_PYTHON = '>=3.7.0' VERSION = '0.0.1' LICENSE = 'BSD3'", "url=URL, description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE, packages=find_packages(), keywords='Solr GOlr golr-schema', classifiers=[ 'License :: OSI Approved", "{ 'test': ['pytest'] } setup( name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION, long_description=open('README.md').read(),", "= '<EMAIL>' REQUIRES_PYTHON = '>=3.7.0' VERSION = '0.0.1' LICENSE = 'BSD3' REQUIRED =", "'<EMAIL>' REQUIRES_PYTHON = '>=3.7.0' VERSION = '0.0.1' LICENSE = 'BSD3' REQUIRED = [", "'test': ['pytest'] } setup( name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE,", "Schema Generator' URL = 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR = '<NAME>' EMAIL = '<EMAIL>' REQUIRES_PYTHON =", "description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE, packages=find_packages(), keywords='Solr GOlr golr-schema', classifiers=[ 'License :: OSI Approved ::", "python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE, packages=find_packages(), keywords='Solr GOlr golr-schema', classifiers=[ 'License :: OSI", "license=LICENSE, packages=find_packages(), keywords='Solr GOlr golr-schema', classifiers=[ 'License :: OSI Approved :: BSD License',", "] EXTRAS = { 'test': ['pytest'] } setup( name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON,", "BSD License', 'Programming Language :: Python :: 3' ], install_requires=REQUIRED, extras_require=EXTRAS, include_package_data=True )", "EXTRAS = { 'test': ['pytest'] } setup( name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL,", "'0.0.1' LICENSE = 'BSD3' REQUIRED = [ 'PyYAML>=5.3' ] EXTRAS = { 'test':", "REQUIRES_PYTHON = '>=3.7.0' VERSION = '0.0.1' LICENSE = 'BSD3' REQUIRED = [ 'PyYAML>=5.3'", "author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE, packages=find_packages(), keywords='Solr GOlr golr-schema', classifiers=[ 'License ::", "from setuptools import setup, find_packages NAME = 'golr-schema-generator' DESCRIPTION = 'GOlr Schema Generator'", "} setup( name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, 
python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE, packages=find_packages(), keywords='Solr", ":: OSI Approved :: BSD License', 'Programming Language :: Python :: 3' ],", "REQUIRED = [ 'PyYAML>=5.3' ] EXTRAS = { 'test': ['pytest'] } setup( name=NAME,", "Approved :: BSD License', 'Programming Language :: Python :: 3' ], install_requires=REQUIRED, extras_require=EXTRAS,", "golr-schema', classifiers=[ 'License :: OSI Approved :: BSD License', 'Programming Language :: Python", "DESCRIPTION = 'GOlr Schema Generator' URL = 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR = '<NAME>' EMAIL =", "setuptools import setup, find_packages NAME = 'golr-schema-generator' DESCRIPTION = 'GOlr Schema Generator' URL", "'GOlr Schema Generator' URL = 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR = '<NAME>' EMAIL = '<EMAIL>' REQUIRES_PYTHON", "keywords='Solr GOlr golr-schema', classifiers=[ 'License :: OSI Approved :: BSD License', 'Programming Language", "NAME = 'golr-schema-generator' DESCRIPTION = 'GOlr Schema Generator' URL = 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR =", "import setup, find_packages NAME = 'golr-schema-generator' DESCRIPTION = 'GOlr Schema Generator' URL =", "= 'GOlr Schema Generator' URL = 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR = '<NAME>' EMAIL = '<EMAIL>'", "= '>=3.7.0' VERSION = '0.0.1' LICENSE = 'BSD3' REQUIRED = [ 'PyYAML>=5.3' ]", "Generator' URL = 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR = '<NAME>' EMAIL = '<EMAIL>' REQUIRES_PYTHON = '>=3.7.0'", "classifiers=[ 'License :: OSI Approved :: BSD License', 'Programming Language :: Python ::", "= [ 'PyYAML>=5.3' ] EXTRAS = { 'test': ['pytest'] } setup( name=NAME, version=VERSION,", "GOlr golr-schema', classifiers=[ 'License :: OSI Approved :: BSD License', 'Programming Language ::", "AUTHOR = '<NAME>' EMAIL = '<EMAIL>' REQUIRES_PYTHON = '>=3.7.0' VERSION = '0.0.1' LICENSE", "setup( name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE, packages=find_packages(), keywords='Solr GOlr", "= 'golr-schema-generator' DESCRIPTION = 'GOlr Schema Generator' URL = 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR = '<NAME>'", "= 'BSD3' REQUIRED = [ 'PyYAML>=5.3' ] EXTRAS = { 'test': ['pytest'] }", "'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 3'", "name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE, packages=find_packages(), keywords='Solr GOlr golr-schema',", "'BSD3' REQUIRED = [ 'PyYAML>=5.3' ] EXTRAS = { 'test': ['pytest'] } setup(", "'PyYAML>=5.3' ] EXTRAS = { 'test': ['pytest'] } setup( name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL,", "= '0.0.1' LICENSE = 'BSD3' REQUIRED = [ 'PyYAML>=5.3' ] EXTRAS = {", "'https://github.com/deepakunni3/golr-schema-generator' AUTHOR = '<NAME>' EMAIL = '<EMAIL>' REQUIRES_PYTHON = '>=3.7.0' VERSION = '0.0.1'", "version=VERSION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE, packages=find_packages(), keywords='Solr GOlr golr-schema', classifiers=[", "long_description=open('README.md').read(), license=LICENSE, 
packages=find_packages(), keywords='Solr GOlr golr-schema', classifiers=[ 'License :: OSI Approved :: BSD", "find_packages NAME = 'golr-schema-generator' DESCRIPTION = 'GOlr Schema Generator' URL = 'https://github.com/deepakunni3/golr-schema-generator' AUTHOR", "[ 'PyYAML>=5.3' ] EXTRAS = { 'test': ['pytest'] } setup( name=NAME, version=VERSION, author=AUTHOR,", "LICENSE = 'BSD3' REQUIRED = [ 'PyYAML>=5.3' ] EXTRAS = { 'test': ['pytest']", "OSI Approved :: BSD License', 'Programming Language :: Python :: 3' ], install_requires=REQUIRED,", ":: BSD License', 'Programming Language :: Python :: 3' ], install_requires=REQUIRED, extras_require=EXTRAS, include_package_data=True", "EMAIL = '<EMAIL>' REQUIRES_PYTHON = '>=3.7.0' VERSION = '0.0.1' LICENSE = 'BSD3' REQUIRED", "= { 'test': ['pytest'] } setup( name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION,", "= '<NAME>' EMAIL = '<EMAIL>' REQUIRES_PYTHON = '>=3.7.0' VERSION = '0.0.1' LICENSE =", "['pytest'] } setup( name=NAME, version=VERSION, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, description=DESCRIPTION, long_description=open('README.md').read(), license=LICENSE, packages=find_packages(),", "'>=3.7.0' VERSION = '0.0.1' LICENSE = 'BSD3' REQUIRED = [ 'PyYAML>=5.3' ] EXTRAS", "VERSION = '0.0.1' LICENSE = 'BSD3' REQUIRED = [ 'PyYAML>=5.3' ] EXTRAS =" ]
[ "command class AliasedGroup(click.Group): def get_command(self, ctx, cmd_name): rv = click.Group.get_command(self, ctx, cmd_name) if", "if not (json_obj and json_obj != NULL and token): return json_obj index =", "of array)\") elif len(g) > 2: message = \", \".join([str(r) for r in", "key or key with one index (in case of array)\") def _get_index(token): if", "raise click.ClickException(f\"{expression} is a bad expression\") for g in all_tokens: if not g:", "index = _get_index(token) if isinstance(json_obj, list): result = [] for obj in json_obj:", "not token or len(token) <= 1: return None t = token[1] if t.name", "of array)\") elif len(g) == 2: if not ( g[0].name == TokenName.KEY and", "a bad value where a numeric index of >= 0 is expected\") return", "if not (json_obj and json_obj != NULL and tokens): return json_obj if(len(tokens) ==", "supporting unix style multiple dots (such as .. etc)\") if len(g) == 1:", "return retrieve_token_from_json(json_obj, token) first_token = tokens[0] remaining_tokens = tokens[1:] if isinstance(json_obj, list): result", "with valid json content\", required=True) def cli(expression, file): all_tokens = [g for g", "= get_json(file) result = jq_parser(json_obj, all_tokens) result = json.dumps(result, indent=4) click.echo(result) def jq_parser(json_obj,", "the only case for a valid json if isinstance(obj, dict): #case insensitive obj", "a valid json if isinstance(obj, dict): #case insensitive obj = {k.strip().lower() : v", "k,v in json_obj.items()} val = json_obj.get(token[0].value.strip().lower(), NULL) if isinstance(val, list): if index is", "val if index >= len(val): raise click.ClickException(f\"Bad index {index}. There are only {len(val)}", "+ \", \" + str(g[1]) raise click.ClickException(f\"{message} is a bad token. Currently supports", "[x for x in self.list_commands(ctx) if x.startswith(cmd_name)] if not matches: return None elif", "token): if not (json_obj and json_obj != NULL and token): return json_obj index", "NULL = \"null\" #from click documentation to support alias command class AliasedGroup(click.Group): def", "(in case of array)\") def _get_index(token): if not token or len(token) <= 1:", "if not matches: return None elif len(matches) == 1: return click.Group.get_command(self, ctx, matches[0])", "valid json content\", required=True) def cli(expression, file): all_tokens = [g for g in", "insensitive json_obj = {k.strip().lower() : v for k,v in json_obj.items()} val = json_obj.get(token[0].value.strip().lower(),", "style multiple dots (such as .. etc)\") if len(g) == 1: if not", "click.ClickException(f\"{message} is a bad token. 
Currently supports either plain key or key with", "v for k,v in obj.items()} result.append(obj.get(token[0].value.strip().lower(), NULL)) if index is None: return result", "json_obj = {k.strip().lower() : v for k,v in json_obj.items()} val = json_obj.get(token[0].value.strip().lower(), NULL)", "json_obj != NULL and token): return json_obj index = _get_index(token) if isinstance(json_obj, list):", "1: return None t = token[1] if t.name == TokenName.INDEX: if t.value.strip().isdecimal(): return", "json if isinstance(obj, dict): #case insensitive obj = {k.strip().lower() : v for k,v", "val = json_obj.get(token[0].value.strip().lower(), NULL) if isinstance(val, list): if index is None: return val", "!= NULL: result.append(jq_parser(r, remaining_tokens)) else: result.append(NULL) index = _get_index(first_token) if index is None:", "tokens): if not (json_obj and json_obj != NULL and tokens): return json_obj if(len(tokens)", "= retrieve_token_from_json(obj, first_token) if r and r != NULL: result.append(jq_parser(r, remaining_tokens)) else: result.append(NULL)", "2: if not ( g[0].name == TokenName.KEY and g[1].name == TokenName.INDEX): message =", "len(token) <= 1: return None t = token[1] if t.name == TokenName.INDEX: if", "index >= len(result): raise click.ClickException(f\"Bad index {index}. There are only {len(result)} elements in", "plain key or key with one index (in case of array)\") elif len(g)", "ctx, cmd_name) if rv is not None: return rv matches = [x for", "required=True) def cli(expression, file): all_tokens = [g for g in get_grouped_tokens(expression)] validate_tokens(all_tokens, expression)", "retrieve_token_from_json(json_obj, token) first_token = tokens[0] remaining_tokens = tokens[1:] if isinstance(json_obj, list): result =", "obj = {k.strip().lower() : v for k,v in obj.items()} result.append(obj.get(token[0].value.strip().lower(), NULL)) if index", "array)\") elif len(g) > 2: message = \", \".join([str(r) for r in g])", "type=click.File(\"r\"), help=\"File with valid json content\", required=True) def cli(expression, file): all_tokens = [g", "NULL) if isinstance(val, list): if index is None: return val if index >=", "raise click.ClickException(f\"Bad index {index}. There are only {len(result)} elements in the array\") return", "retrieve_token_from_json(json_obj, first_token) return jq_parser(r, remaining_tokens) def retrieve_token_from_json(json_obj, token): if not (json_obj and json_obj", "val def get_json(fp): try: return json.load(fp) except Exception as ex: raise click.ClickException(str(ex)) def", "unix style multiple dots (such as .. 
etc)\") if len(g) == 1: if", "len(g) > 2: message = \", \".join([str(r) for r in g]) raise click.ClickException(f\"{message}", "dict): #case insensitive json_obj = {k.strip().lower() : v for k,v in json_obj.items()} val", "= str(g[0]) + \", \" + str(g[1]) raise click.ClickException(f\"{message} is a bad token.", "<= 1: return None t = token[1] if t.name == TokenName.INDEX: if t.value.strip().isdecimal():", "t.name == TokenName.INDEX: if t.value.strip().isdecimal(): return int(t.value.strip()) else: raise click.ClickException(f\"{t.value} is a bad", "matches: return None elif len(matches) == 1: return click.Group.get_command(self, ctx, matches[0]) ctx.fail('Too many", "def get_command(self, ctx, cmd_name): rv = click.Group.get_command(self, ctx, cmd_name) if rv is not", "result = [] for obj in json_obj: #this is probably the only case", "== 1: return click.Group.get_command(self, ctx, matches[0]) ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))", "if t.value.strip().isdecimal(): return int(t.value.strip()) else: raise click.ClickException(f\"{t.value} is a bad value where a", "supports either plain key or key with one index (in case of array)\")", "return jq_parser(r, remaining_tokens) def retrieve_token_from_json(json_obj, token): if not (json_obj and json_obj != NULL", "one index (in case of array)\") elif len(g) > 2: message = \",", "in the array\") return val[index] return val def get_json(fp): try: return json.load(fp) except", "result = [] for obj in json_obj: r = retrieve_token_from_json(obj, first_token) if r", "only case for a valid json if isinstance(obj, dict): #case insensitive obj =", "result[index] elif isinstance(json_obj, dict): #case insensitive json_obj = {k.strip().lower() : v for k,v", "get_json(file) result = jq_parser(json_obj, all_tokens) result = json.dumps(result, indent=4) click.echo(result) def jq_parser(json_obj, tokens):", "try: return json.load(fp) except Exception as ex: raise click.ClickException(str(ex)) def validate_tokens(all_tokens, expression): if", "if rv is not None: return rv matches = [x for x in", "index {index}. 
There are only {len(val)} elements in the array\") return val[index] return", "json_obj != NULL and tokens): return json_obj if(len(tokens) == 1): token = tokens[0]", "case of array)\") def _get_index(token): if not token or len(token) <= 1: return", "in json_obj: #this is probably the only case for a valid json if", "click documentation to support alias command class AliasedGroup(click.Group): def get_command(self, ctx, cmd_name): rv", "remaining_tokens) def retrieve_token_from_json(json_obj, token): if not (json_obj and json_obj != NULL and token):", "with one index (in case of array)\") elif len(g) > 2: message =", "[] for obj in json_obj: #this is probably the only case for a", "return None t = token[1] if t.name == TokenName.INDEX: if t.value.strip().isdecimal(): return int(t.value.strip())", "for g in all_tokens: if not g: raise click.ClickException(f\"{expression} is a bad expression.", "jq_parser(json_obj, tokens): if not (json_obj and json_obj != NULL and tokens): return json_obj", "array\") return result[index] elif isinstance(json_obj, dict): r = retrieve_token_from_json(json_obj, first_token) return jq_parser(r, remaining_tokens)", "message = \", \".join([str(r) for r in g]) raise click.ClickException(f\"{message} is a bad", "cli(expression, file): all_tokens = [g for g in get_grouped_tokens(expression)] validate_tokens(all_tokens, expression) json_obj =", "in obj.items()} result.append(obj.get(token[0].value.strip().lower(), NULL)) if index is None: return result if index >=", "1: if not ( g[0].name == TokenName.KEY ): message = str(g[0]) raise click.ClickException(f\"{message}", "for obj in json_obj: r = retrieve_token_from_json(obj, first_token) if r and r !=", "= [] for obj in json_obj: #this is probably the only case for", "for g in get_grouped_tokens(expression)] validate_tokens(all_tokens, expression) json_obj = get_json(file) result = jq_parser(json_obj, all_tokens)", "if index >= len(result): raise click.ClickException(f\"Bad index {index}. There are only {len(result)} elements", "not g: raise click.ClickException(f\"{expression} is a bad expression. Currently not supporting unix style", "either plain key or key with one index (in case of array)\") def", "if isinstance(obj, dict): #case insensitive obj = {k.strip().lower() : v for k,v in", "click.ClickException(f\"{expression} is a bad expression\") for g in all_tokens: if not g: raise", "dots (such as .. etc)\") if len(g) == 1: if not ( g[0].name", "key with one index (in case of array)\") elif len(g) == 2: if", "is not None: return rv matches = [x for x in self.list_commands(ctx) if", "array)\") def _get_index(token): if not token or len(token) <= 1: return None t", "g: raise click.ClickException(f\"{expression} is a bad expression. Currently not supporting unix style multiple", "click.ClickException(f\"Bad index {index}. There are only {len(result)} elements in the array\") return result[index]", "str(g[1]) raise click.ClickException(f\"{message} is a bad token. 
Currently supports either plain key or", "= tokens[0] remaining_tokens = tokens[1:] if isinstance(json_obj, list): result = [] for obj", "support alias command class AliasedGroup(click.Group): def get_command(self, ctx, cmd_name): rv = click.Group.get_command(self, ctx,", "return click.Group.get_command(self, ctx, matches[0]) ctx.fail('Too many matches: %s' % ', '.join(sorted(matches))) @click.command() @click.option(\"--expression\",", "or len(token) <= 1: return None t = token[1] if t.name == TokenName.INDEX:", "only {len(result)} elements in the array\") return result[index] elif isinstance(json_obj, dict): #case insensitive", "index (in case of array)\") elif len(g) > 2: message = \", \".join([str(r)", "else: raise click.ClickException(f\"{t.value} is a bad value where a numeric index of >=", "import json import click from tokenizer import get_grouped_tokens, TokenName NULL = \"null\" #from", "valid json if isinstance(obj, dict): #case insensitive obj = {k.strip().lower() : v for", "raise click.ClickException(f\"Bad index {index}. There are only {len(val)} elements in the array\") return", "for k,v in json_obj.items()} val = json_obj.get(token[0].value.strip().lower(), NULL) if isinstance(val, list): if index", "get_grouped_tokens, TokenName NULL = \"null\" #from click documentation to support alias command class", "documentation to support alias command class AliasedGroup(click.Group): def get_command(self, ctx, cmd_name): rv =", "x.startswith(cmd_name)] if not matches: return None elif len(matches) == 1: return click.Group.get_command(self, ctx,", "(in case of array)\") elif len(g) > 2: message = \", \".join([str(r) for", "expression): if not all_tokens or len(all_tokens) == 0: raise click.ClickException(f\"{expression} is a bad", "first_token = tokens[0] remaining_tokens = tokens[1:] if isinstance(json_obj, list): result = [] for", "rv = click.Group.get_command(self, ctx, cmd_name) if rv is not None: return rv matches", "{index}. There are only {len(result)} elements in the array\") return result[index] elif isinstance(json_obj,", "with one index (in case of array)\") def _get_index(token): if not token or", "= jq_parser(json_obj, all_tokens) result = json.dumps(result, indent=4) click.echo(result) def jq_parser(json_obj, tokens): if not", "not ( g[0].name == TokenName.KEY and g[1].name == TokenName.INDEX): message = str(g[0]) +", "click from tokenizer import get_grouped_tokens, TokenName NULL = \"null\" #from click documentation to", "and tokens): return json_obj if(len(tokens) == 1): token = tokens[0] return retrieve_token_from_json(json_obj, token)", "search in the json\", required=True) @click.option(\"--file\", \"-f\", type=click.File(\"r\"), help=\"File with valid json content\",", "json_obj.get(token[0].value.strip().lower(), NULL) if isinstance(val, list): if index is None: return val if index", "rv matches = [x for x in self.list_commands(ctx) if x.startswith(cmd_name)] if not matches:", "expression. Currently not supporting unix style multiple dots (such as .. etc)\") if", "r in g]) raise click.ClickException(f\"{message} is a bad token. 
import json

import click

from tokenizer import get_grouped_tokens, TokenName

NULL = "null"


# From the click documentation: a group that lets an unambiguous command
# prefix act as an alias. (Defined for reuse; the single `cli` command below
# does not currently attach to it.)
class AliasedGroup(click.Group):
    def get_command(self, ctx, cmd_name):
        rv = click.Group.get_command(self, ctx, cmd_name)
        if rv is not None:
            return rv
        matches = [x for x in self.list_commands(ctx) if x.startswith(cmd_name)]
        if not matches:
            return None
        elif len(matches) == 1:
            return click.Group.get_command(self, ctx, matches[0])
        ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))


@click.command()
@click.option("--expression", "-e", type=click.STRING,
              help="jq style expression to search in the json", required=True)
@click.option("--file", "-f", type=click.File("r"),
              help="File with valid json content", required=True)
def cli(expression, file):
    all_tokens = [g for g in get_grouped_tokens(expression)]
    validate_tokens(all_tokens, expression)
    json_obj = get_json(file)
    result = jq_parser(json_obj, all_tokens)
    result = json.dumps(result, indent=4)
    click.echo(result)


def jq_parser(json_obj, tokens):
    "Recursively resolve the grouped tokens against the JSON object."
    if not (json_obj and json_obj != NULL and tokens):
        return json_obj
    if len(tokens) == 1:
        token = tokens[0]
        return retrieve_token_from_json(json_obj, token)
    first_token = tokens[0]
    remaining_tokens = tokens[1:]
    if isinstance(json_obj, list):
        result = []
        for obj in json_obj:
            r = retrieve_token_from_json(obj, first_token)
            if r and r != NULL:
                result.append(jq_parser(r, remaining_tokens))
            else:
                result.append(NULL)
        index = _get_index(first_token)
        if index is None:
            return result
        if index >= len(result):
            raise click.ClickException(
                f"Bad index {index}. There are only {len(result)} elements in the array")
        return result[index]
    elif isinstance(json_obj, dict):
        r = retrieve_token_from_json(json_obj, first_token)
        return jq_parser(r, remaining_tokens)


def retrieve_token_from_json(json_obj, token):
    "Look up a single key token (with an optional index) in the JSON object."
    if not (json_obj and json_obj != NULL and token):
        return json_obj
    index = _get_index(token)
    if isinstance(json_obj, list):
        result = []
        for obj in json_obj:
            # This is probably the only case for a valid json.
            if isinstance(obj, dict):
                # Case-insensitive key lookup.
                obj = {k.strip().lower(): v for k, v in obj.items()}
                result.append(obj.get(token[0].value.strip().lower(), NULL))
        if index is None:
            return result
        if index >= len(result):
            raise click.ClickException(
                f"Bad index {index}. There are only {len(result)} elements in the array")
        return result[index]
    elif isinstance(json_obj, dict):
        # Case-insensitive key lookup.
        json_obj = {k.strip().lower(): v for k, v in json_obj.items()}
        val = json_obj.get(token[0].value.strip().lower(), NULL)
        if isinstance(val, list):
            if index is None:
                return val
            if index >= len(val):
                raise click.ClickException(
                    f"Bad index {index}. There are only {len(val)} elements in the array")
            return val[index]
        return val


def get_json(fp):
    try:
        return json.load(fp)
    except Exception as ex:
        raise click.ClickException(str(ex))


def validate_tokens(all_tokens, expression):
    if not all_tokens or len(all_tokens) == 0:
        raise click.ClickException(f"{expression} is a bad expression")
    for g in all_tokens:
        if not g:
            raise click.ClickException(
                f"{expression} is a bad expression. Currently not supporting "
                "unix-style multiple dots (such as '..' etc)")
        if len(g) == 1:
            if not (g[0].name == TokenName.KEY):
                message = str(g[0])
                raise click.ClickException(
                    f"{message} is a bad token. Currently supports either a plain "
                    "key or a key with one index (in case of an array)")
        elif len(g) == 2:
            if not (g[0].name == TokenName.KEY and g[1].name == TokenName.INDEX):
                message = str(g[0]) + ", " + str(g[1])
                raise click.ClickException(
                    f"{message} is a bad token. Currently supports either a plain "
                    "key or a key with one index (in case of an array)")
        elif len(g) > 2:
            message = ", ".join(str(r) for r in g)
            raise click.ClickException(
                f"{message} is a bad token. Currently supports either a plain "
                "key or a key with one index (in case of an array)")


def _get_index(token):
    "Return the numeric index attached to a token group, if any."
    if not token or len(token) <= 1:
        return None
    t = token[1]
    if t.name == TokenName.INDEX:
        if t.value.strip().isdecimal():
            return int(t.value.strip())
        raise click.ClickException(
            f"{t.value} is a bad value where a numeric index of >= 0 is expected")
    return None
import os

BASE_DIR = os.path.abspath(os.path.dirname(__file__))
MAIN_DB_URL = os.path.join(BASE_DIR, 'bucketlist.sqlite')
TEST_DB_URL = os.path.join(BASE_DIR, 'test.sqlite')


class BaseConfig(object):
    ''' The class holds the base config for each environment '''
    SECRET_KEY = os.getenv('SECRET_KEY', 'This should be changed')
    # The original default was "'sqlite:///' + {}".format(MAIN_DB_URL), which
    # produces the literal text 'sqlite:///' + /path/... rather than a usable
    # URI; build the URI directly instead.
    SQLALCHEMY_DATABASE_URI = os.getenv(
        'DATABASE_URI', 'sqlite:///{}'.format(MAIN_DB_URL))
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    ERROR_404_HELP = False
    DEBUG = False
    TESTING = False


class DevelopmentConfig(BaseConfig):
    ''' configuration for the development environment '''
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + MAIN_DB_URL
    DEBUG = True
    DEVELOPMENT = True


class TestingConfig(BaseConfig):
    ''' config when testing '''
    TESTING = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + TEST_DB_URL
    CSRF_ENABLED = False


class StagingConfig(BaseConfig):
    DEVELOPMENT = True
    DEBUG = True


class ProductionConfig(BaseConfig):
    ''' config for when in production '''
    DEBUG = False
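# Usage sketch (an assumption, not part of the file above): one conventional
# way to select among these classes when building the Flask app. The
# config_by_name mapping and the APP_SETTINGS variable name are illustrative
# choices only; Flask's app.config.from_object() does the actual loading.
def create_app_sketch():
    from flask import Flask

    config_by_name = {
        "development": DevelopmentConfig,
        "testing": TestingConfig,
        "staging": StagingConfig,
        "production": ProductionConfig,
    }
    app = Flask(__name__)
    app.config.from_object(config_by_name[os.getenv("APP_SETTINGS", "development")])
    return app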
import datetime
import os
from typing import List, Optional
import unittest

from parentopticon.db import test_utilities
from parentopticon.db.model import ColumnInteger, ColumnText, Model


class ModelTests(test_utilities.DBTestCase):
    "Test all of our logic around the model class."

    class MyTable(Model):
        COLUMNS = {
            "id": ColumnInteger(autoincrement=True, primary_key=True),
            "count": ColumnInteger(),
            "name": ColumnText(null=True),
        }

    def _makerows(self, names: Optional[List[str]] = None):
        "Make a few rows. Useful for many tests."
        names = names or ["foo", "bar", "baz"]
        return {
            ModelTests.MyTable.insert(self.db, count=(i + 1) * 2, name=name)
            for i, name in enumerate(names)
        }

    def setUp(self):
        super().setUp()
        self.db.execute_commit_return(ModelTests.MyTable.create_statement())
        self.db.execute_commit_return(ModelTests.MyTable.truncate_statement())

    def test_create_statement(self):
        "Can we get a proper create table clause?"
        result = ModelTests.MyTable.create_statement()
        expected = "\n".join((
            "CREATE TABLE IF NOT EXISTS MyTable (",
            "count INTEGER,",
            "id INTEGER PRIMARY KEY AUTOINCREMENT,",
            "name TEXT",
            ");",
        ))
        self.assertEqual(result, expected)

    def test_insert(self):
        "Can we insert a row into a table?"
        rowid = ModelTests.MyTable.insert(self.db, count=3, name="foobar")
        found = self.db.execute("SELECT count, name FROM MyTable").fetchall()
        self.assertEqual(len(found), 1)

    def test_get(self):
        "Can we get a row from the table?"
        rowid = ModelTests.MyTable.insert(self.db, count=3, name="foobar")
        result = ModelTests.MyTable.get(self.db, rowid)
        self.assertEqual(result.id, rowid)
        self.assertEqual(result.count, 3)
        self.assertEqual(result.name, "foobar")

    def test_get_none(self):
        "Can we get None when the row does not exist?"
        result = ModelTests.MyTable.get(self.db, -1)
        self.assertIs(result, None)

    def test_list_all(self):
        "Can we get several rows from the table?"
        rowids = self._makerows()
        results = ModelTests.MyTable.list(self.db)
        self.assertEqual({result.id for result in results}, rowids)

    def test_list_some(self):
        "Can we get several rows from the table with a where clause?"
        rowids = self._makerows()
        results = ModelTests.MyTable.list_where(self.db, where="count >= 4")
        self.assertEqual({result.count for result in results}, {4, 6})

    def test_list_with_none(self):
        "Can we get a list where an item is NULL?"
        rowids = self._makerows(names=["foo", None, "bar"])
        results = ModelTests.MyTable.list(self.db, name=None)
        self.assertEqual({result.count for result in results}, {4})

    def test_search_not_found(self):
        "Can we search and not find something?"
        results = ModelTests.MyTable.search(self.db, name="sir-not-appearing")
        self.assertIs(results, None)

    def test_search_one(self):
        "Can we search and find a single row?"
        rowids = self._makerows()
        results = ModelTests.MyTable.search(self.db, name="foo")
        self.assertEqual(results.name, "foo")
        self.assertEqual(results.count, 2)

    def test_search_many(self):
        "Do we error when we have multiple matches?"
        self._makerows(names=["foo", "foo", "bar"])
        with self.assertRaises(ValueError):
            ModelTests.MyTable.search(self.db, name="foo")

    def test_search_with_none(self):
        "Do we properly search for NULL columns?"
        self._makerows(names=["foo", None, "bar"])
        results = ModelTests.MyTable.search(self.db, name=None)
        self.assertEqual(results.name, None)
        self.assertEqual(results.count, 4)

    def test_update(self):
        "Can we update a row with update()?"
        rows = self._makerows(names=["foo"])
        row_id = list(rows)[0]
        ModelTests.MyTable.update(self.db, row_id, name="biff")
        results = ModelTests.MyTable.get(self.db, row_id)
        self.assertEqual(results.name, "biff")

    def test_update_multiple(self):
        "Can we update a row with multiple values?"
        rows = self._makerows(names=["foo"])
        row_id = list(rows)[0]
        ModelTests.MyTable.update(self.db, row_id, name="biff", count=100)
        results = ModelTests.MyTable.get(self.db, row_id)
        self.assertEqual(results.count, 100)
= { \"id\": ColumnInteger(autoincrement=True, primary_key=True), \"count\": ColumnInteger(), \"name\": ColumnText(null=True), } def _makerows(self,", "test_update_multiple(self): \"Can we update a row with multiple values?\" rows = self._makerows(names=[\"foo\"]) row_id", "table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") found = self.db.execute(\"SELECT count, name FROM MyTable\").fetchall()", "name=\"sir-not-appearing\") self.assertIs(results, None) def test_search_one(self): \"Can we search and find a single row?\"", "Optional import unittest from parentopticon.db import test_utilities from parentopticon.db.model import ColumnInteger, ColumnText, Model", "\"Can we get a list where an item is NULL?\" rowids = self._makerows(names=[\"foo\",", "where clause?\" rowids = self._makerows() results = ModelTests.MyTable.list_where(self.db, where=\"count >= 4\") self.assertEqual({result.count for", "many tests.\" names = names or [\"foo\", \"bar\", \"baz\"] return { ModelTests.MyTable.insert(self.db, count=(i+1)*2,", "a list where an item is NULL?\" rowids = self._makerows(names=[\"foo\", None, \"bar\"]) results", "multiple matches?\" self._makerows(names=[\"foo\", \"foo\", \"bar\"]) with self.assertRaises(ValueError): ModelTests.MyTable.search(self.db, name=\"foo\") def test_search_with_none(self): \"Do we", "count, name FROM MyTable\").fetchall() self.assertEqual(len(found), 1) def test_get(self): \"Can we get a row", "rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") result = ModelTests.MyTable.get(self.db, rowid) self.assertEqual(result.id, rowid) self.assertEqual(result.count, 3)", "IF NOT EXISTS MyTable (\", \"count INTEGER,\", \"id INTEGER PRIMARY KEY AUTOINCREMENT,\", \"name", "test_search_one(self): \"Can we search and find a single row?\" rowids = self._makerows() results", "something?\" results = ModelTests.MyTable.search(self.db, name=\"sir-not-appearing\") self.assertIs(results, None) def test_search_one(self): \"Can we search and", "for result in results}, {4, 6}) def test_list_with_none(self): \"Can we get a list", "we get a proper create table clause?\" result = ModelTests.MyTable.create_statement() expected = \"\\n\".join((", "rowids = self._makerows(names=[\"foo\", None, \"bar\"]) results = ModelTests.MyTable.list(self.db, name=None) self.assertEqual({result.count for result in", "from typing import List, Optional import unittest from parentopticon.db import test_utilities from parentopticon.db.model", "ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") result = ModelTests.MyTable.get(self.db, rowid) self.assertEqual(result.id, rowid) self.assertEqual(result.count, 3) self.assertEqual(result.name, \"foobar\")", "get several rows from the table with a where clause?\" rowids = self._makerows()", "= ModelTests.MyTable.search(self.db, name=\"sir-not-appearing\") self.assertIs(results, None) def test_search_one(self): \"Can we search and find a", "logic around the model class.\" class MyTable(Model): COLUMNS = { \"id\": ColumnInteger(autoincrement=True, primary_key=True),", "\"id\": ColumnInteger(autoincrement=True, primary_key=True), \"count\": ColumnInteger(), \"name\": ColumnText(null=True), } def _makerows(self, names: Optional[List[str]] =", "rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") found = self.db.execute(\"SELECT count, name FROM MyTable\").fetchall() self.assertEqual(len(found),", "get several rows from the table?\" rowids = self._makerows() results = ModelTests.MyTable.list(self.db) 
self.assertEqual({result.id", "ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") found = self.db.execute(\"SELECT count, name FROM MyTable\").fetchall() self.assertEqual(len(found), 1) def", "rows from the table?\" rowids = self._makerows() results = ModelTests.MyTable.list(self.db) self.assertEqual({result.id for result", "single row?\" rowids = self._makerows() results = ModelTests.MyTable.search(self.db, name=\"foo\") self.assertEqual(results.name, \"foo\") self.assertEqual(results.count, 2)", "\"Can we get a proper create table clause?\" result = ModelTests.MyTable.create_statement() expected =", "= None): \"Make a few rows. Useful for many tests.\" names = names", "\"bar\", \"baz\"] return { ModelTests.MyTable.insert(self.db, count=(i+1)*2, name=name) for i, name in enumerate(names) }", "a row into a table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") found = self.db.execute(\"SELECT", "test_list_some(self): \"Can we get several rows from the table with a where clause?\"", "\"Test all of our logic around the model class.\" class MyTable(Model): COLUMNS =", "several rows from the table with a where clause?\" rowids = self._makerows() results", "(\", \"count INTEGER,\", \"id INTEGER PRIMARY KEY AUTOINCREMENT,\", \"name TEXT\", \");\", )) self.assertEqual(result,", "def test_search_not_found(self): \"Can we search and not find something?\" results = ModelTests.MyTable.search(self.db, name=\"sir-not-appearing\")", "def test_list_some(self): \"Can we get several rows from the table with a where", "Optional[List[str]] = None): \"Make a few rows. Useful for many tests.\" names =", "Model class ModelTests(test_utilities.DBTestCase): \"Test all of our logic around the model class.\" class", "we get several rows from the table?\" rowids = self._makerows() results = ModelTests.MyTable.list(self.db)", "List, Optional import unittest from parentopticon.db import test_utilities from parentopticon.db.model import ColumnInteger, ColumnText,", "row_id) self.assertEqual(results.name, \"biff\") def test_update_multiple(self): \"Can we update a row with multiple values?\"", "name=\"foobar\") result = ModelTests.MyTable.get(self.db, rowid) self.assertEqual(result.id, rowid) self.assertEqual(result.count, 3) self.assertEqual(result.name, \"foobar\") def test_get_none(self):", "def test_list_all(self): \"Can we get several rows from the table?\" rowids = self._makerows()", "self.assertIs(results, None) def test_search_one(self): \"Can we search and find a single row?\" rowids", "\"Can we get None when the row does not exist?\" result = ModelTests.MyTable.get(self.db,", "rowid) self.assertEqual(result.id, rowid) self.assertEqual(result.count, 3) self.assertEqual(result.name, \"foobar\") def test_get_none(self): \"Can we get None", "= \"\\n\".join(( \"CREATE TABLE IF NOT EXISTS MyTable (\", \"count INTEGER,\", \"id INTEGER", "update a row with multiple values?\" rows = self._makerows(names=[\"foo\"]) row_id = list(rows)[0] ModelTests.MyTable.update(self.db,", "import os from typing import List, Optional import unittest from parentopticon.db import test_utilities", "self.assertEqual(results.name, \"foo\") self.assertEqual(results.count, 2) def test_search_many(self): \"Do we error when we have multiple", "not find something?\" results = ModelTests.MyTable.search(self.db, name=\"sir-not-appearing\") self.assertIs(results, None) def test_search_one(self): \"Can we", "test_get(self): \"Can we get a row from the table?\" rowid = ModelTests.MyTable.insert(self.db, count=3,", 
"class ModelTests(test_utilities.DBTestCase): \"Test all of our logic around the model class.\" class MyTable(Model):", "result = ModelTests.MyTable.create_statement() expected = \"\\n\".join(( \"CREATE TABLE IF NOT EXISTS MyTable (\",", "class MyTable(Model): COLUMNS = { \"id\": ColumnInteger(autoincrement=True, primary_key=True), \"count\": ColumnInteger(), \"name\": ColumnText(null=True), }", "-1) self.assertIs(result, None) def test_list_all(self): \"Can we get several rows from the table?\"", "a proper create table clause?\" result = ModelTests.MyTable.create_statement() expected = \"\\n\".join(( \"CREATE TABLE", "a where clause?\" rowids = self._makerows() results = ModelTests.MyTable.list_where(self.db, where=\"count >= 4\") self.assertEqual({result.count", "ModelTests.MyTable.search(self.db, name=None) self.assertEqual(results.name, None) self.assertEqual(results.count, 4) def test_update(self): \"Can we update a row", "result = ModelTests.MyTable.get(self.db, -1) self.assertIs(result, None) def test_list_all(self): \"Can we get several rows", "datetime import os from typing import List, Optional import unittest from parentopticon.db import", "def test_get_none(self): \"Can we get None when the row does not exist?\" result", "PRIMARY KEY AUTOINCREMENT,\", \"name TEXT\", \");\", )) self.assertEqual(result, expected) def test_insert(self): \"Can we", "None) def test_list_all(self): \"Can we get several rows from the table?\" rowids =", "for i, name in enumerate(names) } def setUp(self): super().setUp() self.db.execute_commit_return(ModelTests.MyTable.create_statement()) self.db.execute_commit_return(ModelTests.MyTable.truncate_statement()) def test_create_statement(self):", "update a row with update()?\" rows = self._makerows(names=[\"foo\"]) row_id = list(rows)[0] ModelTests.MyTable.update(self.db, row_id,", "row_id, name=\"biff\") results = ModelTests.MyTable.get(self.db, row_id) self.assertEqual(results.name, \"biff\") def test_update_multiple(self): \"Can we update", "count=3, name=\"foobar\") found = self.db.execute(\"SELECT count, name FROM MyTable\").fetchall() self.assertEqual(len(found), 1) def test_get(self):", "self.assertEqual(result.count, 3) self.assertEqual(result.name, \"foobar\") def test_get_none(self): \"Can we get None when the row", "} def _makerows(self, names: Optional[List[str]] = None): \"Make a few rows. 
Useful for", "name=name) for i, name in enumerate(names) } def setUp(self): super().setUp() self.db.execute_commit_return(ModelTests.MyTable.create_statement()) self.db.execute_commit_return(ModelTests.MyTable.truncate_statement()) def", "found = self.db.execute(\"SELECT count, name FROM MyTable\").fetchall() self.assertEqual(len(found), 1) def test_get(self): \"Can we", "\"\\n\".join(( \"CREATE TABLE IF NOT EXISTS MyTable (\", \"count INTEGER,\", \"id INTEGER PRIMARY", "where an item is NULL?\" rowids = self._makerows(names=[\"foo\", None, \"bar\"]) results = ModelTests.MyTable.list(self.db,", "= ModelTests.MyTable.get(self.db, rowid) self.assertEqual(result.id, rowid) self.assertEqual(result.count, 3) self.assertEqual(result.name, \"foobar\") def test_get_none(self): \"Can we", "model class.\" class MyTable(Model): COLUMNS = { \"id\": ColumnInteger(autoincrement=True, primary_key=True), \"count\": ColumnInteger(), \"name\":", "class.\" class MyTable(Model): COLUMNS = { \"id\": ColumnInteger(autoincrement=True, primary_key=True), \"count\": ColumnInteger(), \"name\": ColumnText(null=True),", "self.db.execute(\"SELECT count, name FROM MyTable\").fetchall() self.assertEqual(len(found), 1) def test_get(self): \"Can we get a", "name=\"foo\") def test_search_with_none(self): \"Do we properly search for NULL columns?\" self._makerows(names=[\"foo\", None, \"bar\"])", "we get several rows from the table with a where clause?\" rowids =", "few rows. Useful for many tests.\" names = names or [\"foo\", \"bar\", \"baz\"]", "ModelTests.MyTable.list(self.db) self.assertEqual({result.id for result in results}, rowids) def test_list_some(self): \"Can we get several", "self._makerows(names=[\"foo\", None, \"bar\"]) results = ModelTests.MyTable.list(self.db, name=None) self.assertEqual({result.count for result in results}, {4})", "test_list_with_none(self): \"Can we get a list where an item is NULL?\" rowids =", "or [\"foo\", \"bar\", \"baz\"] return { ModelTests.MyTable.insert(self.db, count=(i+1)*2, name=name) for i, name in", "for result in results}, rowids) def test_list_some(self): \"Can we get several rows from", "results = ModelTests.MyTable.search(self.db, name=\"sir-not-appearing\") self.assertIs(results, None) def test_search_one(self): \"Can we search and find", "a row from the table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") result = ModelTests.MyTable.get(self.db,", "test_update(self): \"Can we update a row with update()?\" rows = self._makerows(names=[\"foo\"]) row_id =", "row does not exist?\" result = ModelTests.MyTable.get(self.db, -1) self.assertIs(result, None) def test_list_all(self): \"Can", "test_create_statement(self): \"Can we get a proper create table clause?\" result = ModelTests.MyTable.create_statement() expected", "= list(rows)[0] ModelTests.MyTable.update(self.db, row_id, name=\"biff\") results = ModelTests.MyTable.get(self.db, row_id) self.assertEqual(results.name, \"biff\") def test_update_multiple(self):", "MyTable (\", \"count INTEGER,\", \"id INTEGER PRIMARY KEY AUTOINCREMENT,\", \"name TEXT\", \");\", ))", "rowids = self._makerows() results = ModelTests.MyTable.search(self.db, name=\"foo\") self.assertEqual(results.name, \"foo\") self.assertEqual(results.count, 2) def test_search_many(self):", "self.assertEqual(result, expected) def test_insert(self): \"Can we insert a row into a table?\" rowid", "self.assertEqual(result.name, \"foobar\") def test_get_none(self): \"Can we get None when the row does not", "we get a list where an item is 
NULL?\" rowids = self._makerows(names=[\"foo\", None,", "def test_insert(self): \"Can we insert a row into a table?\" rowid = ModelTests.MyTable.insert(self.db,", "row?\" rowids = self._makerows() results = ModelTests.MyTable.search(self.db, name=\"foo\") self.assertEqual(results.name, \"foo\") self.assertEqual(results.count, 2) def", "results = ModelTests.MyTable.list(self.db) self.assertEqual({result.id for result in results}, rowids) def test_list_some(self): \"Can we", "row into a table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") found = self.db.execute(\"SELECT count,", "{ \"id\": ColumnInteger(autoincrement=True, primary_key=True), \"count\": ColumnInteger(), \"name\": ColumnText(null=True), } def _makerows(self, names: Optional[List[str]]", "for result in results}, {4}) def test_search_not_found(self): \"Can we search and not find", "where=\"count >= 4\") self.assertEqual({result.count for result in results}, {4, 6}) def test_list_with_none(self): \"Can", "ModelTests.MyTable.search(self.db, name=\"foo\") self.assertEqual(results.name, \"foo\") self.assertEqual(results.count, 2) def test_search_many(self): \"Do we error when we", "def setUp(self): super().setUp() self.db.execute_commit_return(ModelTests.MyTable.create_statement()) self.db.execute_commit_return(ModelTests.MyTable.truncate_statement()) def test_create_statement(self): \"Can we get a proper create", "None when the row does not exist?\" result = ModelTests.MyTable.get(self.db, -1) self.assertIs(result, None)", "= self._makerows() results = ModelTests.MyTable.search(self.db, name=\"foo\") self.assertEqual(results.name, \"foo\") self.assertEqual(results.count, 2) def test_search_many(self): \"Do", "\"Can we insert a row into a table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\")", "INTEGER,\", \"id INTEGER PRIMARY KEY AUTOINCREMENT,\", \"name TEXT\", \");\", )) self.assertEqual(result, expected) def", "\"baz\"] return { ModelTests.MyTable.insert(self.db, count=(i+1)*2, name=name) for i, name in enumerate(names) } def", "= names or [\"foo\", \"bar\", \"baz\"] return { ModelTests.MyTable.insert(self.db, count=(i+1)*2, name=name) for i,", "in results}, {4, 6}) def test_list_with_none(self): \"Can we get a list where an", "find a single row?\" rowids = self._makerows() results = ModelTests.MyTable.search(self.db, name=\"foo\") self.assertEqual(results.name, \"foo\")", "ModelTests.MyTable.get(self.db, row_id) self.assertEqual(results.name, \"biff\") def test_update_multiple(self): \"Can we update a row with multiple", "\"Can we get a row from the table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\")", "self.assertEqual({result.count for result in results}, {4, 6}) def test_list_with_none(self): \"Can we get a", "{4}) def test_search_not_found(self): \"Can we search and not find something?\" results = ModelTests.MyTable.search(self.db,", "self._makerows(names=[\"foo\"]) row_id = list(rows)[0] ModelTests.MyTable.update(self.db, row_id, name=\"biff\", count=100) results = ModelTests.MyTable.get(self.db, row_id) self.assertEqual(results.count,", "self.assertEqual({result.id for result in results}, rowids) def test_list_some(self): \"Can we get several rows", "\"Can we get several rows from the table?\" rowids = self._makerows() results =", "self.assertEqual(len(found), 1) def test_get(self): \"Can we get a row from the table?\" rowid", "find something?\" results = ModelTests.MyTable.search(self.db, name=\"sir-not-appearing\") self.assertIs(results, None) 
def test_search_one(self): \"Can we search", "test_search_many(self): \"Do we error when we have multiple matches?\" self._makerows(names=[\"foo\", \"foo\", \"bar\"]) with", "count=(i+1)*2, name=name) for i, name in enumerate(names) } def setUp(self): super().setUp() self.db.execute_commit_return(ModelTests.MyTable.create_statement()) self.db.execute_commit_return(ModelTests.MyTable.truncate_statement())", "NOT EXISTS MyTable (\", \"count INTEGER,\", \"id INTEGER PRIMARY KEY AUTOINCREMENT,\", \"name TEXT\",", "search and not find something?\" results = ModelTests.MyTable.search(self.db, name=\"sir-not-appearing\") self.assertIs(results, None) def test_search_one(self):", "rows = self._makerows(names=[\"foo\"]) row_id = list(rows)[0] ModelTests.MyTable.update(self.db, row_id, name=\"biff\") results = ModelTests.MyTable.get(self.db, row_id)", "table?\" rowids = self._makerows() results = ModelTests.MyTable.list(self.db) self.assertEqual({result.id for result in results}, rowids)", "\"Make a few rows. Useful for many tests.\" names = names or [\"foo\",", "typing import List, Optional import unittest from parentopticon.db import test_utilities from parentopticon.db.model import", "\"name TEXT\", \");\", )) self.assertEqual(result, expected) def test_insert(self): \"Can we insert a row", "row with update()?\" rows = self._makerows(names=[\"foo\"]) row_id = list(rows)[0] ModelTests.MyTable.update(self.db, row_id, name=\"biff\") results", "def test_search_with_none(self): \"Do we properly search for NULL columns?\" self._makerows(names=[\"foo\", None, \"bar\"]) results", "the table with a where clause?\" rowids = self._makerows() results = ModelTests.MyTable.list_where(self.db, where=\"count", "= list(rows)[0] ModelTests.MyTable.update(self.db, row_id, name=\"biff\", count=100) results = ModelTests.MyTable.get(self.db, row_id) self.assertEqual(results.count, 100) self.assertEqual(results.name,", "3) self.assertEqual(result.name, \"foobar\") def test_get_none(self): \"Can we get None when the row does", "rowids) def test_list_some(self): \"Can we get several rows from the table with a", "= self.db.execute(\"SELECT count, name FROM MyTable\").fetchall() self.assertEqual(len(found), 1) def test_get(self): \"Can we get", "get a row from the table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") result =", "self.assertEqual(results.name, \"biff\") def test_update_multiple(self): \"Can we update a row with multiple values?\" rows", "update()?\" rows = self._makerows(names=[\"foo\"]) row_id = list(rows)[0] ModelTests.MyTable.update(self.db, row_id, name=\"biff\") results = ModelTests.MyTable.get(self.db,", "FROM MyTable\").fetchall() self.assertEqual(len(found), 1) def test_get(self): \"Can we get a row from the", "test_list_all(self): \"Can we get several rows from the table?\" rowids = self._makerows() results", "results = ModelTests.MyTable.list(self.db, name=None) self.assertEqual({result.count for result in results}, {4}) def test_search_not_found(self): \"Can", "self._makerows() results = ModelTests.MyTable.list(self.db) self.assertEqual({result.id for result in results}, rowids) def test_list_some(self): \"Can", "ModelTests.MyTable.list_where(self.db, where=\"count >= 4\") self.assertEqual({result.count for result in results}, {4, 6}) def test_list_with_none(self):", "count=3, name=\"foobar\") result = ModelTests.MyTable.get(self.db, rowid) self.assertEqual(result.id, rowid) self.assertEqual(result.count, 3) self.assertEqual(result.name, \"foobar\") def", "\"id INTEGER 
PRIMARY KEY AUTOINCREMENT,\", \"name TEXT\", \");\", )) self.assertEqual(result, expected) def test_insert(self):", "expected) def test_insert(self): \"Can we insert a row into a table?\" rowid =", "we search and find a single row?\" rowids = self._makerows() results = ModelTests.MyTable.search(self.db,", "ModelTests.MyTable.search(self.db, name=\"sir-not-appearing\") self.assertIs(results, None) def test_search_one(self): \"Can we search and find a single", "ColumnInteger, ColumnText, Model class ModelTests(test_utilities.DBTestCase): \"Test all of our logic around the model", "table clause?\" result = ModelTests.MyTable.create_statement() expected = \"\\n\".join(( \"CREATE TABLE IF NOT EXISTS", "\"biff\") def test_update_multiple(self): \"Can we update a row with multiple values?\" rows =", "self.assertEqual(result.id, rowid) self.assertEqual(result.count, 3) self.assertEqual(result.name, \"foobar\") def test_get_none(self): \"Can we get None when", "test_utilities from parentopticon.db.model import ColumnInteger, ColumnText, Model class ModelTests(test_utilities.DBTestCase): \"Test all of our", "in results}, {4}) def test_search_not_found(self): \"Can we search and not find something?\" results", "our logic around the model class.\" class MyTable(Model): COLUMNS = { \"id\": ColumnInteger(autoincrement=True,", "\"Can we update a row with multiple values?\" rows = self._makerows(names=[\"foo\"]) row_id =", "\"bar\"]) results = ModelTests.MyTable.list(self.db, name=None) self.assertEqual({result.count for result in results}, {4}) def test_search_not_found(self):", "ModelTests.MyTable.get(self.db, rowid) self.assertEqual(result.id, rowid) self.assertEqual(result.count, 3) self.assertEqual(result.name, \"foobar\") def test_get_none(self): \"Can we get", "MyTable\").fetchall() self.assertEqual(len(found), 1) def test_get(self): \"Can we get a row from the table?\"", "a single row?\" rowids = self._makerows() results = ModelTests.MyTable.search(self.db, name=\"foo\") self.assertEqual(results.name, \"foo\") self.assertEqual(results.count,", "EXISTS MyTable (\", \"count INTEGER,\", \"id INTEGER PRIMARY KEY AUTOINCREMENT,\", \"name TEXT\", \");\",", "exist?\" result = ModelTests.MyTable.get(self.db, -1) self.assertIs(result, None) def test_list_all(self): \"Can we get several", "results = ModelTests.MyTable.search(self.db, name=None) self.assertEqual(results.name, None) self.assertEqual(results.count, 4) def test_update(self): \"Can we update", "\"count\": ColumnInteger(), \"name\": ColumnText(null=True), } def _makerows(self, names: Optional[List[str]] = None): \"Make a", "parentopticon.db import test_utilities from parentopticon.db.model import ColumnInteger, ColumnText, Model class ModelTests(test_utilities.DBTestCase): \"Test all", "[\"foo\", \"bar\", \"baz\"] return { ModelTests.MyTable.insert(self.db, count=(i+1)*2, name=name) for i, name in enumerate(names)", "we insert a row into a table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") found", "i, name in enumerate(names) } def setUp(self): super().setUp() self.db.execute_commit_return(ModelTests.MyTable.create_statement()) self.db.execute_commit_return(ModelTests.MyTable.truncate_statement()) def test_create_statement(self): \"Can", "import unittest from parentopticon.db import test_utilities from parentopticon.db.model import ColumnInteger, ColumnText, Model class", "= ModelTests.MyTable.search(self.db, name=None) self.assertEqual(results.name, None) self.assertEqual(results.count, 4) def test_update(self): \"Can 
we update a", "ModelTests.MyTable.search(self.db, name=\"foo\") def test_search_with_none(self): \"Do we properly search for NULL columns?\" self._makerows(names=[\"foo\", None,", "rowids = self._makerows() results = ModelTests.MyTable.list_where(self.db, where=\"count >= 4\") self.assertEqual({result.count for result in", "4\") self.assertEqual({result.count for result in results}, {4, 6}) def test_list_with_none(self): \"Can we get", "= ModelTests.MyTable.list_where(self.db, where=\"count >= 4\") self.assertEqual({result.count for result in results}, {4, 6}) def", "rows. Useful for many tests.\" names = names or [\"foo\", \"bar\", \"baz\"] return", "in enumerate(names) } def setUp(self): super().setUp() self.db.execute_commit_return(ModelTests.MyTable.create_statement()) self.db.execute_commit_return(ModelTests.MyTable.truncate_statement()) def test_create_statement(self): \"Can we get", "a row with update()?\" rows = self._makerows(names=[\"foo\"]) row_id = list(rows)[0] ModelTests.MyTable.update(self.db, row_id, name=\"biff\")", "for NULL columns?\" self._makerows(names=[\"foo\", None, \"bar\"]) results = ModelTests.MyTable.search(self.db, name=None) self.assertEqual(results.name, None) self.assertEqual(results.count,", "NULL columns?\" self._makerows(names=[\"foo\", None, \"bar\"]) results = ModelTests.MyTable.search(self.db, name=None) self.assertEqual(results.name, None) self.assertEqual(results.count, 4)", "import test_utilities from parentopticon.db.model import ColumnInteger, ColumnText, Model class ModelTests(test_utilities.DBTestCase): \"Test all of", "= ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") result = ModelTests.MyTable.get(self.db, rowid) self.assertEqual(result.id, rowid) self.assertEqual(result.count, 3) self.assertEqual(result.name,", "self._makerows() results = ModelTests.MyTable.list_where(self.db, where=\"count >= 4\") self.assertEqual({result.count for result in results}, {4,", "\"bar\"]) results = ModelTests.MyTable.search(self.db, name=None) self.assertEqual(results.name, None) self.assertEqual(results.count, 4) def test_update(self): \"Can we", "get None when the row does not exist?\" result = ModelTests.MyTable.get(self.db, -1) self.assertIs(result,", "import ColumnInteger, ColumnText, Model class ModelTests(test_utilities.DBTestCase): \"Test all of our logic around the", "ModelTests.MyTable.get(self.db, -1) self.assertIs(result, None) def test_list_all(self): \"Can we get several rows from the", "= ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") found = self.db.execute(\"SELECT count, name FROM MyTable\").fetchall() self.assertEqual(len(found), 1)", "name=None) self.assertEqual({result.count for result in results}, {4}) def test_search_not_found(self): \"Can we search and", "we get a row from the table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") result", "from the table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") result = ModelTests.MyTable.get(self.db, rowid) self.assertEqual(result.id,", "row from the table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") result = ModelTests.MyTable.get(self.db, rowid)", "test_insert(self): \"Can we insert a row into a table?\" rowid = ModelTests.MyTable.insert(self.db, count=3,", "self.assertEqual(results.count, 2) def test_search_many(self): \"Do we error when we have multiple matches?\" self._makerows(names=[\"foo\",", "super().setUp() self.db.execute_commit_return(ModelTests.MyTable.create_statement()) 
self.db.execute_commit_return(ModelTests.MyTable.truncate_statement()) def test_create_statement(self): \"Can we get a proper create table clause?\"", "test_search_with_none(self): \"Do we properly search for NULL columns?\" self._makerows(names=[\"foo\", None, \"bar\"]) results =", "rows from the table with a where clause?\" rowids = self._makerows() results =", "} def setUp(self): super().setUp() self.db.execute_commit_return(ModelTests.MyTable.create_statement()) self.db.execute_commit_return(ModelTests.MyTable.truncate_statement()) def test_create_statement(self): \"Can we get a proper", "ModelTests(test_utilities.DBTestCase): \"Test all of our logic around the model class.\" class MyTable(Model): COLUMNS", "for many tests.\" names = names or [\"foo\", \"bar\", \"baz\"] return { ModelTests.MyTable.insert(self.db,", "list where an item is NULL?\" rowids = self._makerows(names=[\"foo\", None, \"bar\"]) results =", "names: Optional[List[str]] = None): \"Make a few rows. Useful for many tests.\" names", "values?\" rows = self._makerows(names=[\"foo\"]) row_id = list(rows)[0] ModelTests.MyTable.update(self.db, row_id, name=\"biff\", count=100) results =", "\"bar\"]) with self.assertRaises(ValueError): ModelTests.MyTable.search(self.db, name=\"foo\") def test_search_with_none(self): \"Do we properly search for NULL", "we search and not find something?\" results = ModelTests.MyTable.search(self.db, name=\"sir-not-appearing\") self.assertIs(results, None) def", "None, \"bar\"]) results = ModelTests.MyTable.list(self.db, name=None) self.assertEqual({result.count for result in results}, {4}) def", "self.db.execute_commit_return(ModelTests.MyTable.create_statement()) self.db.execute_commit_return(ModelTests.MyTable.truncate_statement()) def test_create_statement(self): \"Can we get a proper create table clause?\" result", "\"Can we get several rows from the table with a where clause?\" rowids", "self._makerows() results = ModelTests.MyTable.search(self.db, name=\"foo\") self.assertEqual(results.name, \"foo\") self.assertEqual(results.count, 2) def test_search_many(self): \"Do we", "unittest from parentopticon.db import test_utilities from parentopticon.db.model import ColumnInteger, ColumnText, Model class ModelTests(test_utilities.DBTestCase):", "test_get_none(self): \"Can we get None when the row does not exist?\" result =", "name=\"biff\") results = ModelTests.MyTable.get(self.db, row_id) self.assertEqual(results.name, \"biff\") def test_update_multiple(self): \"Can we update a", "ModelTests.MyTable.create_statement() expected = \"\\n\".join(( \"CREATE TABLE IF NOT EXISTS MyTable (\", \"count INTEGER,\",", "result in results}, rowids) def test_list_some(self): \"Can we get several rows from the", "names or [\"foo\", \"bar\", \"baz\"] return { ModelTests.MyTable.insert(self.db, count=(i+1)*2, name=name) for i, name", "name FROM MyTable\").fetchall() self.assertEqual(len(found), 1) def test_get(self): \"Can we get a row from", "proper create table clause?\" result = ModelTests.MyTable.create_statement() expected = \"\\n\".join(( \"CREATE TABLE IF", "clause?\" rowids = self._makerows() results = ModelTests.MyTable.list_where(self.db, where=\"count >= 4\") self.assertEqual({result.count for result", "self._makerows(names=[\"foo\", \"foo\", \"bar\"]) with self.assertRaises(ValueError): ModelTests.MyTable.search(self.db, name=\"foo\") def test_search_with_none(self): \"Do we properly search", "with multiple values?\" rows = self._makerows(names=[\"foo\"]) row_id = list(rows)[0] 
ModelTests.MyTable.update(self.db, row_id, name=\"biff\", count=100)", "result in results}, {4, 6}) def test_list_with_none(self): \"Can we get a list where", "columns?\" self._makerows(names=[\"foo\", None, \"bar\"]) results = ModelTests.MyTable.search(self.db, name=None) self.assertEqual(results.name, None) self.assertEqual(results.count, 4) def", "TABLE IF NOT EXISTS MyTable (\", \"count INTEGER,\", \"id INTEGER PRIMARY KEY AUTOINCREMENT,\",", "return { ModelTests.MyTable.insert(self.db, count=(i+1)*2, name=name) for i, name in enumerate(names) } def setUp(self):", "name=\"foo\") self.assertEqual(results.name, \"foo\") self.assertEqual(results.count, 2) def test_search_many(self): \"Do we error when we have", "of our logic around the model class.\" class MyTable(Model): COLUMNS = { \"id\":", "= { \"id\": ColumnInteger(autoincrement=True, primary_key=True), \"count\": ColumnInteger(), \"name\": ColumnText(null=True), } def _makerows(self, names:", "def _makerows(self, names: Optional[List[str]] = None): \"Make a few rows. Useful for many", "get a proper create table clause?\" result = ModelTests.MyTable.create_statement() expected = \"\\n\".join(( \"CREATE", "None) def test_search_one(self): \"Can we search and find a single row?\" rowids =", "\"Can we search and not find something?\" results = ModelTests.MyTable.search(self.db, name=\"sir-not-appearing\") self.assertIs(results, None)", "is NULL?\" rowids = self._makerows(names=[\"foo\", None, \"bar\"]) results = ModelTests.MyTable.list(self.db, name=None) self.assertEqual({result.count for", "None): \"Make a few rows. Useful for many tests.\" names = names or", "results}, rowids) def test_list_some(self): \"Can we get several rows from the table with", "table?\" rowid = ModelTests.MyTable.insert(self.db, count=3, name=\"foobar\") result = ModelTests.MyTable.get(self.db, rowid) self.assertEqual(result.id, rowid) self.assertEqual(result.count,", "primary_key=True), \"count\": ColumnInteger(), \"name\": ColumnText(null=True), } def _makerows(self, names: Optional[List[str]] = None): \"Make", "we have multiple matches?\" self._makerows(names=[\"foo\", \"foo\", \"bar\"]) with self.assertRaises(ValueError): ModelTests.MyTable.search(self.db, name=\"foo\") def test_search_with_none(self):", "list(rows)[0] ModelTests.MyTable.update(self.db, row_id, name=\"biff\", count=100) results = ModelTests.MyTable.get(self.db, row_id) self.assertEqual(results.count, 100) self.assertEqual(results.name, \"biff\")", "= ModelTests.MyTable.create_statement() expected = \"\\n\".join(( \"CREATE TABLE IF NOT EXISTS MyTable (\", \"count", "TEXT\", \");\", )) self.assertEqual(result, expected) def test_insert(self): \"Can we insert a row into", "1) def test_get(self): \"Can we get a row from the table?\" rowid =", ">= 4\") self.assertEqual({result.count for result in results}, {4, 6}) def test_list_with_none(self): \"Can we", "def test_update(self): \"Can we update a row with update()?\" rows = self._makerows(names=[\"foo\"]) row_id", "matches?\" self._makerows(names=[\"foo\", \"foo\", \"bar\"]) with self.assertRaises(ValueError): ModelTests.MyTable.search(self.db, name=\"foo\") def test_search_with_none(self): \"Do we properly" ]
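For reference, each row of this dump stores one source file as a list of overlapping 13-token shingles with stride 1 (the shingle width can be read off the rows themselves), which is why the raw entries repeat so heavily; the cleaned text above is simply those shingles deduplicated. Below is a minimal sketch of that merge, assuming the shingles have first been put back in document order (the rows here appear shuffled); merge_pair and merge_shingles are hypothetical helper names, not part of any dataset tooling.

from typing import List

def merge_pair(left: str, right: str) -> str:
    # Append `right` to `left`, dropping the longest run of words shared
    # between left's suffix and right's prefix.
    lw, rw = left.split(), right.split()
    for k in range(min(len(lw), len(rw)), 0, -1):
        if lw[-k:] == rw[:k]:
            return " ".join(lw + rw[k:])
    return " ".join(lw + rw)

def merge_shingles(shingles: List[str]) -> str:
    # Fold an ordered list of overlapping shingles back into one string.
    text = shingles[0]
    for shingle in shingles[1:]:
        text = merge_pair(text, shingle)
    return text

# Two adjacent shingles from the row above collapse into a single span:
print(merge_shingles([
    'def test_update(self): "Can we update a row',
    'update a row with update()?" rows = self._makerows(names=["foo"])',
]))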
<reponame>penguinmenac3/babilim
<filename>babilim/data/specialized_readers/data_downloader.py
import os
import urllib.request
from zipfile import ZipFile

def download_zip(root_dir: str, url: str) -> None:
    """Download a zip from a url and extract it into a data folder.

    Useful for downloading small datasets. The zip file gets extracted in
    the root_folder. It is recommended to set the root dir to your dataset
    folder when you download a dataset.

    :param root_dir: The root directory where to extract all.
    :param url: The url from which to download the zip archive.
    """
    dataset_name = url.split("/")[-1]
    assert ".zip" == dataset_name[-4:]
    dataset_name = dataset_name[:-4]
    if not os.path.exists(os.path.join(root_dir, dataset_name)):
        urllib.request.urlretrieve(url, "{}.zip".format(dataset_name))
        # Create a ZipFile Object and load sample.zip in it
        with ZipFile('{}.zip'.format(dataset_name), 'r') as zipObj:
            # Extract all the contents of zip file in current directory
            zipObj.extractall(root_dir)
        os.remove("{}.zip".format(dataset_name))
    else:
        print("Using buffered data.")
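A hypothetical call site for the helper above, for illustration only: the URL and folder are placeholders, and the buffering check only fires if the archive unpacks into a folder named after the zip.

from babilim.data.specialized_readers.data_downloader import download_zip

# First call downloads sample.zip and unpacks it under ./datasets; any
# later call finds ./datasets/sample and prints "Using buffered data."
download_zip(root_dir="./datasets", url="https://example.com/sample.zip")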
"""Hashing module

A simple module for hashing. Contains base class :py:class:`Hash` from
which all hash implementations inherit. For now contains only the MD5
hash class :py:class:`Md5`
"""
from .hash import Hash
from .md5 import Md5

__all__ = ["Hash", "Md5"]
<reponame>Code-With-Aagam/python-hackerrank
import numpy

numbers = tuple(map(int, input().split()))
print(numpy.zeros(numbers, dtype=numpy.int), numpy.ones(numbers, dtype=numpy.int), sep='\n')
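A worked run of this snippet, for concreteness. Note that numpy.int was only ever an alias for the builtin int and was removed in NumPy 1.24, so on current installs the equivalent call uses dtype=int:

import numpy

# For stdin "1 2" the snippet builds the shape tuple (1, 2) and prints:
#   [[0 0]]
#   [[1 1]]
print(numpy.zeros((1, 2), dtype=int), numpy.ones((1, 2), dtype=int), sep='\n')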
# -*- coding: utf-8 -*-
"""The UTMPX binary file event formatter."""

from plaso.formatters import interface
from plaso.formatters import manager
from plaso.lib import errors

class UtmpxSessionFormatter(interface.ConditionalEventFormatter):
  """Formatter for an UTMPX session event."""

  DATA_TYPE = u'mac:utmpx:event'

  FORMAT_STRING_PIECES = [
      u'User: {user}',
      u'Status: {status}',
      u'Computer Name: {computer_name}',
      u'Terminal: {terminal}']

  FORMAT_STRING_SHORT_PIECES = [u'User: {user}']

  SOURCE_LONG = u'UTMPX session'
  SOURCE_SHORT = u'LOG'

  # 9, 10 and 11 are only for Darwin and IOS.
  _STATUS_TYPES = {
      0: u'EMPTY',
      1: u'RUN_LVL',
      2: u'BOOT_TIME',
      3: u'OLD_TIME',
      4: u'NEW_TIME',
      5: u'INIT_PROCESS',
      6: u'LOGIN_PROCESS',
      7: u'USER_PROCESS',
      8: u'DEAD_PROCESS',
      9: u'ACCOUNTING',
      10: u'SIGNATURE',
      11: u'SHUTDOWN_TIME'}

  def GetMessages(self, unused_formatter_mediator, event_object):
    """Determines the formatted message strings for an event object.

    Args:
      formatter_mediator: the formatter mediator object (instance of
          FormatterMediator).
      event_object: the event object (instance of EventObject).

    Returns:
      A tuple containing the formatted message string and short message
      string.

    Raises:
      WrongFormatter: if the event object cannot be formatted by the
          formatter.
    """
    if self.DATA_TYPE != event_object.data_type:
      raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format(
          event_object.data_type))

    event_values = event_object.GetValues()

    status_type = event_values.get(u'status_type', None)
    if status_type is not None:
      event_values[u'status'] = self._STATUS_TYPES.get(
          status_type, u'{0:d}'.format(status_type))
    else:
      event_values[u'status'] = u'N/A'

    return
_STATUS_TYPES = { 0: u'EMPTY', 1: u'RUN_LVL', 2: u'BOOT_TIME', 3: u'OLD_TIME',", "u'INIT_PROCESS', 6: u'LOGIN_PROCESS', 7: u'USER_PROCESS', 8: u'DEAD_PROCESS', 9: u'ACCOUNTING', 10: u'SIGNATURE', 11: u'SHUTDOWN_TIME'}", "u'USER_PROCESS', 8: u'DEAD_PROCESS', 9: u'ACCOUNTING', 10: u'SIGNATURE', 11: u'SHUTDOWN_TIME'} def GetMessages(self, unused_formatter_mediator, event_object):", "mediator object (instance of FormatterMediator). event_object: the event object (instance of EventObject). Returns:", "string and short message string. Raises: WrongFormatter: if the event object cannot be", "status_type is not None: event_values[u'status'] = self._STATUS_TYPES.get( status_type, u'{0:d}'.format(status_type)) else: event_values[u'status'] = u'N/A'", "u'BOOT_TIME', 3: u'OLD_TIME', 4: u'NEW_TIME', 5: u'INIT_PROCESS', 6: u'LOGIN_PROCESS', 7: u'USER_PROCESS', 8: u'DEAD_PROCESS',", "unused_formatter_mediator, event_object): \"\"\"Determines the formatted message strings for an event object. Args: formatter_mediator:", "short message string. Raises: WrongFormatter: if the event object cannot be formatted by", "object (instance of EventObject). Returns: A tuple containing the formatted message string and", "None: event_values[u'status'] = self._STATUS_TYPES.get( status_type, u'{0:d}'.format(status_type)) else: event_values[u'status'] = u'N/A' return self._ConditionalFormatMessages(event_values) manager.FormattersManager.RegisterFormatter(UtmpxSessionFormatter)", "plaso.formatters import manager from plaso.lib import errors class UtmpxSessionFormatter(interface.ConditionalEventFormatter): \"\"\"Formatter for an UTMPX", "FORMAT_STRING_SHORT_PIECES = [u'User: {user}'] SOURCE_LONG = u'UTMPX session' SOURCE_SHORT = u'LOG' # 9,", "Darwin and IOS. _STATUS_TYPES = { 0: u'EMPTY', 1: u'RUN_LVL', 2: u'BOOT_TIME', 3:", "be formatted by the formatter. \"\"\" if self.DATA_TYPE != event_object.data_type: raise errors.WrongFormatter(u'Unsupported data", "u'SHUTDOWN_TIME'} def GetMessages(self, unused_formatter_mediator, event_object): \"\"\"Determines the formatted message strings for an event", "formatted message strings for an event object. Args: formatter_mediator: the formatter mediator object", "11: u'SHUTDOWN_TIME'} def GetMessages(self, unused_formatter_mediator, event_object): \"\"\"Determines the formatted message strings for an", "the formatter. \"\"\" if self.DATA_TYPE != event_object.data_type: raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format( event_object.data_type))", "# -*- coding: utf-8 -*- \"\"\"The UTMPX binary file event formatter.\"\"\" from plaso.formatters", "= { 0: u'EMPTY', 1: u'RUN_LVL', 2: u'BOOT_TIME', 3: u'OLD_TIME', 4: u'NEW_TIME', 5:", "type: {0:s}.'.format( event_object.data_type)) event_values = event_object.GetValues() status_type = event_values.get(u'status_type', None) if status_type is", "{user}'] SOURCE_LONG = u'UTMPX session' SOURCE_SHORT = u'LOG' # 9, 10 and 11", "_STATUS_TYPES = { 0: u'EMPTY', 1: u'RUN_LVL', 2: u'BOOT_TIME', 3: u'OLD_TIME', 4: u'NEW_TIME',", "if the event object cannot be formatted by the formatter. \"\"\" if self.DATA_TYPE", "event object. Args: formatter_mediator: the formatter mediator object (instance of FormatterMediator). event_object: the", "(instance of EventObject). 
Returns: A tuple containing the formatted message string and short", "{computer_name}', u'Terminal: {terminal}'] FORMAT_STRING_SHORT_PIECES = [u'User: {user}'] SOURCE_LONG = u'UTMPX session' SOURCE_SHORT =", "7: u'USER_PROCESS', 8: u'DEAD_PROCESS', 9: u'ACCOUNTING', 10: u'SIGNATURE', 11: u'SHUTDOWN_TIME'} def GetMessages(self, unused_formatter_mediator,", "errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format( event_object.data_type)) event_values = event_object.GetValues() status_type = event_values.get(u'status_type', None) if", "cannot be formatted by the formatter. \"\"\" if self.DATA_TYPE != event_object.data_type: raise errors.WrongFormatter(u'Unsupported", "u'UTMPX session' SOURCE_SHORT = u'LOG' # 9, 10 and 11 are only for", "event.\"\"\" DATA_TYPE = u'mac:utmpx:event' FORMAT_STRING_PIECES = [ u'User: {user}', u'Status: {status}', u'Computer Name:", "8: u'DEAD_PROCESS', 9: u'ACCOUNTING', 10: u'SIGNATURE', 11: u'SHUTDOWN_TIME'} def GetMessages(self, unused_formatter_mediator, event_object): \"\"\"Determines", "None) if status_type is not None: event_values[u'status'] = self._STATUS_TYPES.get( status_type, u'{0:d}'.format(status_type)) else: event_values[u'status']", "import interface from plaso.formatters import manager from plaso.lib import errors class UtmpxSessionFormatter(interface.ConditionalEventFormatter): \"\"\"Formatter", "for an UTMPX session event.\"\"\" DATA_TYPE = u'mac:utmpx:event' FORMAT_STRING_PIECES = [ u'User: {user}',", "def GetMessages(self, unused_formatter_mediator, event_object): \"\"\"Determines the formatted message strings for an event object.", "FormatterMediator). event_object: the event object (instance of EventObject). Returns: A tuple containing the", "from plaso.formatters import manager from plaso.lib import errors class UtmpxSessionFormatter(interface.ConditionalEventFormatter): \"\"\"Formatter for an", "from plaso.lib import errors class UtmpxSessionFormatter(interface.ConditionalEventFormatter): \"\"\"Formatter for an UTMPX session event.\"\"\" DATA_TYPE", "event_object: the event object (instance of EventObject). Returns: A tuple containing the formatted", "10: u'SIGNATURE', 11: u'SHUTDOWN_TIME'} def GetMessages(self, unused_formatter_mediator, event_object): \"\"\"Determines the formatted message strings", "object cannot be formatted by the formatter. 
\"\"\" if self.DATA_TYPE != event_object.data_type: raise", "u'Status: {status}', u'Computer Name: {computer_name}', u'Terminal: {terminal}'] FORMAT_STRING_SHORT_PIECES = [u'User: {user}'] SOURCE_LONG =", "data type: {0:s}.'.format( event_object.data_type)) event_values = event_object.GetValues() status_type = event_values.get(u'status_type', None) if status_type", "u'RUN_LVL', 2: u'BOOT_TIME', 3: u'OLD_TIME', 4: u'NEW_TIME', 5: u'INIT_PROCESS', 6: u'LOGIN_PROCESS', 7: u'USER_PROCESS',", "u'OLD_TIME', 4: u'NEW_TIME', 5: u'INIT_PROCESS', 6: u'LOGIN_PROCESS', 7: u'USER_PROCESS', 8: u'DEAD_PROCESS', 9: u'ACCOUNTING',", "\"\"\" if self.DATA_TYPE != event_object.data_type: raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format( event_object.data_type)) event_values =", "if self.DATA_TYPE != event_object.data_type: raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format( event_object.data_type)) event_values = event_object.GetValues()", "UtmpxSessionFormatter(interface.ConditionalEventFormatter): \"\"\"Formatter for an UTMPX session event.\"\"\" DATA_TYPE = u'mac:utmpx:event' FORMAT_STRING_PIECES = [", "u'Terminal: {terminal}'] FORMAT_STRING_SHORT_PIECES = [u'User: {user}'] SOURCE_LONG = u'UTMPX session' SOURCE_SHORT = u'LOG'", "= event_object.GetValues() status_type = event_values.get(u'status_type', None) if status_type is not None: event_values[u'status'] =", "not None: event_values[u'status'] = self._STATUS_TYPES.get( status_type, u'{0:d}'.format(status_type)) else: event_values[u'status'] = u'N/A' return self._ConditionalFormatMessages(event_values)", "an event object. Args: formatter_mediator: the formatter mediator object (instance of FormatterMediator). event_object:", "GetMessages(self, unused_formatter_mediator, event_object): \"\"\"Determines the formatted message strings for an event object. Args:", "file event formatter.\"\"\" from plaso.formatters import interface from plaso.formatters import manager from plaso.lib", "u'DEAD_PROCESS', 9: u'ACCOUNTING', 10: u'SIGNATURE', 11: u'SHUTDOWN_TIME'} def GetMessages(self, unused_formatter_mediator, event_object): \"\"\"Determines the" ]
[ "# License: MIT # from time import localtime as ltime def saveLog(content, tag=\"INFO\"):", "into log file like normal log ''' saveLog('In {0} error: {1}'.format(path, error), 'error')", "behaviors into log file # Author: @k1k9 # License: MIT # from time", "Config logDate = '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec) log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate,", "This part of script is responsible for saving script behaviors into log file", "{3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec) log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content) file =", "ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec) log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content) file = open('logs.txt', 'a+')", "= '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content) file = open('logs.txt', 'a+') file.write(log) def saveError(path, error): '''", "as ltime def saveLog(content, tag=\"INFO\"): ''' Save information (log) into log file '''", "<filename>log.py #!/usr/bin/env python3 # # Title: job-ofert-fetcher # Description: This part of script", "# Description: This part of script is responsible for saving script behaviors into", "script behaviors into log file # Author: @k1k9 # License: MIT # from", "file # Author: @k1k9 # License: MIT # from time import localtime as", "error): ''' Save error into log file like normal log ''' saveLog('In {0}", "'[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content) file = open('logs.txt', 'a+') file.write(log) def saveError(path, error): ''' Save", "ltime().tm_mon, ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec) log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content) file = open('logs.txt',", "tag.upper(), content) file = open('logs.txt', 'a+') file.write(log) def saveError(path, error): ''' Save error", "content) file = open('logs.txt', 'a+') file.write(log) def saveError(path, error): ''' Save error into", "open('logs.txt', 'a+') file.write(log) def saveError(path, error): ''' Save error into log file like", "# Title: job-ofert-fetcher # Description: This part of script is responsible for saving", "ltime().tm_hour, ltime().tm_min, ltime().tm_sec) log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content) file = open('logs.txt', 'a+') file.write(log)", "ltime().tm_min, ltime().tm_sec) log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content) file = open('logs.txt', 'a+') file.write(log) def", "information (log) into log file ''' # Config logDate = '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon,", "ltime().tm_sec) log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content) file = open('logs.txt', 'a+') file.write(log) def saveError(path,", "Author: @k1k9 # License: MIT # from time import localtime as ltime def", "time import localtime as ltime def saveLog(content, tag=\"INFO\"): ''' Save information (log) into", "(log) into log file ''' # Config logDate = '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year,", "error into log file like normal log ''' saveLog('In {0} error: {1}'.format(path, error),", "= open('logs.txt', 'a+') file.write(log) def saveError(path, error): ''' Save error into log file", "License: MIT # from time import localtime as ltime def saveLog(content, tag=\"INFO\"): '''", "MIT # from time import localtime as ltime def 
saveLog(content, tag=\"INFO\"): ''' Save", "# from time import localtime as ltime def saveLog(content, tag=\"INFO\"): ''' Save information", "part of script is responsible for saving script behaviors into log file #", "# # Title: job-ofert-fetcher # Description: This part of script is responsible for", "''' # Config logDate = '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec) log", "job-ofert-fetcher # Description: This part of script is responsible for saving script behaviors", "from time import localtime as ltime def saveLog(content, tag=\"INFO\"): ''' Save information (log)", "into log file # Author: @k1k9 # License: MIT # from time import", "file = open('logs.txt', 'a+') file.write(log) def saveError(path, error): ''' Save error into log", "def saveError(path, error): ''' Save error into log file like normal log '''", "Title: job-ofert-fetcher # Description: This part of script is responsible for saving script", "Save information (log) into log file ''' # Config logDate = '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday,", "# Config logDate = '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec) log =", "''' Save error into log file like normal log ''' saveLog('In {0} error:", "is responsible for saving script behaviors into log file # Author: @k1k9 #", "#!/usr/bin/env python3 # # Title: job-ofert-fetcher # Description: This part of script is", "log file # Author: @k1k9 # License: MIT # from time import localtime", "@k1k9 # License: MIT # from time import localtime as ltime def saveLog(content,", "''' Save information (log) into log file ''' # Config logDate = '{0}/{1}/{2}", "= '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec) log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content)", "Description: This part of script is responsible for saving script behaviors into log", "saveError(path, error): ''' Save error into log file like normal log ''' saveLog('In", "# Author: @k1k9 # License: MIT # from time import localtime as ltime", "import localtime as ltime def saveLog(content, tag=\"INFO\"): ''' Save information (log) into log", "file ''' # Config logDate = '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec)", "script is responsible for saving script behaviors into log file # Author: @k1k9", "def saveLog(content, tag=\"INFO\"): ''' Save information (log) into log file ''' # Config", "localtime as ltime def saveLog(content, tag=\"INFO\"): ''' Save information (log) into log file", "responsible for saving script behaviors into log file # Author: @k1k9 # License:", "tag=\"INFO\"): ''' Save information (log) into log file ''' # Config logDate =", "logDate = '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec) log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(),", "'{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec) log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content) file", "'a+') file.write(log) def saveError(path, error): ''' Save error into log file like normal", "python3 # # Title: job-ofert-fetcher # Description: This part of script is responsible", "log file ''' # Config 
logDate = '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year, ltime().tm_hour, ltime().tm_min,", "Save error into log file like normal log ''' saveLog('In {0} error: {1}'.format(path,", "saving script behaviors into log file # Author: @k1k9 # License: MIT #", "for saving script behaviors into log file # Author: @k1k9 # License: MIT", "ltime def saveLog(content, tag=\"INFO\"): ''' Save information (log) into log file ''' #", "log = '[{0}]\\t{1}\\t\\t{2}\\n'.format(logDate, tag.upper(), content) file = open('logs.txt', 'a+') file.write(log) def saveError(path, error):", "into log file ''' # Config logDate = '{0}/{1}/{2} {3}:{4}:{5}'.format(ltime().tm_mday, ltime().tm_mon, ltime().tm_year, ltime().tm_hour,", "of script is responsible for saving script behaviors into log file # Author:", "file.write(log) def saveError(path, error): ''' Save error into log file like normal log", "saveLog(content, tag=\"INFO\"): ''' Save information (log) into log file ''' # Config logDate" ]
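A hypothetical usage sketch, assuming the module above is importable as `log` (note the timestamp fields are not zero-padded, so single-digit minutes and seconds appear as one character):

# Hypothetical usage of log.py; assumes it sits on the import path as `log`.
from log import saveLog, saveError

saveLog('Fetched 12 offers')        # appends e.g. "[7/3/2025 9:5:4]\tINFO\t\tFetched 12 offers"
saveLog('Low disk space', 'warn')   # the tag is upper-cased to WARN
saveError('fetcher.py', 'timeout')  # appends "In fetcher.py error: timeout" with tag ERROR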
[ "'param3'] action2 [-arg1 'param1' -arg2] action3 [-arg1 -arg2] # Run any script from", "not the greatest dimension.') return parser def nuc_args(self, parser): # Add specific arguments", "[skip]') top_parser.add_argument('-dt', '--delete_temp', action='store_true', help='Delete temp files upon completion [False]') # Mask arguments", "metavar='<string>', help='Control group name filter [*]', default='') return parser def pair_args(self, parser): parser", "principle axis orientations, in case AP is not the greatest dimension.') # parser.add_argument('--allpa',", "1-beta [0.8]', default=0.8, type=float) parser.add_argument('--significance', dest='significance', metavar='<0 < float < 1>', help='Desired significance", "Run print(\"MouseMorph {0} will be run with arguments: \\n\\t{1}\".format(first_arg.command, vars(self.args))) time_start = notify_start()", "command-specific arguments parser = getattr(self, first_arg.command + '_args')(top_parser) self.args = parser.parse_args(sys.argv[2:]) # Sanitise", "elif os.path.isfile(args.input_2): # print (\" Input 2 {0} is a file ...\".format(args.input)) #", "removing path and 1+ extensions args.tpm_name_filter = os.path.basename(args.tpm).split(os.extsep)[0] args.tpm_directory = os.path.dirname(args.tpm) else: args.tpm_directory", "directory containing files whose names are to be matched, or a .CSV file", "return parser def orient_args(self, parser): # Add specific arguments parser.add_argument('-at','--atlas', dest='atlas', metavar='<atlas>', help='Atlas", "& CABI, UCL, UK), <EMAIL>' __created__ = '2015-06-28' def notify_start(): \"\"\"Give the user", "path>', help='Path to .CSV file', required=req) parser.add_argument('-col', '--column', dest='column', metavar='<int int ... int>',", "getattr(self, first_arg.command + '_args')(top_parser) self.args = parser.parse_args(sys.argv[2:]) # Sanitise arguments & directories self.args", "directory or file', required=False) top_parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help=\"If mask is a directory,", "directories ending with a \"\\\" (os.sep), which argparse assumes was intentional and adds", "[0.5]', default=0.5, type=float) parser.add_argument('--rf_gstd', dest='rf_gstd', metavar='<float>', help='Prior gaussian regularization [2.0]', default=2.0, type=float) return", "or does not exist: {0}\".format(args.input)) else: args.input_directory = os.getcwd() if not args.no_output: if", "= os.path.normpath(os.path.join(args.output)) else: print \"Specified output ({0}) is not a directory; creating it", "import chain from datetime import datetime # import test_go import mm_functions as mmfn", "an automatic mouse MR image processor.\", usage = \"mousemorph <command> [<arguments>]\") # Input", "os.path.isdir(args.mask): args.mask_name_filter = '*' + args.mn_filter + '*' # use wildcards if provided", "if args.input_2: if os.path.isdir(args.input_2): # print (\" Input 2 {0} is a directory", "help='Prior relaxation factor [0.5]', default=0.5, type=float) parser.add_argument('--rf_gstd', dest='rf_gstd', metavar='<float>', help='Prior gaussian regularization [2.0]',", "hasattr(args, 'input_2'): if args.input_2: if os.path.isdir(args.input_2): # print (\" Input 2 {0} is", "== FSL FLIRT's \"secondary\" # adapt to accept -corr [dir] [filter] # if", "MouseMorph programs. 
\"\"\" def __init__(self): command_parser = argparse.ArgumentParser(\"argp_mousemorph\", description = \"MouseMorph, an automatic", "self.args.in_ext_filter) if self.args.reverse_input: self.args.input_files_list = self.args.input_files_list[::-1] # Run print(\"MouseMorph {0} will be run", "os.path.isdir(args.tpm): args.tpm_name_filter = '*' + args.tn_filter + '*' # use wildcards if provided", "use wildcards if provided a directory alone args.input_name_filter_exact_2 = args.in_filter args.input_directory_2 = os.path.normpath(os.path.join(args.input_2))", "from datetime import datetime # import test_go import mm_functions as mmfn __author__ =", "image mask', required=True) return parser def orient_args(self, parser): # Add specific arguments parser.add_argument('-at','--atlas',", "directory ...\".format(args.input)) args.input_name_filter = '*' + args.in_filter + '*' # use wildcards if", "print (\" Input 2 {0} is a file ...\".format(args.input)) # Get the filename,", "float < 1>', help='Prior relaxation factor [0.5]', default=0.5, type=float) parser.add_argument('--rf_gstd', dest='rf_gstd', metavar='<float>', help='Prior", "...\".format(args.input)) # Get the filename, removing path and 1+ extensions args.input_name_filter_2 = os.path.basename(args.input_2).split(os.extsep)[0]", "dest='nopriors', metavar='<int>', help='Number of classes (no TPM inputs)', type=int) parser.add_argument('--mrf_beta', dest='mrf_beta', metavar='<0 <", "Exception(\"Input 2 not recognised or does not exist: {0}\".format(args.input_2)) else: args.input_directory_2 = os.getcwd()", "req=False): parser.add_argument('-l', '--list', dest='list', metavar='<directory> or <.CSV file path>', help='Either a directory containing", "metavar='<0 < float < 1>', help='Desired significance level, alpha [0.05]', default=0.05, type=float) parser.add_argument('--detect_difference',", "top_parser.add_argument('-if', '--in_filter', dest='in_filter', metavar='<filter>', help=\"If input is a directory, filter files ['']\", default='')", "'--baseline_mask', dest='baseline_mask_path', metavar='<baseline mask NIfTI file path>', help='Full path of the baseline NIfTI", "top_parser.add_argument('-rev', '--reverse_input', dest='reverse_input', action=\"store_true\", help='Run through input files in reverse order') top_parser.add_argument('-v', '--verbose',", "pass # else: # break # return args def add_arg_csv(self, parser, req=False): parser.add_argument('-csv',", "input is a directory, filter files ['']\", default='') # Processing options top_parser.add_argument('-ds', '--downsample',", "dest='convergence', metavar='<convergence>', help='Convergence threshold [0.001]', default=0.001, type=float) parser.add_argument('-nhb', dest='nhistbins', metavar='<nhistbins>', help='Number of histogram", "help='Number of histogram bins [256]', default=256, type=int) return parser def tails_type(self, str): acceptable", "time_diff)) return class MouseMorph(object): \"\"\"Define all the necessary arguments passed to MouseMorph programs.", "required=False) top_parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help=\"If mask is a directory, filter files ['']\",", "[256]', default=256, type=int) return parser def tails_type(self, str): acceptable = ['one', 'two'] if", "off]\\n\\t(Downsampling input files may speed up processing, at the expense of accuracy.)', default=0,", "are in approximately the same initial orientation. 
Only the first image will be", "type=float) parser.add_argument('-nhb', dest='nhistbins', metavar='<nhistbins>', help='Number of histogram bins [256]', default=256, type=int) return parser", "= os.path.basename(args.mask).split(os.extsep)[0] args.mask_directory = os.path.dirname(args.mask) else: args.mask_directory = None if hasattr(args, 'tpm'): if", "parser def loop_args(self, parser): parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on", "req=True) parser.add_argument('--group', dest='csv_group_filter', metavar='<string>', help='Control group name filter [*]', default='') return parser def", "orient <arguments> Non-uniformity correct brains ----------------------------- python mousemorph.py nuc <arguments> \"\"\" # To", "the repeat NIfTI image', required=True) parser.add_argument('-bm', '--baseline_mask', dest='baseline_mask_path', metavar='<baseline mask NIfTI file path>',", "top_parser.add_argument('-v', '--verbose', action=\"store_true\", help=\"Verbose output\") # Add command-specific arguments parser = getattr(self, first_arg.command", "will be used as a list of names [current]', required=req) parser.add_argument('-lf', '--list_filter', dest='list_filter',", "files ['']\", default='') parser.add_argument('--priors4D', dest='priors4D', action='store_true', help='Use this flag if the priors are", "# use wildcards if provided a directory alone args.input_name_filter_exact_2 = args.in_filter args.input_directory_2 =", "{0} ...\".format(time_start)) return time_start def notify_complete(time_start=None, log_location=''): time_format = '%H:%M:%S %p on %b", "parser.add_argument('-col', '--column', dest='column', metavar='<int>', help='Column number [0]', default=0, type=int, required=False) parser.add_argument('-i2','--input_2', dest='input_2', metavar='<directory>", "help=\"If mask is a directory, filter files ['']\", default='') return parser def add_arg_list(self,", "rather than individual files per class', required=False) parser.add_argument('--nopriors', dest='nopriors', metavar='<int>', help='Number of classes", "1+ extensions args.input_name_filter_2 = os.path.basename(args.input_2).split(os.extsep)[0] args.input_directory_2 = os.path.dirname(args.input_2) else: raise Exception(\"Input 2 not", "or two [two]', default='two', type=self.tails_type) parser = self.add_arg_csv(parser, req=True) parser.add_argument('--group', dest='csv_group_filter', metavar='<string>', help='Control", "shouldn't supply that \"\\\", really, but just in case...\"\"\" # for name in", "----- # 1. if -i has more than one argument following it, use", "os.path.isfile(os.path.normpath(args.list)): # args.column return args # Methods which actually run the command the", "if args.input: if os.path.isdir(args.input): # print (\" Input {0} is a directory ...\".format(args.input))", "\\n\\tintstan, \\n\\t...\"\"\") command_parser.add_argument('command', help=\"MouseMorph program to be run.\") first_arg = command_parser.parse_args(sys.argv[1:2]) if not", "\"\\\" (os.sep), which argparse assumes was intentional and adds the final user quote", "not the greatest dimension.') # parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible principle", "a directory alone args.input_name_filter_exact_2 = args.in_filter args.input_directory_2 = os.path.normpath(os.path.join(args.input_2)) elif os.path.isfile(args.input_2): # print", "programs. 
\"\"\" def __init__(self): command_parser = argparse.ArgumentParser(\"argp_mousemorph\", description = \"MouseMorph, an automatic mouse", "to output name', default=\"\") top_parser.add_argument('-ona', '--out_name_append', metavar='<string>', help='Append this string to output name',", "parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>', help='Full width, half maximum [0.15]', default=0.15, type=float) parser.add_argument('-ss', dest='subsample', metavar='<factor>',", "file', required=req) parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help=\"If mask is a directory, filter files", "required=True) parser.add_argument('-bm', '--baseline_mask', dest='baseline_mask_path', metavar='<baseline mask NIfTI file path>', help='Full path of the", "and 1+ extensions args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0] args.mask_directory = os.path.dirname(args.mask) else: args.mask_directory = None", "get a list of strings as file names from a directory, or from", "significance level, alpha [0.05]', default=0.05, type=float) parser.add_argument('--detect_difference', dest='detect_difference', metavar='<0 < float < 1>',", "[0.001]', default=0.001, type=float) parser.add_argument('-nhb', dest='nhistbins', metavar='<nhistbins>', help='Number of histogram bins [256]', default=256, type=int)", "combine all input files into the same list. Likewise for other directory arguments.", "directory containing NIfTIs, or a single file.', required=False) parser.add_argument('-corr','--corresponding', dest='corresponding', metavar='<corresponding>', help='NIfTI-1 file,", "is a file ...\".format(args.input)) # Get the filename, removing path and 1+ extensions", "Add specific arguments parser.add_argument('-its','--iterations', dest='iterations', metavar='<iterations>', help='Iterations to run [200]', default=200, type=int) parser.add_argument('-fwhm',", "<file path>', help='Input file (NIfTI-1, *.nii.gz or *.nii) or directory [current]') top_parser.add_argument('-o', '--output',", "= '*' + args.tn_filter + '*' # use wildcards if provided a directory", "mm_pair mm_pair.go(self.args) def power(self): import mm_powercalc mm_powercalc.go(self.args) def loop(self): import mm_loop mm_loop.go(self.args) def", "any of: \\n\\textract, \\n\\torient, \\n\\tnuc, \\n\\tintstan, \\n\\t...\"\"\") command_parser.add_argument('command', help=\"MouseMorph program to be run.\")", "or file', required=False) parser.add_argument('-tf', '--tpm_filter', dest='tn_filter', metavar='<filter>', help=\"If tpm is a directory, filter", "= '.nii' if args.input: if os.path.isdir(args.input): # print (\" Input {0} is a", "file (NIfTI-1, *.nii.gz or *.nii) or directory [current]') top_parser.add_argument('-o', '--output', dest='output', metavar='<directory>', help='Output", "'--column', dest='column', metavar='<int>', help='Column number [0]', default=0, type=int, required=False) parser.add_argument('-i2','--input_2', dest='input_2', metavar='<directory> or", "a filter here, corresponding images in dir which also matching the filter will", "control mean to detect', default=0.25, type=float) parser.add_argument('--tails', dest='tails', metavar='<string>', help='Tails, one or two", "actually run the command the user asked for def nuc(self): import mm_nuc_n4 mm_nuc_n4.go(self.args)", "dest='list', metavar='<directory> or <.CSV file path>', help='Either a directory containing files whose names", "required=True) parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', 
help='MouseMorph function to run on each file', required=False)", "arguments: \\n\\t{1}\".format(first_arg.command, vars(self.args))) time_start = notify_start() getattr(self, first_arg.command)() notify_complete(time_start) # def sanitise_arguments(self, args):", "there's no filter here, all images in dir will be oriented as per", "time_start = notify_start() getattr(self, first_arg.command)() notify_complete(time_start) # def sanitise_arguments(self, args): # \"\"\"Windows: only", "of classes (no TPM inputs)', type=int) parser.add_argument('--mrf_beta', dest='mrf_beta', metavar='<0 < float < 1>',", "(\" Input {0} is a file ...\".format(args.input)) # Get the filename, removing path", "images in dir matching the filter will be oriented as per the single", "import mm_seg_EM_group mm_seg_EM_group.go(self.args) def main(): mm = MouseMorph() # print(\"{0}\".format(mm.__dict__)) print(\"{0}\".format(mm.args)) if __name__", "= mmfn.get_files_list(self.args.input_directory, self.args.input_name_filter, self.args.in_ext_filter) if self.args.reverse_input: self.args.input_files_list = self.args.input_files_list[::-1] # Run print(\"MouseMorph {0}", "(\" Input 2 {0} is a file ...\".format(args.input)) # Get the filename, removing", "required=True) parser.add_argument('-rm', '--repeat_mask', dest='repeat_mask_path', metavar='<repeat mask NIfTI file path>', help='Full path of the", "if the user has supplied directories ending with a \"\\\" (os.sep), which argparse", "return parser def pair_args(self, parser): parser = self.add_arg_list(parser, req=False) parser.add_argument('-col', '--column', dest='column', metavar='<int>',", "'--repeat', dest='repeat_path', metavar='<repeat NIfTI file path>', help='Full path of the repeat NIfTI image',", "metavar='<string>', help='Append this string to output name', default=\"\") top_parser.add_argument('-uz', '--unzip', action='store_true', help='Uncompressed output", "# Mask arguments top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory", "dest='tails', metavar='<string>', help='Tails, one or two [two]', default='two', type=self.tails_type) parser = self.add_arg_csv(parser, req=True)", "directories self.args = self.sanitise_arguments(self.args) if self.args.input: # Pre-populate a list of relevant files.", "dimension.') return parser def nuc_args(self, parser): # Add specific arguments parser.add_argument('-its','--iterations', dest='iterations', metavar='<iterations>',", "(PhD student, CMIC & CABI, UCL, UK), <EMAIL>' __created__ = '2015-06-28' def notify_start():", "= os.getcwd() if hasattr(args, 'mask'): if args.mask: args.mask = os.path.normpath(args.mask) if os.path.isdir(args.mask): args.mask_name_filter", "parser.add_argument('-nhb', dest='nhistbins', metavar='<nhistbins>', help='Number of histogram bins [256]', default=256, type=int) return parser def", "it ...\".format(args.output) args.output_directory = os.path.normpath(os.path.join(args.output)) mmfn.check_create_directories([args.output_directory]) else: print \"No output directory specified. 
Setting", "mm_nuc_n4.go(self.args) def orient(self): import mm_orient mm_orient.go(self.args) def pair(self): import mm_pair mm_pair.go(self.args) def power(self):", "= getattr(args, name) # setattr(args, name, s.rstrip(os.sep).rstrip('\"')) # except AttributeError: # pass #", "mousemorph.py orient <arguments> Non-uniformity correct brains ----------------------------- python mousemorph.py nuc <arguments> \"\"\" #", "return args # Methods which actually run the command the user asked for", "# Run any script from this one import os import sys import glob", "speed up processing, at the expense of accuracy.)', default=0, type=float) top_parser.add_argument('-par', '--parallel', dest='parallel',", "top_parser.add_argument('-ona', '--out_name_append', metavar='<string>', help='Append this string to output name', default=\"\") top_parser.add_argument('-uz', '--unzip', action='store_true',", "mask is a directory, filter files ['']\", default='') # Filters top_parser.add_argument('-if', '--in_filter', dest='in_filter',", "\\n\\t...\"\"\") command_parser.add_argument('command', help=\"MouseMorph program to be run.\") first_arg = command_parser.parse_args(sys.argv[1:2]) if not hasattr(self,", "files [False]') top_parser.add_argument('-ow', '--overwrite', action='store_true', help='Overwrite existing output files [skip]') top_parser.add_argument('-dt', '--delete_temp', action='store_true',", "file>', help='TPM directory or file', required=False) parser.add_argument('-tf', '--tpm_filter', dest='tn_filter', metavar='<filter>', help=\"If tpm is", "image processor This is the base MouseMorph program. Run it with Python. Usage", "# Run print(\"MouseMorph {0} will be run with arguments: \\n\\t{1}\".format(first_arg.command, vars(self.args))) time_start =", "flag if the priors are all single 4D NIfTIs rather than individual files", "break # return args def add_arg_csv(self, parser, req=False): parser.add_argument('-csv', '--csv', dest='csv_path', metavar='<.CSV file", "indicate that all brains are in approximately the same initial orientation. Only the", "args.in_filter + '*' # use wildcards if provided a directory alone args.input_name_filter_exact_2 =", "in approximately the same initial orientation. 
Only the first image will be compared", "'--output', dest='output', metavar='<directory>', help='Output directory [input directory]') top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>', help='Prepend this string", "1>', help='Desired significance level, alpha [0.05]', default=0.05, type=float) parser.add_argument('--detect_difference', dest='detect_difference', metavar='<0 < float", "<filename>mousemorph/mousemorph.py #!/usr/bin/python \"\"\"MouseMorph, an automatic mouse MR image processor This is the base", "help='Append this string to output name', default=\"\") top_parser.add_argument('-uz', '--unzip', action='store_true', help='Uncompressed output files", "def orient(self): import mm_orient mm_orient.go(self.args) def pair(self): import mm_pair mm_pair.go(self.args) def power(self): import", "is a filter here, corresponding images in dir which also matching the filter", "'.nii*' if not args.unzip: args.ext = '.nii.gz' else: args.ext = '.nii' if args.input:", "os.path.basename(args.input).split(os.extsep)[0] args.input_directory = os.path.dirname(args.input) else: raise Exception(\"Input not recognised or does not exist:", "command_parser.print_help() sys.exit(1) # Top-level parser with universal arguments top_parser = argparse.ArgumentParser(\"MouseMorph user input", "= notify_start() getattr(self, first_arg.command)() notify_complete(time_start) # def sanitise_arguments(self, args): # \"\"\"Windows: only necessary", "arguments parser.add_argument('-its','--iterations', dest='iterations', metavar='<iterations>', help='Iterations to run [200]', default=200, type=int) parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>',", "output name', default=\"\") top_parser.add_argument('-ona', '--out_name_append', metavar='<string>', help='Append this string to output name', default=\"\")", "user some helpful notes at the launch of the script.\"\"\" time_format = '%H:%M:%S", "help=\"MouseMorph program to be run.\") first_arg = command_parser.parse_args(sys.argv[1:2]) if not hasattr(self, first_arg.command): print(\"command", "self.sanitise_arguments(self.args) if self.args.input: # Pre-populate a list of relevant files. 
self.args.input_files_list = mmfn.get_files_list(self.args.input_directory,", "dest='function_name', metavar='<function>', help='MouseMorph function to run on each file', required=False) return parser def", "filter here, corresponding images in dir which also matching the filter will be", "= \"MouseMorph, an automatic mouse MR image processor.\", usage = \"mousemorph <command> [<arguments>]\")", "gaussian regularization [2.0]', default=2.0, type=float) return parser def sanitise_arguments(self, args): \"\"\" \"\"\" args.in_ext_filter", "BSI.\"\"\" parser.add_argument('-b', '--baseline', dest='baseline_path', metavar='<baseline NIfTI file path>', help='Full path of the baseline", "single image and there is a filter here, all images in dir matching", "arguments parser.add_argument('-at','--atlas', dest='atlas', metavar='<atlas>', help='Atlas directory containing NIfTIs, or a single file.', required=False)", "name, s.rstrip(os.sep).rstrip('\"')) # except AttributeError: # pass # else: # break # return", "1>', help='Fractional difference from the control mean to detect', default=0.25, type=float) parser.add_argument('--tails', dest='tails',", "metavar='<0 < float < 1>', help='Prior relaxation factor [0.5]', default=0.5, type=float) parser.add_argument('--rf_gstd', dest='rf_gstd',", "parser.add_argument('-t', '--tpm', dest='tpm', metavar='<tpm directory> or <tpm file>', help='TPM directory or file', required=False)", "<arguments> Non-uniformity correct brains ----------------------------- python mousemorph.py nuc <arguments> \"\"\" # To do", "+ args.tn_filter + '*' # use wildcards if provided a directory alone args.tpm_name_filter_exact", "'--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file', required=False) top_parser.add_argument('-mf',", "with a \"\\\" (os.sep), which argparse assumes was intentional and adds the final", "histogram bins [256]', default=256, type=int) return parser def tails_type(self, str): acceptable = ['one',", "be oriented as per the single input # if input is a directory", "dest='rf_rel', metavar='<0 < float < 1>', help='Prior relaxation factor [0.5]', default=0.5, type=float) parser.add_argument('--rf_gstd',", "Input 2 {0} is a directory ...\".format(args.input)) args.input_name_filter_2 = '*' + args.in_filter +", "# if input is a single image and there is a filter here,", "initial orientation. Only the first image will be compared with an atlas and", "'*' # use wildcards if provided a directory alone args.input_name_filter_exact = args.in_filter args.input_directory", "[-arg1 'param1' -arg2] action3 [-arg1 -arg2] # Run any script from this one", "whose names are to be matched, or a .CSV file whose Nth column", "required=req) parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help=\"If mask is a directory, filter files ['']\",", "can be any of: \\n\\textract, \\n\\torient, \\n\\tnuc, \\n\\tintstan, \\n\\t...\"\"\") command_parser.add_argument('command', help=\"MouseMorph program to", "default='') return parser def pair_args(self, parser): parser = self.add_arg_list(parser, req=False) parser.add_argument('-col', '--column', dest='column',", "... 
int>', nargs='+', help='Column number(s) [0 1]', default=1, type=int, required=req) return parser def", "default='') # Processing options top_parser.add_argument('-ds', '--downsample', dest='downsample', metavar='<factor>', help='Downsampling factor [0, off]\\n\\t(Downsampling input", "help='Overwrite existing output files [skip]') top_parser.add_argument('-dt', '--delete_temp', action='store_true', help='Delete temp files upon completion", "return parser def seg_EM_args(self, parser): # parser = self.add_arg_mask(parser, req=True) parser.add_argument('-t', '--tpm', dest='tpm',", "datetime.strptime(time_stop, time_format) - datetime.strptime(time_start, time_format) mmfn.alert_user(\"Completion time is {0}. Took {1}.\".format(time_stop, time_diff)) return", "single image and there's no filter here, all images in dir will be", "all brains are in approximately the same initial orientation. Only the first image", "<file path>', help='Second input directory containing NIfTIs, or a single file.', required=True) parser.add_argument('-fn',", "to filter list input ['*']\", default='*') return parser def bsi_args(self, parser): \"\"\"These are", "action='store_true', help='Delete temp files upon completion [False]') # Mask arguments top_parser.add_argument('-m', '--mask', dest='mask',", "of the repeat NIfTI image', required=True) parser.add_argument('-bm', '--baseline_mask', dest='baseline_mask_path', metavar='<baseline mask NIfTI file", "__author__ = '<NAME> (PhD student, CMIC & CABI, UCL, UK), <EMAIL>' __created__ =", "os.getcwd() if hasattr(args, 'mask'): if args.mask: args.mask = os.path.normpath(args.mask) if os.path.isdir(args.mask): args.mask_name_filter =", "processor This is the base MouseMorph program. Run it with Python. Usage python", "the priors are all single 4D NIfTIs rather than individual files per class',", "accept -corr [dir] [filter] # if input is a single image and there's", "# setattr(args, name, s.rstrip(os.sep).rstrip('\"')) # except AttributeError: # pass # else: # break", "\\n\\tnuc, \\n\\tintstan, \\n\\t...\"\"\") command_parser.add_argument('command', help=\"MouseMorph program to be run.\") first_arg = command_parser.parse_args(sys.argv[1:2]) if", "*.nii.gz or *.nii) or directory [current]') top_parser.add_argument('-o', '--output', dest='output', metavar='<directory>', help='Output directory [input", "help='Output directory [input directory]') top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>', help='Prepend this string to output name',", "of the script.\"\"\" time_format = '%H:%M:%S %p on %b %d, %Y (%Z)' time_start", "if omitted)\") else: return str def power_args(self, parser): parser.add_argument('--power', dest='power', metavar='<0 < float", "number of iterations [100]', default=100, type=int) parser.add_argument('--rf_rel', dest='rf_rel', metavar='<0 < float < 1>',", "bins [256]', default=256, type=int) return parser def tails_type(self, str): acceptable = ['one', 'two']", "program to be run.\") first_arg = command_parser.parse_args(sys.argv[1:2]) if not hasattr(self, first_arg.command): print(\"command '{0}'", "{0} is a directory ...\".format(args.input)) args.input_name_filter = '*' + args.in_filter + '*' #", "= '*' + args.mn_filter + '*' # use wildcards if provided a directory", "parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file', required=req)", "base MouseMorph program. Run it with Python. 
Usage python mousemorph.py --help Orient brains", "in dir will be oriented # if input is a directory and there", "metavar='<filter>', help=\"If mask is a directory, filter files ['']\", default='') return parser def", "used to filter list input ['*']\", default='*') return parser def bsi_args(self, parser): \"\"\"These", "may speed up processing, at the expense of accuracy.)', default=0, type=float) top_parser.add_argument('-par', '--parallel',", "top_parser.add_argument('-uz', '--unzip', action='store_true', help='Uncompressed output files [compressed]') top_parser.add_argument('-no', '--no_output', dest='no_output', action='store_true', help='Don\\'t save", "type=self.tails_type) parser = self.add_arg_csv(parser, req=True) parser.add_argument('--group', dest='csv_group_filter', metavar='<string>', help='Control group name filter [*]',", "metavar='<filter>', help=\"If tpm is a directory, filter files ['']\", default='') parser.add_argument('--priors4D', dest='priors4D', action='store_true',", "parser.add_argument('--power', dest='power', metavar='<0 < float < 1>', help='Desired power, 1-beta [0.8]', default=0.8, type=float)", "if args.mask: args.mask = os.path.normpath(args.mask) if os.path.isdir(args.mask): args.mask_name_filter = '*' + args.mn_filter +", "TPM inputs)', type=int) parser.add_argument('--mrf_beta', dest='mrf_beta', metavar='<0 < float < 1>', help='MRF prior strength", "here, corresponding images in dir which also matching the filter will be oriented", "int>', nargs='+', help='Column number(s) [0 1]', default=1, type=int, required=req) return parser def add_arg_mask(self,", "parser.add_argument('-b', '--baseline', dest='baseline_path', metavar='<baseline NIfTI file path>', help='Full path of the baseline NIfTI", "dest='corresponding', metavar='<corresponding>', help='NIfTI-1 file, or directory of files, to be oriented in the", "mask', required=True) parser.add_argument('-rm', '--repeat_mask', dest='repeat_mask_path', metavar='<repeat mask NIfTI file path>', help='Full path of", "def main(): mm = MouseMorph() # print(\"{0}\".format(mm.__dict__)) print(\"{0}\".format(mm.args)) if __name__ == '__main__': main()", "help='Also resample output files.') parser.add_argument('--allsame', dest='allsame', action='store_true', help='Flag to indicate that all brains", "mask NIfTI file path>', help='Full path of the repeat NIfTI image mask', required=True)", ".CSV file', required=req) parser.add_argument('-col', '--column', dest='column', metavar='<int int ... int>', nargs='+', help='Column number(s)", "which argparse assumes was intentional and adds the final user quote to the", "input_directory. 
(As per \"secondary\" in FSL FLIRT.)') parser.add_argument('-res','--resample', dest='resample', action='store_true', help='Also resample output", "type=float) parser.add_argument('-ss', dest='subsample', metavar='<factor>', help='Subsampling factor [4]', default=4, type=int) parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number", "type=float) parser.add_argument('--significance', dest='significance', metavar='<0 < float < 1>', help='Desired significance level, alpha [0.05]',", "metavar='<directory> or <file path>', help='Second input directory containing NIfTIs, or a single file.',", "dest='mrf_beta', metavar='<0 < float < 1>', help='MRF prior strength [0.4]', default=0.4, type=float) parser.add_argument('--max_iter',", "run [200]', default=200, type=int) parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>', help='Full width, half maximum [0.15]', default=0.15,", "if provided a directory alone args.tpm_name_filter_exact = args.tn_filter args.tpm_directory = os.path.normpath(os.path.join(args.tpm)) elif os.path.isfile(args.tpm):", "mask NIfTI file path>', help='Full path of the baseline NIfTI image mask', required=True)", "--help Orient brains to standard space ------------------------------- python mousemorph.py orient <arguments> Non-uniformity correct", "dest='iterations', metavar='<iterations>', help='Iterations to run [200]', default=200, type=int) parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>', help='Full width,", "a single file.', required=True) parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on", "parser.add_argument('-conv', dest='convergence', metavar='<convergence>', help='Convergence threshold [0.001]', default=0.001, type=float) parser.add_argument('-nhb', dest='nhistbins', metavar='<nhistbins>', help='Number of", "given column of a .CSV file if hasattr(args, 'list'): if args.list: args.list =", "from itertools import chain from datetime import datetime # import test_go import mm_functions", "will be oriented # if input is a directory and there is a", "brains are in approximately the same initial orientation. 
Only the first image will", "use wildcards if provided a directory alone args.input_name_filter_exact = args.in_filter args.input_directory = os.path.normpath(os.path.join(args.input))", "parser.add_argument('--significance', dest='significance', metavar='<0 < float < 1>', help='Desired significance level, alpha [0.05]', default=0.05,", "args.column return args # Methods which actually run the command the user asked", "\"\"\" def __init__(self): command_parser = argparse.ArgumentParser(\"argp_mousemorph\", description = \"MouseMorph, an automatic mouse MR", "required=req) return parser def add_arg_mask(self, parser, req=False): # Mask arguments parser.add_argument('-m', '--mask', dest='mask',", "else: args.mask_directory = None if hasattr(args, 'tpm'): if args.tpm: args.tpm = os.path.normpath(args.tpm) if", "axis orientations, in case AP is not the greatest dimension.') return parser def", "parser.add_argument('-corr','--corresponding', dest='corresponding', metavar='<corresponding>', help='NIfTI-1 file, or directory of files, to be oriented in", "args.input_directory_2 = os.path.normpath(os.path.join(args.input_2)) elif os.path.isfile(args.input_2): # print (\" Input 2 {0} is a", "metavar='<iterations>', help='Iterations to run [200]', default=200, type=int) parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>', help='Full width, half", "Add command-specific arguments parser = getattr(self, first_arg.command + '_args')(top_parser) self.args = parser.parse_args(sys.argv[2:]) #", "with arguments: \\n\\t{1}\".format(first_arg.command, vars(self.args))) time_start = notify_start() getattr(self, first_arg.command)() notify_complete(time_start) # def sanitise_arguments(self,", "= self.args.input_files_list[::-1] # Run print(\"MouseMorph {0} will be run with arguments: \\n\\t{1}\".format(first_arg.command, vars(self.args)))", "Input and output top_parser.add_argument('-i', '--input', dest='input', metavar='<directory> or <file path>', help='Input file (NIfTI-1,", "multi action1 [-arg1 -arg2 -arg3 'param3'] action2 [-arg1 'param1' -arg2] action3 [-arg1 -arg2]", "helpful notes at the launch of the script.\"\"\" time_format = '%H:%M:%S %p on", "mm_loop.go(self.args) def seg_EM(self): import mm_seg_EM_group mm_seg_EM_group.go(self.args) def main(): mm = MouseMorph() # print(\"{0}\".format(mm.__dict__))", "at the launch of the script.\"\"\" time_format = '%H:%M:%S %p on %b %d,", "metavar='<0 < float < 1>', help='Fractional difference from the control mean to detect',", "if self.args.input: # Pre-populate a list of relevant files. 
self.args.input_files_list = mmfn.get_files_list(self.args.input_directory, self.args.input_name_filter,", "def loop_args(self, parser): parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each", "args.list: args.list = os.path.normpath(args.list) if os.path.isdir(os.path.normpath(args.list)): args.list_names = mmfn.get_names_list(args.list, args.list_filter, extension=args.in_ext_filter) # elif", "default=4, type=int) parser.add_argument('-conv', dest='convergence', metavar='<convergence>', help='Convergence threshold [0.001]', default=0.001, type=float) parser.add_argument('-nhb', dest='nhistbins', metavar='<nhistbins>',", "parser): # parser = self.add_arg_mask(parser, req=True) parser.add_argument('-t', '--tpm', dest='tpm', metavar='<tpm directory> or <tpm", "if os.path.isdir(args.output): # print (\" Output {0} is a directory ...\".format(args.input)) args.output_directory =", "help='Prepend this string to output name', default=\"\") top_parser.add_argument('-ona', '--out_name_append', metavar='<string>', help='Append this string", "import mm_pair mm_pair.go(self.args) def power(self): import mm_powercalc mm_powercalc.go(self.args) def loop(self): import mm_loop mm_loop.go(self.args)", "if input is a single image and there's no filter here, all images", "command_parser.parse_args(sys.argv[1:2]) if not hasattr(self, first_arg.command): print(\"command '{0}' not recognised.\".format(first_arg.command)) command_parser.print_help() sys.exit(1) # Top-level", "omitted)\") else: return str def power_args(self, parser): parser.add_argument('--power', dest='power', metavar='<0 < float <", "dest='repeat_mask_path', metavar='<repeat mask NIfTI file path>', help='Full path of the repeat NIfTI image", "filter here, all images in dir will be oriented as per the single", "metavar='<factor>', help='Downsampling factor [0, off]\\n\\t(Downsampling input files may speed up processing, at the", "file ...\".format(args.input)) # Get the filename, removing path and 1+ extensions args.input_name_filter_2 =", ".CSV file if hasattr(args, 'list'): if args.list: args.list = os.path.normpath(args.list) if os.path.isdir(os.path.normpath(args.list)): args.list_names", "# \"\"\"Windows: only necessary if the user has supplied directories ending with a", "if input is a directory and there is a filter here, corresponding images", "type=float) parser.add_argument('--tails', dest='tails', metavar='<string>', help='Tails, one or two [two]', default='two', type=self.tails_type) parser =", "Get the filename, removing path and 1+ extensions args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0] args.input_directory =", "files upon completion [False]') # Mask arguments top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or", "parser def tails_type(self, str): acceptable = ['one', 'two'] if str not in acceptable:", "or <mask file>', help='Mask directory or file', required=req) parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help=\"If", "\"secondary\" # adapt to accept -corr [dir] [filter] # if input is a", "nuc_args(self, parser): # Add specific arguments parser.add_argument('-its','--iterations', dest='iterations', metavar='<iterations>', help='Iterations to run [200]',", "-corr [dir] [filter] # if input is a single image and there's no", "argparse import subprocess from itertools import chain from datetime import datetime # import", "os.path.isfile(args.mask): # Get the 
filename, removing path and 1+ extensions args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0]", "type=int) parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>', help='Full width, half maximum [0.15]', default=0.15, type=float) parser.add_argument('-ss', dest='subsample',", "# Add specific arguments parser.add_argument('-its','--iterations', dest='iterations', metavar='<iterations>', help='Iterations to run [200]', default=200, type=int)", "MR image processor.\", usage = \"mousemorph <command> [<arguments>]\") # Input and output top_parser.add_argument('-i',", "# args.column return args # Methods which actually run the command the user", "self.add_arg_mask(parser, req=True) parser.add_argument('-t', '--tpm', dest='tpm', metavar='<tpm directory> or <tpm file>', help='TPM directory or", "= self.sanitise_arguments(self.args) if self.args.input: # Pre-populate a list of relevant files. self.args.input_files_list =", "help='Full path of the baseline NIfTI image', required=True) parser.add_argument('-r', '--repeat', dest='repeat_path', metavar='<repeat NIfTI", "(%Z)' time_start = time.strftime(time_format) mmfn.alert_user(\"Start time is {0} ...\".format(time_start)) return time_start def notify_complete(time_start=None,", "automatic mouse MR image processor.\", usage = \"\"\"mousemorph <command> [<arguments>], where <command> can", "a directory containing files whose names are to be matched, or a .CSV", "Took {1}.\".format(time_stop, time_diff)) return class MouseMorph(object): \"\"\"Define all the necessary arguments passed to", "up processing, at the expense of accuracy.)', default=0, type=float) top_parser.add_argument('-par', '--parallel', dest='parallel', action=\"store_true\",", "import mm_powercalc mm_powercalc.go(self.args) def loop(self): import mm_loop mm_loop.go(self.args) def seg_EM(self): import mm_seg_EM_group mm_seg_EM_group.go(self.args)", "was intentional and adds the final user quote to the end of the", "mm_nuc_n4 mm_nuc_n4.go(self.args) def orient(self): import mm_orient mm_orient.go(self.args) def pair(self): import mm_pair mm_pair.go(self.args) def", "a directory, filter files ['']\", default='') return parser def add_arg_list(self, parser, req=False): parser.add_argument('-l',", "directories (or files) and combine all input files into the same list. Likewise", "output\") # Add command-specific arguments parser = getattr(self, first_arg.command + '_args')(top_parser) self.args =", "and the rest will have the same gross orientation applied. Final minor corrections", "a directory, or from a given column of a .CSV file if hasattr(args,", "same initial orientation. 
Only the first image will be compared with an atlas", "'--mask_filter', dest='mn_filter', metavar='<filter>', help=\"If mask is a directory, filter files ['']\", default='') return", "float < 1>', help='Desired power, 1-beta [0.8]', default=0.8, type=float) parser.add_argument('--significance', dest='significance', metavar='<0 <", "be matched, or a .CSV file whose Nth column will be used as", "metavar='<convergence>', help='Convergence threshold [0.001]', default=0.001, type=float) parser.add_argument('-nhb', dest='nhistbins', metavar='<nhistbins>', help='Number of histogram bins", "a single image and there is a filter here, all images in dir", "# print (\" Input {0} is a directory ...\".format(args.input)) args.input_name_filter = '*' +", "first_arg.command): print(\"command '{0}' not recognised.\".format(first_arg.command)) command_parser.print_help() sys.exit(1) # Top-level parser with universal arguments", "# s = getattr(args, name) # setattr(args, name, s.rstrip(os.sep).rstrip('\"')) # except AttributeError: #", "directory or file', required=False) parser.add_argument('-tf', '--tpm_filter', dest='tn_filter', metavar='<filter>', help=\"If tpm is a directory,", "args.mn_filter + '*' # use wildcards if provided a directory alone args.mask_name_filter_exact =", "mousemorph.py --help Orient brains to standard space ------------------------------- python mousemorph.py orient <arguments> Non-uniformity", "required=False) return parser def loop_args(self, parser): parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to", "of levels [4]', default=4, type=int) parser.add_argument('-conv', dest='convergence', metavar='<convergence>', help='Convergence threshold [0.001]', default=0.001, type=float)", "<mask file>', help='Mask directory or file', required=False) top_parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help=\"If mask", "also matching the filter will be oriented # Replace mm_multi.py with: run mousemorph.py", "will have the same gross orientation applied. Final minor corrections will be performed", "file path>', help='Either a directory containing files whose names are to be matched,", "in case AP is not the greatest dimension.') return parser def nuc_args(self, parser):", "help='Path to .CSV file', required=req) parser.add_argument('-col', '--column', dest='column', metavar='<int int ... int>', nargs='+',", "[input directory]') top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>', help='Prepend this string to output name', default=\"\") top_parser.add_argument('-ona',", "filter [*]', default='') return parser def pair_args(self, parser): parser = self.add_arg_list(parser, req=False) parser.add_argument('-col',", "[filter] # if input is a single image and there's no filter here,", "'<NAME> (PhD student, CMIC & CABI, UCL, UK), <EMAIL>' __created__ = '2015-06-28' def", "args.tn_filter + '*' # use wildcards if provided a directory alone args.tpm_name_filter_exact =", "[False]') # Mask arguments top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask", "program. Run it with Python. 
Usage python mousemorph.py --help Orient brains to standard", "notify_complete(time_start) # def sanitise_arguments(self, args): # \"\"\"Windows: only necessary if the user has", "parser.add_argument('--rf_gstd', dest='rf_gstd', metavar='<float>', help='Prior gaussian regularization [2.0]', default=2.0, type=float) return parser def sanitise_arguments(self,", "quote to the end of the string. The user shouldn't supply that \"\\\",", "def loop(self): import mm_loop mm_loop.go(self.args) def seg_EM(self): import mm_seg_EM_group mm_seg_EM_group.go(self.args) def main(): mm", "provided a directory alone args.input_name_filter_exact_2 = args.in_filter args.input_directory_2 = os.path.normpath(os.path.join(args.input_2)) elif os.path.isfile(args.input_2): #", "same gross orientation applied. Final minor corrections will be performed individually.') parser.add_argument('--allpa', dest='allpa',", "dest='resample', action='store_true', help='Also resample output files.') parser.add_argument('--allsame', dest='allsame', action='store_true', help='Flag to indicate that", "\"\"\"Give the user some helpful notes at the launch of the script.\"\"\" time_format", "parser.add_argument('--tails', dest='tails', metavar='<string>', help='Tails, one or two [two]', default='two', type=self.tails_type) parser = self.add_arg_csv(parser,", "alone args.input_name_filter_exact = args.in_filter args.input_directory = os.path.normpath(os.path.join(args.input)) elif os.path.isfile(args.input): # print (\" Input", "if hasattr(args, 'input_2'): if args.input_2: if os.path.isdir(args.input_2): # print (\" Input 2 {0}", "----------------------------- python mousemorph.py nuc <arguments> \"\"\" # To do # ----- # 1.", "...\".format(args.input)) args.input_name_filter_2 = '*' + args.in_filter + '*' # use wildcards if provided", "in reverse order') top_parser.add_argument('-v', '--verbose', action=\"store_true\", help=\"Verbose output\") # Add command-specific arguments parser", "- datetime.strptime(time_start, time_format) mmfn.alert_user(\"Completion time is {0}. 
Took {1}.\".format(time_stop, time_diff)) return class MouseMorph(object):", "2 not recognised or does not exist: {0}\".format(args.input_2)) else: args.input_directory_2 = os.getcwd() if", "of the baseline NIfTI image', required=True) parser.add_argument('-r', '--repeat', dest='repeat_path', metavar='<repeat NIfTI file path>',", "[0 1]', default=1, type=int, required=req) return parser def add_arg_mask(self, parser, req=False): # Mask", "{0} will be run with arguments: \\n\\t{1}\".format(first_arg.command, vars(self.args))) time_start = notify_start() getattr(self, first_arg.command)()", "file', required=False) return parser def loop_args(self, parser): parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function", "getattr(args, name) # setattr(args, name, s.rstrip(os.sep).rstrip('\"')) # except AttributeError: # pass # else:", "here, all images in dir matching the filter will be oriented as per", "= None if hasattr(args, 'tpm'): if args.tpm: args.tpm = os.path.normpath(args.tpm) if os.path.isdir(args.tpm): args.tpm_name_filter", "NIfTI file path>', help='Full path of the baseline NIfTI image mask', required=True) parser.add_argument('-rm',", "directory ...\".format(args.input)) args.output_directory = os.path.normpath(os.path.join(args.output)) else: print \"Specified output ({0}) is not a", "parser.add_argument('--detect_difference', dest='detect_difference', metavar='<0 < float < 1>', help='Fractional difference from the control mean", "here, all images in dir will be oriented as per the single input", "return parser def sanitise_arguments(self, args): \"\"\" \"\"\" args.in_ext_filter = '.nii*' if not args.unzip:", "is a filter here, all images in dir matching the filter will be", "return parser def bsi_args(self, parser): \"\"\"These are the somewhat-unique requirements for BSI.\"\"\" parser.add_argument('-b',", "through input files in reverse order') top_parser.add_argument('-v', '--verbose', action=\"store_true\", help=\"Verbose output\") # Add", "elif os.path.isfile(args.tpm): # Get the filename, removing path and 1+ extensions args.tpm_name_filter =", "return time_start def notify_complete(time_start=None, log_location=''): time_format = '%H:%M:%S %p on %b %d, %Y", "type=int) parser.add_argument('--rf_rel', dest='rf_rel', metavar='<0 < float < 1>', help='Prior relaxation factor [0.5]', default=0.5,", "action='store_true', help='Also resample output files.') parser.add_argument('--allsame', dest='allsame', action='store_true', help='Flag to indicate that all", "not exist: {0}\".format(args.input_2)) else: args.input_directory_2 = os.getcwd() if hasattr(args, 'mask'): if args.mask: args.mask", "and there is a filter here, corresponding images in dir which also matching", "return parser def tails_type(self, str): acceptable = ['one', 'two'] if str not in", "and 1+ extensions args.input_name_filter_2 = os.path.basename(args.input_2).split(os.extsep)[0] args.input_directory_2 = os.path.dirname(args.input_2) else: raise Exception(\"Input 2", "< float < 1>', help='Desired significance level, alpha [0.05]', default=0.05, type=float) parser.add_argument('--detect_difference', dest='detect_difference',", "is the base MouseMorph program. Run it with Python. 
Usage python mousemorph.py --help", "os.path.isdir(os.path.normpath(args.list)): args.list_names = mmfn.get_names_list(args.list, args.list_filter, extension=args.in_ext_filter) # elif os.path.isfile(os.path.normpath(args.list)): # args.column return args", "'two' (default is two, if omitted)\") else: return str def power_args(self, parser): parser.add_argument('--power',", "a single file.', required=False) parser.add_argument('-corr','--corresponding', dest='corresponding', metavar='<corresponding>', help='NIfTI-1 file, or directory of files,", "argparse.ArgumentParser(\"MouseMorph user input parser\", description = \"MouseMorph, an automatic mouse MR image processor.\",", "file ...\".format(args.input)) # Get the filename, removing path and 1+ extensions args.input_name_filter =", "if not args.unzip: args.ext = '.nii.gz' else: args.ext = '.nii' if args.input: if", "is a directory ...\".format(args.input)) args.input_name_filter = '*' + args.in_filter + '*' # use", "matching the filter will be oriented as per the single input # if", "import mm_nuc_n4 mm_nuc_n4.go(self.args) def orient(self): import mm_orient mm_orient.go(self.args) def pair(self): import mm_pair mm_pair.go(self.args)", "# adapt to accept -corr [dir] [filter] # if input is a single", "default=200, type=int) parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>', help='Full width, half maximum [0.15]', default=0.15, type=float) parser.add_argument('-ss',", "+ '*' # use wildcards if provided a directory alone args.tpm_name_filter_exact = args.tn_filter", "MR image processor.\", usage = \"\"\"mousemorph <command> [<arguments>], where <command> can be any", "image', required=True) parser.add_argument('-r', '--repeat', dest='repeat_path', metavar='<repeat NIfTI file path>', help='Full path of the", "# Methods which actually run the command the user asked for def nuc(self):", "vars(self.args))) time_start = notify_start() getattr(self, first_arg.command)() notify_complete(time_start) # def sanitise_arguments(self, args): # \"\"\"Windows:", "default=0, type=int, required=False) parser.add_argument('-i2','--input_2', dest='input_2', metavar='<directory> or <file path>', help='Second input directory containing", "input ['*']\", default='*') return parser def bsi_args(self, parser): \"\"\"These are the somewhat-unique requirements", "filter files ['']\", default='') # Filters top_parser.add_argument('-if', '--in_filter', dest='in_filter', metavar='<filter>', help=\"If input is", "time.strftime(time_format) time_diff = datetime.strptime(time_stop, time_format) - datetime.strptime(time_start, time_format) mmfn.alert_user(\"Completion time is {0}. 
Took", "to input directory ({0}) in case it is required.\".format(args.input_directory) args.output_directory = args.input_directory if", "<tpm file>', help='TPM directory or file', required=False) parser.add_argument('-tf', '--tpm_filter', dest='tn_filter', metavar='<filter>', help=\"If tpm", "parser): parser.add_argument('--power', dest='power', metavar='<0 < float < 1>', help='Desired power, 1-beta [0.8]', default=0.8,", "filter files ['']\", default='') parser.add_argument('--priors4D', dest='priors4D', action='store_true', help='Use this flag if the priors", "the user some helpful notes at the launch of the script.\"\"\" time_format =", "specific arguments parser.add_argument('-at','--atlas', dest='atlas', metavar='<atlas>', help='Atlas directory containing NIfTIs, or a single file.',", "+ '*' # use wildcards if provided a directory alone args.mask_name_filter_exact = args.mn_filter", "action='store_true', help='Overwrite existing output files [skip]') top_parser.add_argument('-dt', '--delete_temp', action='store_true', help='Delete temp files upon", "there's no filter here, corresponding images in dir will be oriented # if", "parser def sanitise_arguments(self, args): \"\"\" \"\"\" args.in_ext_filter = '.nii*' if not args.unzip: args.ext", "usage = \"mousemorph <command> [<arguments>]\") # Input and output top_parser.add_argument('-i', '--input', dest='input', metavar='<directory>", "datetime # import test_go import mm_functions as mmfn __author__ = '<NAME> (PhD student,", "directory [current]') top_parser.add_argument('-o', '--output', dest='output', metavar='<directory>', help='Output directory [input directory]') top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>',", "standard space ------------------------------- python mousemorph.py orient <arguments> Non-uniformity correct brains ----------------------------- python mousemorph.py", "where <command> can be any of: \\n\\textract, \\n\\torient, \\n\\tnuc, \\n\\tintstan, \\n\\t...\"\"\") command_parser.add_argument('command', help=\"MouseMorph", "'--tpm', dest='tpm', metavar='<tpm directory> or <tpm file>', help='TPM directory or file', required=False) parser.add_argument('-tf',", "loop_args(self, parser): parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each file',", "time is {0} ...\".format(time_start)) return time_start def notify_complete(time_start=None, log_location=''): time_format = '%H:%M:%S %p", "string. The user shouldn't supply that \"\\\", really, but just in case...\"\"\" #", "parser.add_argument('--group', dest='csv_group_filter', metavar='<string>', help='Control group name filter [*]', default='') return parser def pair_args(self,", "# Pre-populate a list of relevant files. self.args.input_files_list = mmfn.get_files_list(self.args.input_directory, self.args.input_name_filter, self.args.in_ext_filter) if", "'--in_filter', dest='in_filter', metavar='<filter>', help=\"If input is a directory, filter files ['']\", default='') #", "same list. Likewise for other directory arguments. 
# corresponding == FSL FLIRT's \"secondary\"", "(\" Input 2 {0} is a directory ...\".format(args.input)) args.input_name_filter_2 = '*' + args.in_filter", "dest='subsample', metavar='<factor>', help='Subsampling factor [4]', default=4, type=int) parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number of levels", "NIfTI image mask', required=True) return parser def orient_args(self, parser): # Add specific arguments", "str): acceptable = ['one', 'two'] if str not in acceptable: raise argparse.ArgumentTypeError(\"--tails argument", "of accuracy.)', default=0, type=float) top_parser.add_argument('-par', '--parallel', dest='parallel', action=\"store_true\", help='Use multiple cores to process", "args.input_directory = os.path.normpath(os.path.join(args.input)) elif os.path.isfile(args.input): # print (\" Input {0} is a file", "& directories self.args = self.sanitise_arguments(self.args) if self.args.input: # Pre-populate a list of relevant", "Get the filename, removing path and 1+ extensions args.input_name_filter_2 = os.path.basename(args.input_2).split(os.extsep)[0] args.input_directory_2 =", "metavar='<tpm directory> or <tpm file>', help='TPM directory or file', required=False) parser.add_argument('-tf', '--tpm_filter', dest='tn_filter',", "manner as their correspondingly-named files in input_directory. (As per \"secondary\" in FSL FLIRT.)')", "# elif os.path.isfile(os.path.normpath(args.list)): # args.column return args # Methods which actually run the", "to be run.\") first_arg = command_parser.parse_args(sys.argv[1:2]) if not hasattr(self, first_arg.command): print(\"command '{0}' not", "{0}. Took {1}.\".format(time_stop, time_diff)) return class MouseMorph(object): \"\"\"Define all the necessary arguments passed", "default=0.15, type=float) parser.add_argument('-ss', dest='subsample', metavar='<factor>', help='Subsampling factor [4]', default=4, type=int) parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>',", "dest='significance', metavar='<0 < float < 1>', help='Desired significance level, alpha [0.05]', default=0.05, type=float)", "the user has supplied directories ending with a \"\\\" (os.sep), which argparse assumes", "mmfn.alert_user(\"Completion time is {0}. Took {1}.\".format(time_stop, time_diff)) return class MouseMorph(object): \"\"\"Define all the", "getattr(self, first_arg.command)() notify_complete(time_start) # def sanitise_arguments(self, args): # \"\"\"Windows: only necessary if the", "a given column of a .CSV file if hasattr(args, 'list'): if args.list: args.list", "parser.add_argument('-lf', '--list_filter', dest='list_filter', metavar='<string>', help=\"String used to filter list input ['*']\", default='*') return", "seg_EM_args(self, parser): # parser = self.add_arg_mask(parser, req=True) parser.add_argument('-t', '--tpm', dest='tpm', metavar='<tpm directory> or", "def notify_start(): \"\"\"Give the user some helpful notes at the launch of the", "mm_loop mm_loop.go(self.args) def seg_EM(self): import mm_seg_EM_group mm_seg_EM_group.go(self.args) def main(): mm = MouseMorph() #", "# use wildcards if provided a directory alone args.input_name_filter_exact = args.in_filter args.input_directory =", "type=float) return parser def sanitise_arguments(self, args): \"\"\" \"\"\" args.in_ext_filter = '.nii*' if not", "approximately the same initial orientation. 
Only the first image will be compared with", "a directory; creating it ...\".format(args.output) args.output_directory = os.path.normpath(os.path.join(args.output)) mmfn.check_create_directories([args.output_directory]) else: print \"No output", "NIfTI image', required=True) parser.add_argument('-r', '--repeat', dest='repeat_path', metavar='<repeat NIfTI file path>', help='Full path of", "directory alone args.input_name_filter_exact_2 = args.in_filter args.input_directory_2 = os.path.normpath(os.path.join(args.input_2)) elif os.path.isfile(args.input_2): # print (\"", "not args.no_output: if args.output: if os.path.isdir(args.output): # print (\" Output {0} is a", "= os.path.normpath(os.path.join(args.input_2)) elif os.path.isfile(args.input_2): # print (\" Input 2 {0} is a file", "argparse.ArgumentTypeError(\"--tails argument must be 'one' or 'two' (default is two, if omitted)\") else:", "hasattr(args, 'list'): if args.list: args.list = os.path.normpath(args.list) if os.path.isdir(os.path.normpath(args.list)): args.list_names = mmfn.get_names_list(args.list, args.list_filter,", "dimension.') # parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible principle axis orientations, in", "metavar='<mask directory> or <mask file>', help='Mask directory or file', required=False) top_parser.add_argument('-mf', '--mask_filter', dest='mn_filter',", "or <tpm file>', help='TPM directory or file', required=False) parser.add_argument('-tf', '--tpm_filter', dest='tn_filter', metavar='<filter>', help=\"If", "are the somewhat-unique requirements for BSI.\"\"\" parser.add_argument('-b', '--baseline', dest='baseline_path', metavar='<baseline NIfTI file path>',", "filename, removing path and 1+ extensions args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0] args.input_directory = os.path.dirname(args.input) else:", "arguments parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file',", "args): # \"\"\"Windows: only necessary if the user has supplied directories ending with", "raise Exception(\"Input not recognised or does not exist: {0}\".format(args.input)) else: args.input_directory = os.getcwd()", "notify_start(): \"\"\"Give the user some helpful notes at the launch of the script.\"\"\"", "metavar='<directory>', help='Output directory [input directory]') top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>', help='Prepend this string to output", "correspondingly-named files in input_directory. 
(As per \"secondary\" in FSL FLIRT.)') parser.add_argument('-res','--resample', dest='resample', action='store_true',", "help='Maximum number of iterations [100]', default=100, type=int) parser.add_argument('--rf_rel', dest='rf_rel', metavar='<0 < float <", "description = \"MouseMorph, an automatic mouse MR image processor.\", usage = \"\"\"mousemorph <command>", "input is a single image and there is a filter here, all images", "wildcards if provided a directory alone args.input_name_filter_exact_2 = args.in_filter args.input_directory_2 = os.path.normpath(os.path.join(args.input_2)) elif", "'--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file', required=req) parser.add_argument('-mf',", "dest='input', metavar='<directory> or <file path>', help='Input file (NIfTI-1, *.nii.gz or *.nii) or directory", "on %b %d, %Y (%Z)' time_start = time.strftime(time_format) mmfn.alert_user(\"Start time is {0} ...\".format(time_start))", "Output {0} is a directory ...\".format(args.input)) args.output_directory = os.path.normpath(os.path.join(args.output)) else: print \"Specified output", "help='Number of classes (no TPM inputs)', type=int) parser.add_argument('--mrf_beta', dest='mrf_beta', metavar='<0 < float <", "# use wildcards if provided a directory alone args.mask_name_filter_exact = args.mn_filter args.mask_directory =", "add_arg_mask(self, parser, req=False): # Mask arguments parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask", "of strings as file names from a directory, or from a given column", "of names [current]', required=req) parser.add_argument('-lf', '--list_filter', dest='list_filter', metavar='<string>', help=\"String used to filter list", "on each file', required=False) return parser def loop_args(self, parser): parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>',", "creating it ...\".format(args.output) args.output_directory = os.path.normpath(os.path.join(args.output)) mmfn.check_create_directories([args.output_directory]) else: print \"No output directory specified.", "greatest dimension.') # parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible principle axis orientations,", "default='') return parser def add_arg_list(self, parser, req=False): parser.add_argument('-l', '--list', dest='list', metavar='<directory> or <.CSV", "really, but just in case...\"\"\" # for name in args.__dict__.keys(): # try: #", "+ '*' # use wildcards if provided a directory alone args.input_name_filter_exact = args.in_filter", "same manner as their correspondingly-named files in input_directory. (As per \"secondary\" in FSL", "UK), <EMAIL>' __created__ = '2015-06-28' def notify_start(): \"\"\"Give the user some helpful notes", "# Get the filename, removing path and 1+ extensions args.input_name_filter_2 = os.path.basename(args.input_2).split(os.extsep)[0] args.input_directory_2", "dest='max_iter', metavar='<int>', help='Maximum number of iterations [100]', default=100, type=int) parser.add_argument('--rf_rel', dest='rf_rel', metavar='<0 <", "will be oriented as per the single input # if input is a", "file', required=False) top_parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help=\"If mask is a directory, filter files", "dest='rf_gstd', metavar='<float>', help='Prior gaussian regularization [2.0]', default=2.0, type=float) return parser def sanitise_arguments(self, args):", "relevant files. 
self.args.input_files_list = mmfn.get_files_list(self.args.input_directory, self.args.input_name_filter, self.args.in_ext_filter) if self.args.reverse_input: self.args.input_files_list = self.args.input_files_list[::-1] #", "be compared with an atlas and the rest will have the same gross", "parser.add_argument('-r', '--repeat', dest='repeat_path', metavar='<repeat NIfTI file path>', help='Full path of the repeat NIfTI", "NIfTI image', required=True) parser.add_argument('-bm', '--baseline_mask', dest='baseline_mask_path', metavar='<baseline mask NIfTI file path>', help='Full path", "to standard space ------------------------------- python mousemorph.py orient <arguments> Non-uniformity correct brains ----------------------------- python", "2 {0} is a file ...\".format(args.input)) # Get the filename, removing path and", "= \"\"\"mousemorph <command> [<arguments>], where <command> can be any of: \\n\\textract, \\n\\torient, \\n\\tnuc,", "of input directories (or files) and combine all input files into the same", "top_parser.add_argument('-par', '--parallel', dest='parallel', action=\"store_true\", help='Use multiple cores to process in parallel using multiprocessing", "= self.add_arg_mask(parser, req=True) parser.add_argument('-t', '--tpm', dest='tpm', metavar='<tpm directory> or <tpm file>', help='TPM directory", "= self.add_arg_list(parser, req=False) parser.add_argument('-col', '--column', dest='column', metavar='<int>', help='Column number [0]', default=0, type=int, required=False)", "self.args.input_name_filter, self.args.in_ext_filter) if self.args.reverse_input: self.args.input_files_list = self.args.input_files_list[::-1] # Run print(\"MouseMorph {0} will be", "else: # break # return args def add_arg_csv(self, parser, req=False): parser.add_argument('-csv', '--csv', dest='csv_path',", "parser, req=False): parser.add_argument('-csv', '--csv', dest='csv_path', metavar='<.CSV file path>', help='Path to .CSV file', required=req)", "the filename, removing path and 1+ extensions args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0] args.mask_directory = os.path.dirname(args.mask)", "parser.add_argument('--allsame', dest='allsame', action='store_true', help='Flag to indicate that all brains are in approximately the", "-arg2 -arg3 'param3'] action2 [-arg1 'param1' -arg2] action3 [-arg1 -arg2] # Run any", "top_parser.add_argument('-ds', '--downsample', dest='downsample', metavar='<factor>', help='Downsampling factor [0, off]\\n\\t(Downsampling input files may speed up", "args.mask_directory = os.path.dirname(args.mask) else: args.mask_directory = None if hasattr(args, 'tpm'): if args.tpm: args.tpm", "acceptable = ['one', 'two'] if str not in acceptable: raise argparse.ArgumentTypeError(\"--tails argument must", "to the end of the string. The user shouldn't supply that \"\\\", really,", "\"No output directory specified. 
Setting to input directory ({0}) in case it is", "relaxation factor [0.5]', default=0.5, type=float) parser.add_argument('--rf_gstd', dest='rf_gstd', metavar='<float>', help='Prior gaussian regularization [2.0]', default=2.0,", "help='Desired significance level, alpha [0.05]', default=0.05, type=float) parser.add_argument('--detect_difference', dest='detect_difference', metavar='<0 < float <", "single file.', required=True) parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each", "output files.') parser.add_argument('--allsame', dest='allsame', action='store_true', help='Flag to indicate that all brains are in", "inputs)', type=int) parser.add_argument('--mrf_beta', dest='mrf_beta', metavar='<0 < float < 1>', help='MRF prior strength [0.4]',", "if not args.no_output: if args.output: if os.path.isdir(args.output): # print (\" Output {0} is", "metavar='<repeat mask NIfTI file path>', help='Full path of the repeat NIfTI image mask',", "AP is not the greatest dimension.') return parser def nuc_args(self, parser): # Add", "{0} is a directory ...\".format(args.input)) args.output_directory = os.path.normpath(os.path.join(args.output)) else: print \"Specified output ({0})", "wildcards if provided a directory alone args.tpm_name_filter_exact = args.tn_filter args.tpm_directory = os.path.normpath(os.path.join(args.tpm)) elif", "dest='allsame', action='store_true', help='Flag to indicate that all brains are in approximately the same", "for def nuc(self): import mm_nuc_n4 mm_nuc_n4.go(self.args) def orient(self): import mm_orient mm_orient.go(self.args) def pair(self):", "to run on each file', required=False) return parser def loop_args(self, parser): parser.add_argument('-fn', '--function',", "top_parser.add_argument('-o', '--output', dest='output', metavar='<directory>', help='Output directory [input directory]') top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>', help='Prepend this", "string to output name', default=\"\") top_parser.add_argument('-ona', '--out_name_append', metavar='<string>', help='Append this string to output", "following it, use as a list of input directories (or files) and combine", "import subprocess from itertools import chain from datetime import datetime # import test_go", "will be compared with an atlas and the rest will have the same", "use as a list of input directories (or files) and combine all input", "Python. 
Usage python mousemorph.py --help Orient brains to standard space ------------------------------- python mousemorph.py", "= os.path.basename(args.tpm).split(os.extsep)[0] args.tpm_directory = os.path.dirname(args.tpm) else: args.tpm_directory = None # Either get a", "are all single 4D NIfTIs rather than individual files per class', required=False) parser.add_argument('--nopriors',", "args.output_directory = args.input_directory if hasattr(args, 'input_2'): if args.input_2: if os.path.isdir(args.input_2): # print (\"", "provided a directory alone args.tpm_name_filter_exact = args.tn_filter args.tpm_directory = os.path.normpath(os.path.join(args.tpm)) elif os.path.isfile(args.tpm): #", "metavar='<corresponding>', help='NIfTI-1 file, or directory of files, to be oriented in the same", "parser with universal arguments top_parser = argparse.ArgumentParser(\"MouseMorph user input parser\", description = \"MouseMorph,", "dest='column', metavar='<int>', help='Column number [0]', default=0, type=int, required=False) parser.add_argument('-i2','--input_2', dest='input_2', metavar='<directory> or <file", "factor [0, off]\\n\\t(Downsampling input files may speed up processing, at the expense of", "is a directory and there is a filter here, corresponding images in dir", "as per the single input # if input is a single image and", "args.in_ext_filter = '.nii*' if not args.unzip: args.ext = '.nii.gz' else: args.ext = '.nii'", "rest will have the same gross orientation applied. Final minor corrections will be", "all 12 possible principle axis orientations, in case AP is not the greatest", "dest='in_filter', metavar='<filter>', help=\"If input is a directory, filter files ['']\", default='') # Processing", "orientation. Only the first image will be compared with an atlas and the", "metavar='<function>', help='MouseMorph function to run on each file', required=False) return parser def loop_args(self,", "output top_parser.add_argument('-i', '--input', dest='input', metavar='<directory> or <file path>', help='Input file (NIfTI-1, *.nii.gz or", "brains ----------------------------- python mousemorph.py nuc <arguments> \"\"\" # To do # ----- #", "into the same list. Likewise for other directory arguments. # corresponding == FSL", "metavar='<factor>', help='Subsampling factor [4]', default=4, type=int) parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number of levels [4]',", "all input files into the same list. Likewise for other directory arguments. #", "the rest will have the same gross orientation applied. Final minor corrections will", "NIfTI file path>', help='Full path of the repeat NIfTI image', required=True) parser.add_argument('-bm', '--baseline_mask',", "save output files [False]') top_parser.add_argument('-ow', '--overwrite', action='store_true', help='Overwrite existing output files [skip]') top_parser.add_argument('-dt',", "is not a directory; creating it ...\".format(args.output) args.output_directory = os.path.normpath(os.path.join(args.output)) mmfn.check_create_directories([args.output_directory]) else: print", "two, if omitted)\") else: return str def power_args(self, parser): parser.add_argument('--power', dest='power', metavar='<0 <", "an atlas and the rest will have the same gross orientation applied. 
Final", "reverse order') top_parser.add_argument('-v', '--verbose', action=\"store_true\", help=\"Verbose output\") # Add command-specific arguments parser =", "argparse assumes was intentional and adds the final user quote to the end", "top_parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help=\"If mask is a directory, filter files ['']\", default='')", "Input {0} is a directory ...\".format(args.input)) args.input_name_filter = '*' + args.in_filter + '*'", "'--out_name_prepend', metavar='<string>', help='Prepend this string to output name', default=\"\") top_parser.add_argument('-ona', '--out_name_append', metavar='<string>', help='Append", "mask is a directory, filter files ['']\", default='') return parser def add_arg_list(self, parser,", "parser): parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each file', required=False)", "possible principle axis orientations, in case AP is not the greatest dimension.') return", "os.path.basename(args.mask).split(os.extsep)[0] args.mask_directory = os.path.dirname(args.mask) else: args.mask_directory = None if hasattr(args, 'tpm'): if args.tpm:", "nuc(self): import mm_nuc_n4 mm_nuc_n4.go(self.args) def orient(self): import mm_orient mm_orient.go(self.args) def pair(self): import mm_pair", "completion [False]') # Mask arguments top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>',", ".CSV file whose Nth column will be used as a list of names", "__init__(self): command_parser = argparse.ArgumentParser(\"argp_mousemorph\", description = \"MouseMorph, an automatic mouse MR image processor.\",", "add_arg_list(self, parser, req=False): parser.add_argument('-l', '--list', dest='list', metavar='<directory> or <.CSV file path>', help='Either a", "os.path.isdir(args.input_2): # print (\" Input 2 {0} is a directory ...\".format(args.input)) args.input_name_filter_2 =", "NIfTIs rather than individual files per class', required=False) parser.add_argument('--nopriors', dest='nopriors', metavar='<int>', help='Number of", "args.tpm_directory = os.path.normpath(os.path.join(args.tpm)) elif os.path.isfile(args.tpm): # Get the filename, removing path and 1+", "metavar='<filter>', help=\"If input is a directory, filter files ['']\", default='') # Processing options", "'--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each file', required=False) return parser", "dest='downsample', metavar='<factor>', help='Downsampling factor [0, off]\\n\\t(Downsampling input files may speed up processing, at", "possible principle axis orientations, in case AP is not the greatest dimension.') #", "oriented in the same manner as their correspondingly-named files in input_directory. 
(As per", "path of the baseline NIfTI image mask', required=True) parser.add_argument('-rm', '--repeat_mask', dest='repeat_mask_path', metavar='<repeat mask", "a file ...\".format(args.input)) # Get the filename, removing path and 1+ extensions args.input_name_filter", "help='Subsampling factor [4]', default=4, type=int) parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number of levels [4]', default=4,", "help='Mask directory or file', required=req) parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help=\"If mask is a", "be performed individually.') parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible principle axis orientations,", "a directory, filter files ['']\", default='') parser.add_argument('--priors4D', dest='priors4D', action='store_true', help='Use this flag if", "*.nii) or directory [current]') top_parser.add_argument('-o', '--output', dest='output', metavar='<directory>', help='Output directory [input directory]') top_parser.add_argument('-onp',", "'tpm'): if args.tpm: args.tpm = os.path.normpath(args.tpm) if os.path.isdir(args.tpm): args.tpm_name_filter = '*' + args.tn_filter", "end of the string. The user shouldn't supply that \"\\\", really, but just", "'%H:%M:%S %p on %b %d, %Y (%Z)' time_start = time.strftime(time_format) mmfn.alert_user(\"Start time is", "default='two', type=self.tails_type) parser = self.add_arg_csv(parser, req=True) parser.add_argument('--group', dest='csv_group_filter', metavar='<string>', help='Control group name filter", "{0} is a file ...\".format(args.input)) # Get the filename, removing path and 1+", "first_arg.command)() notify_complete(time_start) # def sanitise_arguments(self, args): # \"\"\"Windows: only necessary if the user", "args.ext = '.nii.gz' else: args.ext = '.nii' if args.input: if os.path.isdir(args.input): # print", "upon completion [False]') # Mask arguments top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask", "action=\"store_true\", help='Run through input files in reverse order') top_parser.add_argument('-v', '--verbose', action=\"store_true\", help=\"Verbose output\")", "default=\"\") top_parser.add_argument('-ona', '--out_name_append', metavar='<string>', help='Append this string to output name', default=\"\") top_parser.add_argument('-uz', '--unzip',", "required=req) parser.add_argument('-lf', '--list_filter', dest='list_filter', metavar='<string>', help=\"String used to filter list input ['*']\", default='*')", "else: print \"Specified output ({0}) is not a directory; creating it ...\".format(args.output) args.output_directory", "parser, req=False): parser.add_argument('-l', '--list', dest='list', metavar='<directory> or <.CSV file path>', help='Either a directory", "default=0.4, type=float) parser.add_argument('--max_iter', dest='max_iter', metavar='<int>', help='Maximum number of iterations [100]', default=100, type=int) parser.add_argument('--rf_rel',", "files may speed up processing, at the expense of accuracy.)', default=0, type=float) top_parser.add_argument('-par',", "or 'two' (default is two, if omitted)\") else: return str def power_args(self, parser):", "default=256, type=int) return parser def tails_type(self, str): acceptable = ['one', 'two'] if str", "'input_2'): if args.input_2: if os.path.isdir(args.input_2): # print (\" Input 2 {0} is a", "FLIRT.)') parser.add_argument('-res','--resample', dest='resample', action='store_true', help='Also resample output files.') 
parser.add_argument('--allsame', dest='allsame', action='store_true', help='Flag to", "parser = self.add_arg_csv(parser, req=True) parser.add_argument('--group', dest='csv_group_filter', metavar='<string>', help='Control group name filter [*]', default='')", "{0}\".format(args.input)) else: args.input_directory = os.getcwd() if not args.no_output: if args.output: if os.path.isdir(args.output): #", "levels [4]', default=4, type=int) parser.add_argument('-conv', dest='convergence', metavar='<convergence>', help='Convergence threshold [0.001]', default=0.001, type=float) parser.add_argument('-nhb',", "mm_pair.go(self.args) def power(self): import mm_powercalc mm_powercalc.go(self.args) def loop(self): import mm_loop mm_loop.go(self.args) def seg_EM(self):", "{0} is a directory ...\".format(args.input)) args.input_name_filter_2 = '*' + args.in_filter + '*' #", "args def add_arg_csv(self, parser, req=False): parser.add_argument('-csv', '--csv', dest='csv_path', metavar='<.CSV file path>', help='Path to", "args.mask_directory = None if hasattr(args, 'tpm'): if args.tpm: args.tpm = os.path.normpath(args.tpm) if os.path.isdir(args.tpm):", "return parser def add_arg_mask(self, parser, req=False): # Mask arguments parser.add_argument('-m', '--mask', dest='mask', metavar='<mask", "dest='baseline_path', metavar='<baseline NIfTI file path>', help='Full path of the baseline NIfTI image', required=True)", "print (\" Input {0} is a directory ...\".format(args.input)) args.input_name_filter = '*' + args.in_filter", "args.in_filter + '*' # use wildcards if provided a directory alone args.input_name_filter_exact =", "directory [input directory]') top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>', help='Prepend this string to output name', default=\"\")", "oriented as per the single input # if input is a single image", "Mask arguments top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or", "the filename, removing path and 1+ extensions args.tpm_name_filter = os.path.basename(args.tpm).split(os.extsep)[0] args.tpm_directory = os.path.dirname(args.tpm)", "args.input_name_filter = '*' + args.in_filter + '*' # use wildcards if provided a", "= command_parser.parse_args(sys.argv[1:2]) if not hasattr(self, first_arg.command): print(\"command '{0}' not recognised.\".format(first_arg.command)) command_parser.print_help() sys.exit(1) #", "be run.\") first_arg = command_parser.parse_args(sys.argv[1:2]) if not hasattr(self, first_arg.command): print(\"command '{0}' not recognised.\".format(first_arg.command))", "os.path.isfile(args.input): # print (\" Input {0} is a file ...\".format(args.input)) # Get the", "passed to MouseMorph programs. 
\"\"\" def __init__(self): command_parser = argparse.ArgumentParser(\"argp_mousemorph\", description = \"MouseMorph,", "parser def seg_EM_args(self, parser): # parser = self.add_arg_mask(parser, req=True) parser.add_argument('-t', '--tpm', dest='tpm', metavar='<tpm", "does not exist: {0}\".format(args.input_2)) else: args.input_directory_2 = os.getcwd() if hasattr(args, 'mask'): if args.mask:", "add_arg_csv(self, parser, req=False): parser.add_argument('-csv', '--csv', dest='csv_path', metavar='<.CSV file path>', help='Path to .CSV file',", "one argument following it, use as a list of input directories (or files)", "= args.in_filter args.input_directory_2 = os.path.normpath(os.path.join(args.input_2)) elif os.path.isfile(args.input_2): # print (\" Input 2 {0}", "necessary arguments passed to MouseMorph programs. \"\"\" def __init__(self): command_parser = argparse.ArgumentParser(\"argp_mousemorph\", description", "level, alpha [0.05]', default=0.05, type=float) parser.add_argument('--detect_difference', dest='detect_difference', metavar='<0 < float < 1>', help='Fractional", "\"secondary\" in FSL FLIRT.)') parser.add_argument('-res','--resample', dest='resample', action='store_true', help='Also resample output files.') parser.add_argument('--allsame', dest='allsame',", "= '%H:%M:%S %p on %b %d, %Y (%Z)' time_stop = time.strftime(time_format) time_diff =", "action1 [-arg1 -arg2 -arg3 'param3'] action2 [-arg1 'param1' -arg2] action3 [-arg1 -arg2] #", "containing NIfTIs, or a single file.', required=True) parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function", "metavar='<repeat NIfTI file path>', help='Full path of the repeat NIfTI image', required=True) parser.add_argument('-bm',", "single 4D NIfTIs rather than individual files per class', required=False) parser.add_argument('--nopriors', dest='nopriors', metavar='<int>',", "default=4, type=int) parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number of levels [4]', default=4, type=int) parser.add_argument('-conv', dest='convergence',", "[0.15]', default=0.15, type=float) parser.add_argument('-ss', dest='subsample', metavar='<factor>', help='Subsampling factor [4]', default=4, type=int) parser.add_argument('-nlevels', dest='nlevels',", "< float < 1>', help='MRF prior strength [0.4]', default=0.4, type=float) parser.add_argument('--max_iter', dest='max_iter', metavar='<int>',", "str not in acceptable: raise argparse.ArgumentTypeError(\"--tails argument must be 'one' or 'two' (default", "# Get the filename, removing path and 1+ extensions args.tpm_name_filter = os.path.basename(args.tpm).split(os.extsep)[0] args.tpm_directory", "the filter will be oriented as per the single input # if input", "Replace mm_multi.py with: run mousemorph.py multi action1 [-arg1 -arg2 -arg3 'param3'] action2 [-arg1", "parser.parse_args(sys.argv[2:]) # Sanitise arguments & directories self.args = self.sanitise_arguments(self.args) if self.args.input: # Pre-populate", "the greatest dimension.') return parser def nuc_args(self, parser): # Add specific arguments parser.add_argument('-its','--iterations',", "file names from a directory, or from a given column of a .CSV", "%d, %Y (%Z)' time_stop = time.strftime(time_format) time_diff = datetime.strptime(time_stop, time_format) - datetime.strptime(time_start, time_format)", "directory arguments. 
# corresponding == FSL FLIRT's \"secondary\" # adapt to accept -corr", "(As per \"secondary\" in FSL FLIRT.)') parser.add_argument('-res','--resample', dest='resample', action='store_true', help='Also resample output files.')", "parser.add_argument('-tf', '--tpm_filter', dest='tn_filter', metavar='<filter>', help=\"If tpm is a directory, filter files ['']\", default='')", "# Get the filename, removing path and 1+ extensions args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0] args.mask_directory", "help='Input file (NIfTI-1, *.nii.gz or *.nii) or directory [current]') top_parser.add_argument('-o', '--output', dest='output', metavar='<directory>',", "mm_orient mm_orient.go(self.args) def pair(self): import mm_pair mm_pair.go(self.args) def power(self): import mm_powercalc mm_powercalc.go(self.args) def", "if provided a directory alone args.input_name_filter_exact_2 = args.in_filter args.input_directory_2 = os.path.normpath(os.path.join(args.input_2)) elif os.path.isfile(args.input_2):", "file path>', help='Full path of the baseline NIfTI image mask', required=True) parser.add_argument('-rm', '--repeat_mask',", "parser.add_argument('-i2','--input_2', dest='input_2', metavar='<directory> or <file path>', help='Second input directory containing NIfTIs, or a", "parser.add_argument('-l', '--list', dest='list', metavar='<directory> or <.CSV file path>', help='Either a directory containing files", "principle axis orientations, in case AP is not the greatest dimension.') return parser", "mm_orient.go(self.args) def pair(self): import mm_pair mm_pair.go(self.args) def power(self): import mm_powercalc mm_powercalc.go(self.args) def loop(self):", "dest='input_2', metavar='<directory> or <file path>', help='Second input directory containing NIfTIs, or a single", "dest='tn_filter', metavar='<filter>', help=\"If tpm is a directory, filter files ['']\", default='') parser.add_argument('--priors4D', dest='priors4D',", "[False]') top_parser.add_argument('-ow', '--overwrite', action='store_true', help='Overwrite existing output files [skip]') top_parser.add_argument('-dt', '--delete_temp', action='store_true', help='Delete", "supply that \"\\\", really, but just in case...\"\"\" # for name in args.__dict__.keys():", "dest='atlas', metavar='<atlas>', help='Atlas directory containing NIfTIs, or a single file.', required=False) parser.add_argument('-corr','--corresponding', dest='corresponding',", "mm = MouseMorph() # print(\"{0}\".format(mm.__dict__)) print(\"{0}\".format(mm.args)) if __name__ == '__main__': main() # End", "matched, or a .CSV file whose Nth column will be used as a", "args.ext = '.nii' if args.input: if os.path.isdir(args.input): # print (\" Input {0} is", "12 possible principle axis orientations, in case AP is not the greatest dimension.')", "print (\" Output {0} is a directory ...\".format(args.input)) args.output_directory = os.path.normpath(os.path.join(args.output)) else: print", "= os.path.normpath(args.list) if os.path.isdir(os.path.normpath(args.list)): args.list_names = mmfn.get_names_list(args.list, args.list_filter, extension=args.in_ext_filter) # elif os.path.isfile(os.path.normpath(args.list)): #", "default=1, type=int, required=req) return parser def add_arg_mask(self, parser, req=False): # Mask arguments parser.add_argument('-m',", "compared with an atlas and the rest will have the same gross orientation", "if input is a single image and there is a filter here, all", "float < 1>', help='Desired significance level, alpha [0.05]', default=0.05, type=float) 
parser.add_argument('--detect_difference', dest='detect_difference', metavar='<0", "[200]', default=200, type=int) parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>', help='Full width, half maximum [0.15]', default=0.15, type=float)", "parser = self.add_arg_mask(parser, req=True) parser.add_argument('-t', '--tpm', dest='tpm', metavar='<tpm directory> or <tpm file>', help='TPM", "(default is two, if omitted)\") else: return str def power_args(self, parser): parser.add_argument('--power', dest='power',", "%d, %Y (%Z)' time_start = time.strftime(time_format) mmfn.alert_user(\"Start time is {0} ...\".format(time_start)) return time_start", "# else: # break # return args def add_arg_csv(self, parser, req=False): parser.add_argument('-csv', '--csv',", "mousemorph.py nuc <arguments> \"\"\" # To do # ----- # 1. if -i", "a file ...\".format(args.input)) # Get the filename, removing path and 1+ extensions args.input_name_filter_2", "be oriented in the same manner as their correspondingly-named files in input_directory. (As", "= ['one', 'two'] if str not in acceptable: raise argparse.ArgumentTypeError(\"--tails argument must be", "path>', help='Either a directory containing files whose names are to be matched, or", "1>', help='Desired power, 1-beta [0.8]', default=0.8, type=float) parser.add_argument('--significance', dest='significance', metavar='<0 < float <", "files [compressed]') top_parser.add_argument('-no', '--no_output', dest='no_output', action='store_true', help='Don\\'t save output files [False]') top_parser.add_argument('-ow', '--overwrite',", "# for name in args.__dict__.keys(): # try: # s = getattr(args, name) #", "files per class', required=False) parser.add_argument('--nopriors', dest='nopriors', metavar='<int>', help='Number of classes (no TPM inputs)',", "args # Methods which actually run the command the user asked for def", "time_stop = time.strftime(time_format) time_diff = datetime.strptime(time_stop, time_format) - datetime.strptime(time_start, time_format) mmfn.alert_user(\"Completion time is", "help='Atlas directory containing NIfTIs, or a single file.', required=False) parser.add_argument('-corr','--corresponding', dest='corresponding', metavar='<corresponding>', help='NIfTI-1", "< float < 1>', help='Desired power, 1-beta [0.8]', default=0.8, type=float) parser.add_argument('--significance', dest='significance', metavar='<0", "files. 
import os
import sys
import glob
import time
import argparse
import subprocess
from datetime import datetime

# import test_go
import mm_functions as mmfn

__author__ = '<NAME> (PhD student, CMIC & CABI, UCL, UK), <EMAIL>'
__created__ = '2015-06-28'
def notify_start():
    """Give the user some helpful notes at the launch of the script."""
    time_format = '%H:%M:%S %p on %b %d, %Y (%Z)'
    time_start = time.strftime(time_format)
    mmfn.alert_user("Start time is {0} ...".format(time_start))
    return time_start


def notify_complete(time_start=None, log_location=''):
    time_format = '%H:%M:%S %p on %b %d, %Y (%Z)'
    time_stop = time.strftime(time_format)
    time_diff = datetime.strptime(time_stop, time_format) - datetime.strptime(time_start, time_format)
    mmfn.alert_user("Completion time is {0}. Took {1}.".format(time_stop, time_diff))
    return
class MouseMorph(object):
    """Define all the necessary arguments passed to MouseMorph programs. """

    def __init__(self):
        command_parser = argparse.ArgumentParser("argp_mousemorph", description="MouseMorph, an automatic mouse MR image processor.", usage="""mousemorph <command> [<arguments>], where <command> can be any of: \n\textract, \n\torient, \n\tnuc, \n\tintstan, \n\t...""")
        command_parser.add_argument('command', help="MouseMorph program to be run.")
        first_arg = command_parser.parse_args(sys.argv[1:2])
        if not hasattr(self, first_arg.command):
            print("command '{0}' not recognised.".format(first_arg.command))
            command_parser.print_help()
            sys.exit(1)

        # Top-level parser with universal arguments
        top_parser = argparse.ArgumentParser("MouseMorph user input parser", description="MouseMorph, an automatic mouse MR image processor.", usage="mousemorph <command> [<arguments>]")

        # Input and output
        top_parser.add_argument('-i', '--input', dest='input', metavar='<directory> or <file path>', help='Input file (NIfTI-1, *.nii.gz or *.nii) or directory [current]')
        top_parser.add_argument('-o', '--output', dest='output', metavar='<directory>', help='Output directory [input directory]')
        top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>', help='Prepend this string to output name', default="")
        top_parser.add_argument('-ona', '--out_name_append', metavar='<string>', help='Append this string to output name', default="")
        top_parser.add_argument('-uz', '--unzip', action='store_true', help='Uncompressed output files [compressed]')
        top_parser.add_argument('-no', '--no_output', dest='no_output', action='store_true', help='Don\'t save output files [False]')
        top_parser.add_argument('-ow', '--overwrite', action='store_true', help='Overwrite existing output files [skip]')
        top_parser.add_argument('-dt', '--delete_temp', action='store_true', help='Delete temp files upon completion [False]')
        # Mask arguments
        top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file', required=False)
        top_parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help="If mask is a directory, filter files ['']", default='')

        # Filters
        top_parser.add_argument('-if', '--in_filter', dest='in_filter', metavar='<filter>', help="If input is a directory, filter files ['']", default='')

        # Processing options
        top_parser.add_argument('-ds', '--downsample', dest='downsample', metavar='<factor>', help='Downsampling factor [0, off]\n\t(Downsampling input files may speed up processing, at the expense of accuracy.)', default=0, type=float)
        top_parser.add_argument('-par', '--parallel', dest='parallel', action="store_true", help='Use multiple cores to process in parallel using multiprocessing [off]')
        top_parser.add_argument('-rev', '--reverse_input', dest='reverse_input', action="store_true", help='Run through input files in reverse order')
        top_parser.add_argument('-v', '--verbose', action="store_true", help="Verbose output")

        # Add command-specific arguments
        parser = getattr(self, first_arg.command + '_args')(top_parser)
        self.args = parser.parse_args(sys.argv[2:])

        # Sanitise arguments & directories
        self.args = self.sanitise_arguments(self.args)

        if self.args.input:
            # Pre-populate a list of relevant files.
            self.args.input_files_list = mmfn.get_files_list(self.args.input_directory, self.args.input_name_filter, self.args.in_ext_filter)
            if self.args.reverse_input:
                self.args.input_files_list = self.args.input_files_list[::-1]

        # Run
        print("MouseMorph {0} will be run with arguments: \n\t{1}".format(first_arg.command, vars(self.args)))
        time_start = notify_start()
        getattr(self, first_arg.command)()
        notify_complete(time_start)
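    # The two-stage parse above is a standard argparse dispatch pattern:
    # argv[1:2] is parsed first to pick the sub-command, then the matching
    # '<command>_args' method extends the shared top_parser before the
    # remaining argv[2:] is parsed. A minimal self-contained sketch of the
    # same idea (names here are illustrative only, not part of MouseMorph):
    #
    #   class Dispatcher(object):
    #       def __init__(self):
    #           cp = argparse.ArgumentParser()
    #           cp.add_argument('command')
    #           first = cp.parse_args(sys.argv[1:2])
    #           sub = getattr(self, first.command + '_args')(argparse.ArgumentParser())
    #           self.args = sub.parse_args(sys.argv[2:])
    #           getattr(self, first.command)()
    #
    #       def demo_args(self, parser):
    #           parser.add_argument('-n', type=int, default=1)
    #           return parser
    #
    #       def demo(self):
    #           print(self.args.n)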
Took {1}.\".format(time_stop, time_diff)) return class MouseMorph(object): \"\"\"Define", "is required.\".format(args.input_directory) args.output_directory = args.input_directory if hasattr(args, 'input_2'): if args.input_2: if os.path.isdir(args.input_2): #", "or <file path>', help='Input file (NIfTI-1, *.nii.gz or *.nii) or directory [current]') top_parser.add_argument('-o',", "two [two]', default='two', type=self.tails_type) parser = self.add_arg_csv(parser, req=True) parser.add_argument('--group', dest='csv_group_filter', metavar='<string>', help='Control group", "help='MouseMorph function to run on each file', required=False) return parser def loop_args(self, parser):", "top_parser.add_argument('-dt', '--delete_temp', action='store_true', help='Delete temp files upon completion [False]') # Mask arguments top_parser.add_argument('-m',", "metavar='<function>', help='MouseMorph function to run on each file', required=False) return parser def seg_EM_args(self,", "file.', required=False) parser.add_argument('-corr','--corresponding', dest='corresponding', metavar='<corresponding>', help='NIfTI-1 file, or directory of files, to be", "top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file', required=False)", "first_arg.command + '_args')(top_parser) self.args = parser.parse_args(sys.argv[2:]) # Sanitise arguments & directories self.args =", "directory alone args.input_name_filter_exact = args.in_filter args.input_directory = os.path.normpath(os.path.join(args.input)) elif os.path.isfile(args.input): # print (\"", "be oriented # if input is a directory and there is a filter", "directory> or <tpm file>', help='TPM directory or file', required=False) parser.add_argument('-tf', '--tpm_filter', dest='tn_filter', metavar='<filter>',", "help=\"If mask is a directory, filter files ['']\", default='') # Filters top_parser.add_argument('-if', '--in_filter',", "# try: # s = getattr(args, name) # setattr(args, name, s.rstrip(os.sep).rstrip('\"')) # except", "help='Fractional difference from the control mean to detect', default=0.25, type=float) parser.add_argument('--tails', dest='tails', metavar='<string>',", "files [skip]') top_parser.add_argument('-dt', '--delete_temp', action='store_true', help='Delete temp files upon completion [False]') # Mask", "orientation applied. 
    def add_arg_csv(self, parser, req=False):
        parser.add_argument('-csv', '--csv', dest='csv_path', metavar='<.CSV file path>', help='Path to .CSV file', required=req)
        parser.add_argument('-col', '--column', dest='column', metavar='<int int ... int>', nargs='+', help='Column number(s) [0 1]', default=1, type=int, required=req)
        return parser

    def add_arg_mask(self, parser, req=False):
        # Mask arguments
        parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file', required=req)
        parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help="If mask is a directory, filter files ['']", default='')
        return parser

    def add_arg_list(self, parser, req=False):
        parser.add_argument('-l', '--list', dest='list', metavar='<directory> or <.CSV file path>', help='Either a directory containing files whose names are to be matched, or a .CSV file whose Nth column will be used as a list of names [current]', required=req)
        parser.add_argument('-lf', '--list_filter', dest='list_filter', metavar='<string>', help="String used to filter list input ['*']", default='*')
        return parser
    def bsi_args(self, parser):
        """These are the somewhat-unique requirements for BSI."""
        parser.add_argument('-b', '--baseline', dest='baseline_path', metavar='<baseline NIfTI file path>', help='Full path of the baseline NIfTI image', required=True)
        parser.add_argument('-r', '--repeat', dest='repeat_path', metavar='<repeat NIfTI file path>', help='Full path of the repeat NIfTI image', required=True)
        parser.add_argument('-bm', '--baseline_mask', dest='baseline_mask_path', metavar='<baseline mask NIfTI file path>', help='Full path of the baseline NIfTI image mask', required=True)
        parser.add_argument('-rm', '--repeat_mask', dest='repeat_mask_path', metavar='<repeat mask NIfTI file path>', help='Full path of the repeat NIfTI image mask', required=True)
        return parser
    def orient_args(self, parser):
        # Add specific arguments
        parser.add_argument('-at', '--atlas', dest='atlas', metavar='<atlas>', help='Atlas directory containing NIfTIs, or a single file.', required=False)
        parser.add_argument('-corr', '--corresponding', dest='corresponding', metavar='<corresponding>', help='NIfTI-1 file, or directory of files, to be oriented in the same manner as their correspondingly-named files in input_directory. (As per "secondary" in FSL FLIRT.)')
        parser.add_argument('-res', '--resample', dest='resample', action='store_true', help='Also resample output files.')
        parser.add_argument('--allsame', dest='allsame', action='store_true', help='Flag to indicate that all brains are in approximately the same initial orientation. Only the first image will be compared with an atlas and the rest will have the same gross orientation applied. Final minor corrections will be performed individually.')
        parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible principal axis orientations, in case AP is not the greatest dimension.')
        return parser
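    # Example orient invocation (hypothetical paths):
    #   python mousemorph.py orient -i ./scans -at ./atlas --allsame --allpa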
    def nuc_args(self, parser):
        # Add specific arguments
        parser.add_argument('-its', '--iterations', dest='iterations', metavar='<iterations>', help='Iterations to run [200]', default=200, type=int)
        parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>', help='Full width, half maximum [0.15]', default=0.15, type=float)
        parser.add_argument('-ss', dest='subsample', metavar='<factor>', help='Subsampling factor [4]', default=4, type=int)
        parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number of levels [4]', default=4, type=int)
        parser.add_argument('-conv', dest='convergence', metavar='<convergence>', help='Convergence threshold [0.001]', default=0.001, type=float)
        parser.add_argument('-nhb', dest='nhistbins', metavar='<nhistbins>', help='Number of histogram bins [256]', default=256, type=int)
        return parser
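    # Example nuc invocation (hypothetical paths; bracketed defaults as above):
    #   python mousemorph.py nuc -i ./scans -o ./nuc_out -its 300 -fwhm 0.2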
    def tails_type(self, value):
        acceptable = ['one', 'two']
        if value not in acceptable:
            raise argparse.ArgumentTypeError("--tails argument must be 'one' or 'two' (default is two, if omitted)")
        else:
            return value

    def power_args(self, parser):
        parser.add_argument('--power', dest='power', metavar='<0 < float < 1>', help='Desired power, 1-beta [0.8]', default=0.8, type=float)
        parser.add_argument('--significance', dest='significance', metavar='<0 < float < 1>', help='Desired significance level, alpha [0.05]', default=0.05, type=float)
        parser.add_argument('--detect_difference', dest='detect_difference', metavar='<0 < float < 1>', help='Fractional difference from the control mean to detect', default=0.25, type=float)
        parser.add_argument('--tails', dest='tails', metavar='<string>', help='Tails, one or two [two]', default='two', type=self.tails_type)
        parser = self.add_arg_csv(parser, req=True)
        parser.add_argument('--group', dest='csv_group_filter', metavar='<string>', help='Control group name filter [*]', default='')
        return parser
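    # mm_powercalc.go() (imported below) does the actual calculation; for
    # orientation only, a textbook two-sample, two-tailed sample-size estimate
    # from these parameters would look like the sketch below, where sigma is
    # the control-group SD and delta the absolute difference to detect (an
    # assumption for illustration, not a quote of mm_powercalc):
    #
    #   from scipy.stats import norm
    #   n_per_group = 2.0 * ((norm.ppf(1 - alpha / 2) + norm.ppf(power)) * sigma / delta) ** 2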
    def pair_args(self, parser):
        parser = self.add_arg_list(parser, req=False)
        parser.add_argument('-col', '--column', dest='column', metavar='<int>', help='Column number [0]', default=0, type=int, required=False)
        parser.add_argument('-i2', '--input_2', dest='input_2', metavar='<directory> or <file path>', help='Second input directory containing NIfTIs, or a single file.', required=True)
        parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each file', required=False)
        return parser

    def loop_args(self, parser):
        parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each file', required=False)
        return parser
    def seg_EM_args(self, parser):
        # parser = self.add_arg_mask(parser, req=True)
        parser.add_argument('-t', '--tpm', dest='tpm', metavar='<tpm directory> or <tpm file>', help='TPM directory or file', required=False)
        parser.add_argument('-tf', '--tpm_filter', dest='tn_filter', metavar='<filter>', help="If tpm is a directory, filter files ['']", default='')
        parser.add_argument('--priors4D', dest='priors4D', action='store_true', help='Use this flag if the priors are all single 4D NIfTIs rather than individual files per class', required=False)
        parser.add_argument('--nopriors', dest='nopriors', metavar='<int>', help='Number of classes (no TPM inputs)', type=int)
        parser.add_argument('--mrf_beta', dest='mrf_beta', metavar='<0 < float < 1>', help='MRF prior strength [0.4]', default=0.4, type=float)
        parser.add_argument('--max_iter', dest='max_iter', metavar='<int>', help='Maximum number of iterations [100]', default=100, type=int)
        parser.add_argument('--rf_rel', dest='rf_rel', metavar='<0 < float < 1>', help='Prior relaxation factor [0.5]', default=0.5, type=float)
        parser.add_argument('--rf_gstd', dest='rf_gstd', metavar='<float>', help='Prior gaussian regularization [2.0]', default=2.0, type=float)
        return parser
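    # Example seg_EM invocation (hypothetical paths; one prior per class under ./tpms):
    #   python mousemorph.py seg_EM -i ./brains -t ./tpms --mrf_beta 0.3 --max_iter 50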
    def sanitise_arguments(self, args):
        """ """
        args.in_ext_filter = '.nii*'
        if not args.unzip:
            args.ext = '.nii.gz'
        else:
            args.ext = '.nii'

        if args.input:
            if os.path.isdir(args.input):
                # print(" Input {0} is a directory ...".format(args.input))
                args.input_name_filter = '*' + args.in_filter + '*'  # use wildcards if provided a directory alone
                args.input_name_filter_exact = args.in_filter
                args.input_directory = os.path.normpath(os.path.join(args.input))
            elif os.path.isfile(args.input):
                # print(" Input {0} is a file ...".format(args.input))
                # Get the filename, removing path and 1+ extensions
                args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0]
                args.input_directory = os.path.dirname(args.input)
            else:
                raise Exception("Input not recognised or does not exist: {0}".format(args.input))
        else:
            args.input_directory = os.getcwd()

        if not args.no_output:
            if args.output:
                if os.path.isdir(args.output):
                    # print(" Output {0} is a directory ...".format(args.output))
                    args.output_directory = os.path.normpath(os.path.join(args.output))
                else:
                    print("Specified output ({0}) is not a directory; creating it ...".format(args.output))
                    args.output_directory = os.path.normpath(os.path.join(args.output))
                    mmfn.check_create_directories([args.output_directory])
            else:
                print("No output directory specified. Setting to input directory ({0}) in case it is required.".format(args.input_directory))
                args.output_directory = args.input_directory

        if hasattr(args, 'input_2'):
            if args.input_2:
                if os.path.isdir(args.input_2):
                    # print(" Input 2 {0} is a directory ...".format(args.input_2))
                    args.input_name_filter_2 = '*' + args.in_filter + '*'  # use wildcards if provided a directory alone
                    args.input_name_filter_exact_2 = args.in_filter
                    args.input_directory_2 = os.path.normpath(os.path.join(args.input_2))
                elif os.path.isfile(args.input_2):
                    # print(" Input 2 {0} is a file ...".format(args.input_2))
                    # Get the filename, removing path and 1+ extensions
                    args.input_name_filter_2 = os.path.basename(args.input_2).split(os.extsep)[0]
                    args.input_directory_2 = os.path.dirname(args.input_2)
                else:
                    raise Exception("Input 2 not recognised or does not exist: {0}".format(args.input_2))
            else:
                args.input_directory_2 = os.getcwd()

        if hasattr(args, 'mask'):
            if args.mask:
                args.mask = os.path.normpath(args.mask)
                if os.path.isdir(args.mask):
                    args.mask_name_filter = '*' + args.mn_filter + '*'  # use wildcards if provided a directory alone
                    args.mask_name_filter_exact = args.mn_filter
                    args.mask_directory = os.path.normpath(os.path.join(args.mask))
                elif os.path.isfile(args.mask):
                    # Get the filename, removing path and 1+ extensions
                    args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0]
                    args.mask_directory = os.path.dirname(args.mask)
            else:
                args.mask_directory = None

        if hasattr(args, 'tpm'):
            if args.tpm:
                args.tpm = os.path.normpath(args.tpm)
                if os.path.isdir(args.tpm):
                    args.tpm_name_filter = '*' + args.tn_filter + '*'  # use wildcards if provided a directory alone
                    args.tpm_name_filter_exact = args.tn_filter
                    args.tpm_directory = os.path.normpath(os.path.join(args.tpm))
                elif os.path.isfile(args.tpm):
                    # Get the filename, removing path and 1+ extensions
                    args.tpm_name_filter = os.path.basename(args.tpm).split(os.extsep)[0]
                    args.tpm_directory = os.path.dirname(args.tpm)
            else:
                args.tpm_directory = None

        # Either get a list of strings as file names from a directory, or from a given column of a .CSV file
        if hasattr(args, 'list'):
            if args.list:
                args.list = os.path.normpath(args.list)
                if os.path.isdir(os.path.normpath(args.list)):
                    args.list_names = mmfn.get_names_list(args.list, args.list_filter, extension=args.in_ext_filter)
                # elif os.path.isfile(os.path.normpath(args.list)):
                #     args.column

        return args
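    # For example: args.input == '/data/scans/mouse01.nii.gz' (a file) yields
    # input_name_filter == 'mouse01' and input_directory == '/data/scans',
    # while a bare directory input instead becomes the wildcard filter
    # '*<in_filter>*' applied to every file in that directory.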
    # Methods which actually run the command the user asked for
    def nuc(self):
        import mm_nuc_n4
        mm_nuc_n4.go(self.args)

    def orient(self):
        import mm_orient
        mm_orient.go(self.args)

    def pair(self):
        import mm_pair
        mm_pair.go(self.args)

    def power(self):
        import mm_powercalc
        mm_powercalc.go(self.args)

    def loop(self):
        import mm_loop
        mm_loop.go(self.args)

    def seg_EM(self):
        import mm_seg_EM_group
        mm_seg_EM_group.go(self.args)
parser = getattr(self,", "if self.args.reverse_input: self.args.input_files_list = self.args.input_files_list[::-1] # Run print(\"MouseMorph {0} will be run with", "os.path.dirname(args.tpm) else: args.tpm_directory = None # Either get a list of strings as", "is two, if omitted)\") else: return str def power_args(self, parser): parser.add_argument('--power', dest='power', metavar='<0", "time_start def notify_complete(time_start=None, log_location=''): time_format = '%H:%M:%S %p on %b %d, %Y (%Z)'", "None if hasattr(args, 'tpm'): if args.tpm: args.tpm = os.path.normpath(args.tpm) if os.path.isdir(args.tpm): args.tpm_name_filter =", "datetime import datetime # import test_go import mm_functions as mmfn __author__ = '<NAME>", "args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0] args.mask_directory = os.path.dirname(args.mask) else: args.mask_directory = None if hasattr(args, 'tpm'):", "performed individually.') parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible principle axis orientations, in", "args.input_2: if os.path.isdir(args.input_2): # print (\" Input 2 {0} is a directory ...\".format(args.input))", "metavar='<mask directory> or <mask file>', help='Mask directory or file', required=req) parser.add_argument('-mf', '--mask_filter', dest='mn_filter',", "be used as a list of names [current]', required=req) parser.add_argument('-lf', '--list_filter', dest='list_filter', metavar='<string>',", "path and 1+ extensions args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0] args.input_directory = os.path.dirname(args.input) else: raise Exception(\"Input", "1+ extensions args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0] args.input_directory = os.path.dirname(args.input) else: raise Exception(\"Input not recognised", "process in parallel using multiprocessing [off]') top_parser.add_argument('-rev', '--reverse_input', dest='reverse_input', action=\"store_true\", help='Run through input", "if os.path.isdir(args.input): # print (\" Input {0} is a directory ...\".format(args.input)) args.input_name_filter =", "dest='tpm', metavar='<tpm directory> or <tpm file>', help='TPM directory or file', required=False) parser.add_argument('-tf', '--tpm_filter',", "The user shouldn't supply that \"\\\", really, but just in case...\"\"\" # for", "top_parser.add_argument('-i', '--input', dest='input', metavar='<directory> or <file path>', help='Input file (NIfTI-1, *.nii.gz or *.nii)", "list of relevant files. self.args.input_files_list = mmfn.get_files_list(self.args.input_directory, self.args.input_name_filter, self.args.in_ext_filter) if self.args.reverse_input: self.args.input_files_list =", "-arg2] # Run any script from this one import os import sys import", "the necessary arguments passed to MouseMorph programs. 
\"\"\" def __init__(self): command_parser = argparse.ArgumentParser(\"argp_mousemorph\",", "in args.__dict__.keys(): # try: # s = getattr(args, name) # setattr(args, name, s.rstrip(os.sep).rstrip('\"'))", "# Top-level parser with universal arguments top_parser = argparse.ArgumentParser(\"MouseMorph user input parser\", description", "Nth column will be used as a list of names [current]', required=req) parser.add_argument('-lf',", "-i has more than one argument following it, use as a list of", "parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number of levels [4]', default=4, type=int) parser.add_argument('-conv', dest='convergence', metavar='<convergence>', help='Convergence", "[4]', default=4, type=int) parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number of levels [4]', default=4, type=int) parser.add_argument('-conv',", "files ['']\", default='') # Processing options top_parser.add_argument('-ds', '--downsample', dest='downsample', metavar='<factor>', help='Downsampling factor [0,", "image processor.\", usage = \"mousemorph <command> [<arguments>]\") # Input and output top_parser.add_argument('-i', '--input',", "notify_start() getattr(self, first_arg.command)() notify_complete(time_start) # def sanitise_arguments(self, args): # \"\"\"Windows: only necessary if", "parser.add_argument('-res','--resample', dest='resample', action='store_true', help='Also resample output files.') parser.add_argument('--allsame', dest='allsame', action='store_true', help='Flag to indicate", "['']\", default='') return parser def add_arg_list(self, parser, req=False): parser.add_argument('-l', '--list', dest='list', metavar='<directory> or", "dest='output', metavar='<directory>', help='Output directory [input directory]') top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>', help='Prepend this string to", "%p on %b %d, %Y (%Z)' time_stop = time.strftime(time_format) time_diff = datetime.strptime(time_stop, time_format)", "if input is a directory and there's no filter here, corresponding images in", "'--list', dest='list', metavar='<directory> or <.CSV file path>', help='Either a directory containing files whose", "# 1. 
class MouseMorph(object):
    """Define all the necessary arguments passed to MouseMorph programs."""

    def __init__(self):
        command_parser = argparse.ArgumentParser("argp_mousemorph", description="MouseMorph, an automatic mouse MR image processor.", usage="""mousemorph <command> [<arguments>], where <command> can be any of: \n\textract, \n\torient, \n\tnuc, \n\tintstan, \n\t...""")
        command_parser.add_argument('command', help="MouseMorph program to be run.")

        first_arg = command_parser.parse_args(sys.argv[1:2])
        if not hasattr(self, first_arg.command):
            print("command '{0}' not recognised.".format(first_arg.command))
            command_parser.print_help()
            sys.exit(1)

        # Top-level parser with universal arguments
        top_parser = argparse.ArgumentParser("MouseMorph user input parser", description="MouseMorph, an automatic mouse MR image processor.", usage="mousemorph <command> [<arguments>]")

        # Input and output
        top_parser.add_argument('-i', '--input', dest='input', metavar='<directory> or <file path>', help='Input file (NIfTI-1, *.nii.gz or *.nii) or directory [current]')
        top_parser.add_argument('-o', '--output', dest='output', metavar='<directory>', help='Output directory [input directory]')
        top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>', help='Prepend this string to output name', default="")
        top_parser.add_argument('-ona', '--out_name_append', metavar='<string>', help='Append this string to output name', default="")
        top_parser.add_argument('-uz', '--unzip', action='store_true', help='Uncompressed output files [compressed]')
        top_parser.add_argument('-no', '--no_output', dest='no_output', action='store_true', help='Don\'t save output files [False]')
        top_parser.add_argument('-ow', '--overwrite', action='store_true', help='Overwrite existing output files [skip]')
        top_parser.add_argument('-dt', '--delete_temp', action='store_true', help='Delete temp files upon completion [False]')

        # Mask arguments
        top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file', required=False)
        top_parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help="If mask is a directory, filter files ['']", default='')

        # Filters
        top_parser.add_argument('-if', '--in_filter', dest='in_filter', metavar='<filter>', help="If input is a directory, filter files ['']", default='')

        # Processing options
        top_parser.add_argument('-ds', '--downsample', dest='downsample', metavar='<factor>', help='Downsampling factor [0, off]\n\t(Downsampling input files may speed up processing, at the expense of accuracy.)', default=0, type=float)
        top_parser.add_argument('-par', '--parallel', dest='parallel', action="store_true", help='Use multiple cores to process in parallel using multiprocessing [off]')
        top_parser.add_argument('-rev', '--reverse_input', dest='reverse_input', action="store_true", help='Run through input files in reverse order')
        top_parser.add_argument('-v', '--verbose', action="store_true", help="Verbose output")

        # Add command-specific arguments
        parser = getattr(self, first_arg.command + '_args')(top_parser)
        self.args = parser.parse_args(sys.argv[2:])

        # Sanitise arguments & directories
        self.args = self.sanitise_arguments(self.args)

        if self.args.input:
            # Pre-populate a list of relevant files.
            self.args.input_files_list = mmfn.get_files_list(self.args.input_directory, self.args.input_name_filter, self.args.in_ext_filter)
            if self.args.reverse_input:
                self.args.input_files_list = self.args.input_files_list[::-1]

        # Run
        print("MouseMorph {0} will be run with arguments: \n\t{1}".format(first_arg.command, vars(self.args)))
        time_start = notify_start()
        getattr(self, first_arg.command)()
        notify_complete(time_start)
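    # Example command lines for the dispatch above (directory names are
    # hypothetical; each sub-command needs a matching `<command>_args` method
    # and a `<command>` run method on this class):
    #   python mousemorph.py nuc -i /data/brains -o /data/nuc -ow
    #   python mousemorph.py orient -i /data/brains -at /data/atlas --allsame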
    # def sanitise_arguments(self, args):
    #     """Windows: only necessary if the user has supplied directories ending
    #     with a "\" (os.sep), which argparse assumes was intentional, so it adds
    #     the final user quote to the end of the string. The user shouldn't
    #     supply that "\", really, but just in case ..."""
    #     for name in args.__dict__.keys():
    #         try:
    #             s = getattr(args, name)
    #             setattr(args, name, s.rstrip(os.sep).rstrip('"'))
    #         except AttributeError:
    #             pass
    #         else:
    #             break
    #     return args

    def add_arg_csv(self, parser, req=False):
        parser.add_argument('-csv', '--csv', dest='csv_path', metavar='<.CSV file path>', help='Path to .CSV file', required=req)
        parser.add_argument('-col', '--column', dest='column', metavar='<int int ... int>', nargs='+', help='Column number(s) [0 1]', default=1, type=int, required=req)
        return parser

    def add_arg_mask(self, parser, req=False):
        # Mask arguments
        parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file', required=req)
        parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help="If mask is a directory, filter files ['']", default='')
        return parser

    def add_arg_list(self, parser, req=False):
        parser.add_argument('-l', '--list', dest='list', metavar='<directory> or <.CSV file path>', help='Either a directory containing files whose names are to be matched, or a .CSV file whose Nth column will be used as a list of names [current]', required=req)
        parser.add_argument('-lf', '--list_filter', dest='list_filter', metavar='<string>', help="String used to filter list input ['*']", default='*')
        return parser

    def bsi_args(self, parser):
        """These are the somewhat-unique requirements for BSI."""
        parser.add_argument('-b', '--baseline', dest='baseline_path', metavar='<baseline NIfTI file path>', help='Full path of the baseline NIfTI image', required=True)
        parser.add_argument('-r', '--repeat', dest='repeat_path', metavar='<repeat NIfTI file path>', help='Full path of the repeat NIfTI image', required=True)
        parser.add_argument('-bm', '--baseline_mask', dest='baseline_mask_path', metavar='<baseline mask NIfTI file path>', help='Full path of the baseline NIfTI image mask', required=True)
        parser.add_argument('-rm', '--repeat_mask', dest='repeat_mask_path', metavar='<repeat mask NIfTI file path>', help='Full path of the repeat NIfTI image mask', required=True)
        return parser
    def orient_args(self, parser):
        # Add specific arguments
        parser.add_argument('-at', '--atlas', dest='atlas', metavar='<atlas>', help='Atlas directory containing NIfTIs, or a single file.', required=False)
        parser.add_argument('-corr', '--corresponding', dest='corresponding', metavar='<corresponding>', help='NIfTI-1 file, or directory of files, to be oriented in the same manner as their correspondingly-named files in input_directory. (As per "secondary" in FSL FLIRT.)')
        parser.add_argument('-res', '--resample', dest='resample', action='store_true', help='Also resample output files.')
        parser.add_argument('--allsame', dest='allsame', action='store_true', help='Flag to indicate that all brains are in approximately the same initial orientation. Only the first image will be compared with an atlas; the rest will have the same gross orientation applied. Final minor corrections will be performed individually.')
        parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible principal axis orientations, in case AP is not the greatest dimension.')
        return parser

    def nuc_args(self, parser):
        # Add specific arguments
        parser.add_argument('-its', '--iterations', dest='iterations', metavar='<iterations>', help='Iterations to run [200]', default=200, type=int)
        parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>', help='Full width at half maximum [0.15]', default=0.15, type=float)
        parser.add_argument('-ss', dest='subsample', metavar='<factor>', help='Subsampling factor [4]', default=4, type=int)
        parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number of levels [4]', default=4, type=int)
        parser.add_argument('-conv', dest='convergence', metavar='<convergence>', help='Convergence threshold [0.001]', default=0.001, type=float)
        parser.add_argument('-nhb', dest='nhistbins', metavar='<nhistbins>', help='Number of histogram bins [256]', default=256, type=int)
        return parser

    def tails_type(self, value):
        acceptable = ['one', 'two']
        if value not in acceptable:
            raise argparse.ArgumentTypeError("--tails argument must be 'one' or 'two' (default is two, if omitted)")
        return value

    def power_args(self, parser):
        parser.add_argument('--power', dest='power', metavar='<0 < float < 1>', help='Desired power, 1-beta [0.8]', default=0.8, type=float)
        parser.add_argument('--significance', dest='significance', metavar='<0 < float < 1>', help='Desired significance level, alpha [0.05]', default=0.05, type=float)
        parser.add_argument('--detect_difference', dest='detect_difference', metavar='<0 < float < 1>', help='Fractional difference from the control mean to detect', default=0.25, type=float)
        parser.add_argument('--tails', dest='tails', metavar='<string>', help='Tails, one or two [two]', default='two', type=self.tails_type)
        parser = self.add_arg_csv(parser, req=True)
        parser.add_argument('--group', dest='csv_group_filter', metavar='<string>', help='Control group name filter [*]', default='')
        return parser
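    # Note on `type=self.tails_type` above: argparse calls the validator with
    # the raw string; returning the value accepts it, while raising
    # ArgumentTypeError (e.g. for a hypothetical `--tails three`) is reported
    # as a clean usage error rather than a traceback.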
    def pair_args(self, parser):
        parser = self.add_arg_list(parser, req=False)
        parser.add_argument('-col', '--column', dest='column', metavar='<int>', help='Column number [0]', default=0, type=int, required=False)
        parser.add_argument('-i2', '--input_2', dest='input_2', metavar='<directory> or <file path>', help='Second input directory containing NIfTIs, or a single file.', required=True)
        parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each file', required=False)
        return parser

    def loop_args(self, parser):
        parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each file', required=False)
        return parser

    def seg_EM_args(self, parser):
        # parser = self.add_arg_mask(parser, req=True)
        parser.add_argument('-t', '--tpm', dest='tpm', metavar='<tpm directory> or <tpm file>', help='TPM directory or file', required=False)
        parser.add_argument('-tf', '--tpm_filter', dest='tn_filter', metavar='<filter>', help="If tpm is a directory, filter files ['']", default='')
        parser.add_argument('--priors4D', dest='priors4D', action='store_true', help='Use this flag if the priors are all single 4D NIfTIs rather than individual files per class', required=False)
        parser.add_argument('--nopriors', dest='nopriors', metavar='<int>', help='Number of classes (no TPM inputs)', type=int)
        parser.add_argument('--mrf_beta', dest='mrf_beta', metavar='<0 < float < 1>', help='MRF prior strength [0.4]', default=0.4, type=float)
        parser.add_argument('--max_iter', dest='max_iter', metavar='<int>', help='Maximum number of iterations [100]', default=100, type=int)
        parser.add_argument('--rf_rel', dest='rf_rel', metavar='<0 < float < 1>', help='Prior relaxation factor [0.5]', default=0.5, type=float)
        parser.add_argument('--rf_gstd', dest='rf_gstd', metavar='<float>', help='Prior Gaussian regularisation [2.0]', default=2.0, type=float)
        return parser

    def sanitise_arguments(self, args):
        """Normalise the input, output, mask, TPM and list arguments, deriving
        a directory and a name filter for each."""
        args.in_ext_filter = '.nii*'
        if not args.unzip:
            args.ext = '.nii.gz'
        else:
            args.ext = '.nii'

        if args.input:
            if os.path.isdir(args.input):
                # print(" Input {0} is a directory ...".format(args.input))
                args.input_name_filter = '*' + args.in_filter + '*'  # use wildcards if provided a directory alone
                args.input_name_filter_exact = args.in_filter
                args.input_directory = os.path.normpath(os.path.join(args.input))
            elif os.path.isfile(args.input):
                # print(" Input {0} is a file ...".format(args.input))
                # Get the filename, removing path and 1+ extensions
                args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0]
                args.input_directory = os.path.dirname(args.input)
            else:
                raise Exception("Input not recognised or does not exist: {0}".format(args.input))
        else:
            args.input_directory = os.getcwd()

        if not args.no_output:
            if args.output:
                if os.path.isdir(args.output):
                    # print(" Output {0} is a directory ...".format(args.output))
                    args.output_directory = os.path.normpath(os.path.join(args.output))
                else:
                    print("Specified output ({0}) is not a directory; creating it ...".format(args.output))
                    args.output_directory = os.path.normpath(os.path.join(args.output))
                    mmfn.check_create_directories([args.output_directory])
            else:
                print("No output directory specified. Setting to input directory ({0}) in case it is required.".format(args.input_directory))
                args.output_directory = args.input_directory

        if hasattr(args, 'input_2'):
            if args.input_2:
                if os.path.isdir(args.input_2):
                    # print(" Input 2 {0} is a directory ...".format(args.input_2))
                    args.input_name_filter_2 = '*' + args.in_filter + '*'  # use wildcards if provided a directory alone
                    args.input_name_filter_exact_2 = args.in_filter
                    args.input_directory_2 = os.path.normpath(os.path.join(args.input_2))
                elif os.path.isfile(args.input_2):
                    # print(" Input 2 {0} is a file ...".format(args.input_2))
                    # Get the filename, removing path and 1+ extensions
                    args.input_name_filter_2 = os.path.basename(args.input_2).split(os.extsep)[0]
                    args.input_directory_2 = os.path.dirname(args.input_2)
                else:
                    raise Exception("Input 2 not recognised or does not exist: {0}".format(args.input_2))
            else:
                args.input_directory_2 = os.getcwd()

        if hasattr(args, 'mask'):
            if args.mask:
                args.mask = os.path.normpath(args.mask)
                if os.path.isdir(args.mask):
                    args.mask_name_filter = '*' + args.mn_filter + '*'  # use wildcards if provided a directory alone
                    args.mask_name_filter_exact = args.mn_filter
                    args.mask_directory = os.path.normpath(os.path.join(args.mask))
                elif os.path.isfile(args.mask):
                    # Get the filename, removing path and 1+ extensions
                    args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0]
                    args.mask_directory = os.path.dirname(args.mask)
            else:
                args.mask_directory = None

        if hasattr(args, 'tpm'):
            if args.tpm:
                args.tpm = os.path.normpath(args.tpm)
                if os.path.isdir(args.tpm):
                    args.tpm_name_filter = '*' + args.tn_filter + '*'  # use wildcards if provided a directory alone
                    args.tpm_name_filter_exact = args.tn_filter
                    args.tpm_directory = os.path.normpath(os.path.join(args.tpm))
                elif os.path.isfile(args.tpm):
                    # Get the filename, removing path and 1+ extensions
                    args.tpm_name_filter = os.path.basename(args.tpm).split(os.extsep)[0]
                    args.tpm_directory = os.path.dirname(args.tpm)
            else:
                args.tpm_directory = None

        # Either get a list of strings as file names from a directory, or from
        # a given column of a .CSV file
        if hasattr(args, 'list'):
            if args.list:
                args.list = os.path.normpath(args.list)
                if os.path.isdir(os.path.normpath(args.list)):
                    args.list_names = mmfn.get_names_list(args.list, args.list_filter, extension=args.in_ext_filter)
                # elif os.path.isfile(os.path.normpath(args.list)):
                #     args.column
        return args

    # Methods which actually run the command the user asked for
    def nuc(self):
        import mm_nuc_n4
        mm_nuc_n4.go(self.args)

    def orient(self):
        import mm_orient
        mm_orient.go(self.args)

    def pair(self):
        import mm_pair
        mm_pair.go(self.args)

    def power(self):
        import mm_powercalc
        mm_powercalc.go(self.args)

    def loop(self):
        import mm_loop
        mm_loop.go(self.args)

    def seg_EM(self):
        import mm_seg_EM_group
        mm_seg_EM_group.go(self.args)


def main():
    mm = MouseMorph()
    # print("{0}".format(mm.__dict__))
    print("{0}".format(mm.args))

if __name__ == '__main__':
    main()
(As per \"secondary\" in FSL FLIRT.)') parser.add_argument('-res','--resample', dest='resample', action='store_true', help='Also resample", "None # Either get a list of strings as file names from a", "list of input directories (or files) and combine all input files into the", "import sys import glob import time import argparse import subprocess from itertools import", "mmfn __author__ = '<NAME> (PhD student, CMIC & CABI, UCL, UK), <EMAIL>' __created__", "\"\\\", really, but just in case...\"\"\" # for name in args.__dict__.keys(): # try:", "whose Nth column will be used as a list of names [current]', required=req)", "input # if input is a single image and there is a filter", "minor corrections will be performed individually.') parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible", "a directory, filter files ['']\", default='') # Filters top_parser.add_argument('-if', '--in_filter', dest='in_filter', metavar='<filter>', help=\"If", "'--downsample', dest='downsample', metavar='<factor>', help='Downsampling factor [0, off]\\n\\t(Downsampling input files may speed up processing,", "or a .CSV file whose Nth column will be used as a list", "files, to be oriented in the same manner as their correspondingly-named files in", "...\".format(args.input)) # Get the filename, removing path and 1+ extensions args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0]", "%p on %b %d, %Y (%Z)' time_start = time.strftime(time_format) mmfn.alert_user(\"Start time is {0}", "args.input_name_filter_2 = '*' + args.in_filter + '*' # use wildcards if provided a", "os.path.normpath(os.path.join(args.output)) else: print \"Specified output ({0}) is not a directory; creating it ...\".format(args.output)", "'*' + args.tn_filter + '*' # use wildcards if provided a directory alone", "do # ----- # 1. if -i has more than one argument following", "if os.path.isdir(args.mask): args.mask_name_filter = '*' + args.mn_filter + '*' # use wildcards if", "float < 1>', help='Fractional difference from the control mean to detect', default=0.25, type=float)", "has more than one argument following it, use as a list of input", "test_go import mm_functions as mmfn __author__ = '<NAME> (PhD student, CMIC & CABI,", "which also matching the filter will be oriented # Replace mm_multi.py with: run", "dir matching the filter will be oriented as per the single input #", "output ({0}) is not a directory; creating it ...\".format(args.output) args.output_directory = os.path.normpath(os.path.join(args.output)) mmfn.check_create_directories([args.output_directory])", "with Python. Usage python mousemorph.py --help Orient brains to standard space ------------------------------- python", "help='Uncompressed output files [compressed]') top_parser.add_argument('-no', '--no_output', dest='no_output', action='store_true', help='Don\\'t save output files [False]')", "time import argparse import subprocess from itertools import chain from datetime import datetime", "args.list_names = mmfn.get_names_list(args.list, args.list_filter, extension=args.in_ext_filter) # elif os.path.isfile(os.path.normpath(args.list)): # args.column return args #", "...\".format(time_start)) return time_start def notify_complete(time_start=None, log_location=''): time_format = '%H:%M:%S %p on %b %d,", "the base MouseMorph program. Run it with Python. 
Usage python mousemorph.py --help Orient", "print(\"MouseMorph {0} will be run with arguments: \\n\\t{1}\".format(first_arg.command, vars(self.args))) time_start = notify_start() getattr(self,", "NIfTIs, or a single file.', required=True) parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to", "# print (\" Input {0} is a file ...\".format(args.input)) # Get the filename,", "metavar='<nhistbins>', help='Number of histogram bins [256]', default=256, type=int) return parser def tails_type(self, str):", "required=False) parser.add_argument('--nopriors', dest='nopriors', metavar='<int>', help='Number of classes (no TPM inputs)', type=int) parser.add_argument('--mrf_beta', dest='mrf_beta',", "run mousemorph.py multi action1 [-arg1 -arg2 -arg3 'param3'] action2 [-arg1 'param1' -arg2] action3", "= args.mn_filter args.mask_directory = os.path.normpath(os.path.join(args.mask)) elif os.path.isfile(args.mask): # Get the filename, removing path", "loop(self): import mm_loop mm_loop.go(self.args) def seg_EM(self): import mm_seg_EM_group mm_seg_EM_group.go(self.args) def main(): mm =", "os.path.isdir(args.output): # print (\" Output {0} is a directory ...\".format(args.input)) args.output_directory = os.path.normpath(os.path.join(args.output))", "Mask arguments parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or", "help='Column number [0]', default=0, type=int, required=False) parser.add_argument('-i2','--input_2', dest='input_2', metavar='<directory> or <file path>', help='Second", "removing path and 1+ extensions args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0] args.mask_directory = os.path.dirname(args.mask) else: args.mask_directory", "necessary if the user has supplied directories ending with a \"\\\" (os.sep), which", "FSL FLIRT's \"secondary\" # adapt to accept -corr [dir] [filter] # if input", "is a directory ...\".format(args.input)) args.input_name_filter_2 = '*' + args.in_filter + '*' # use", "mm_powercalc mm_powercalc.go(self.args) def loop(self): import mm_loop mm_loop.go(self.args) def seg_EM(self): import mm_seg_EM_group mm_seg_EM_group.go(self.args) def", "'.nii' if args.input: if os.path.isdir(args.input): # print (\" Input {0} is a directory", "args.mask = os.path.normpath(args.mask) if os.path.isdir(args.mask): args.mask_name_filter = '*' + args.mn_filter + '*' #", "else: args.input_directory_2 = os.getcwd() if hasattr(args, 'mask'): if args.mask: args.mask = os.path.normpath(args.mask) if", "python mousemorph.py nuc <arguments> \"\"\" # To do # ----- # 1. if", "# parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible principle axis orientations, in case", "args.input_directory_2 = os.path.dirname(args.input_2) else: raise Exception(\"Input 2 not recognised or does not exist:", "'2015-06-28' def notify_start(): \"\"\"Give the user some helpful notes at the launch of", "'--unzip', action='store_true', help='Uncompressed output files [compressed]') top_parser.add_argument('-no', '--no_output', dest='no_output', action='store_true', help='Don\\'t save output", "MouseMorph(object): \"\"\"Define all the necessary arguments passed to MouseMorph programs. \"\"\" def __init__(self):", "= os.getcwd() if not args.no_output: if args.output: if os.path.isdir(args.output): # print (\" Output", "in the same manner as their correspondingly-named files in input_directory. 
(As per \"secondary\"", "'{0}' not recognised.\".format(first_arg.command)) command_parser.print_help() sys.exit(1) # Top-level parser with universal arguments top_parser =", "help='Delete temp files upon completion [False]') # Mask arguments top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask", "each file', required=False) return parser def loop_args(self, parser): parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph", "type=int) parser.add_argument('--mrf_beta', dest='mrf_beta', metavar='<0 < float < 1>', help='MRF prior strength [0.4]', default=0.4,", "[dir] [filter] # if input is a single image and there's no filter", "matching the filter will be oriented # Replace mm_multi.py with: run mousemorph.py multi", "as mmfn __author__ = '<NAME> (PhD student, CMIC & CABI, UCL, UK), <EMAIL>'", "any script from this one import os import sys import glob import time", "specific arguments parser.add_argument('-its','--iterations', dest='iterations', metavar='<iterations>', help='Iterations to run [200]', default=200, type=int) parser.add_argument('-fwhm', dest='fwhm',", "req=False) parser.add_argument('-col', '--column', dest='column', metavar='<int>', help='Column number [0]', default=0, type=int, required=False) parser.add_argument('-i2','--input_2', dest='input_2',", "os.path.normpath(os.path.join(args.mask)) elif os.path.isfile(args.mask): # Get the filename, removing path and 1+ extensions args.mask_name_filter", "is a directory, filter files ['']\", default='') return parser def add_arg_list(self, parser, req=False):", "space ------------------------------- python mousemorph.py orient <arguments> Non-uniformity correct brains ----------------------------- python mousemorph.py nuc", "= getattr(self, first_arg.command + '_args')(top_parser) self.args = parser.parse_args(sys.argv[2:]) # Sanitise arguments & directories", "required=False) parser.add_argument('-i2','--input_2', dest='input_2', metavar='<directory> or <file path>', help='Second input directory containing NIfTIs, or", "not a directory; creating it ...\".format(args.output) args.output_directory = os.path.normpath(os.path.join(args.output)) mmfn.check_create_directories([args.output_directory]) else: print \"No", "path>', help='Full path of the repeat NIfTI image', required=True) parser.add_argument('-bm', '--baseline_mask', dest='baseline_mask_path', metavar='<baseline", "arguments parser = getattr(self, first_arg.command + '_args')(top_parser) self.args = parser.parse_args(sys.argv[2:]) # Sanitise arguments", "parser.add_argument('-rm', '--repeat_mask', dest='repeat_mask_path', metavar='<repeat mask NIfTI file path>', help='Full path of the repeat", "recognised or does not exist: {0}\".format(args.input)) else: args.input_directory = os.getcwd() if not args.no_output:", "mouse MR image processor.\", usage = \"mousemorph <command> [<arguments>]\") # Input and output", "Input 2 {0} is a file ...\".format(args.input)) # Get the filename, removing path", "of the string. 
The user shouldn't supply that \"\\\", really, but just in", "recognised or does not exist: {0}\".format(args.input_2)) else: args.input_directory_2 = os.getcwd() if hasattr(args, 'mask'):", "corresponding images in dir will be oriented # if input is a directory", "directory containing NIfTIs, or a single file.', required=True) parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph", "args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0] args.input_directory = os.path.dirname(args.input) else: raise Exception(\"Input not recognised or does", "self.args = self.sanitise_arguments(self.args) if self.args.input: # Pre-populate a list of relevant files. self.args.input_files_list", "applied. Final minor corrections will be performed individually.') parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all", "as their correspondingly-named files in input_directory. (As per \"secondary\" in FSL FLIRT.)') parser.add_argument('-res','--resample',", "# if input is a directory and there's no filter here, corresponding images", "the final user quote to the end of the string. The user shouldn't", "one or two [two]', default='two', type=self.tails_type) parser = self.add_arg_csv(parser, req=True) parser.add_argument('--group', dest='csv_group_filter', metavar='<string>',", "parser.add_argument('-ss', dest='subsample', metavar='<factor>', help='Subsampling factor [4]', default=4, type=int) parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number of", "if not hasattr(self, first_arg.command): print(\"command '{0}' not recognised.\".format(first_arg.command)) command_parser.print_help() sys.exit(1) # Top-level parser", "default=2.0, type=float) return parser def sanitise_arguments(self, args): \"\"\" \"\"\" args.in_ext_filter = '.nii*' if", "than one argument following it, use as a list of input directories (or", "'%H:%M:%S %p on %b %d, %Y (%Z)' time_stop = time.strftime(time_format) time_diff = datetime.strptime(time_stop,", "os.path.basename(args.tpm).split(os.extsep)[0] args.tpm_directory = os.path.dirname(args.tpm) else: args.tpm_directory = None # Either get a list", "itertools import chain from datetime import datetime # import test_go import mm_functions as", "output name', default=\"\") top_parser.add_argument('-uz', '--unzip', action='store_true', help='Uncompressed output files [compressed]') top_parser.add_argument('-no', '--no_output', dest='no_output',", "required=True) return parser def orient_args(self, parser): # Add specific arguments parser.add_argument('-at','--atlas', dest='atlas', metavar='<atlas>',", "a directory ...\".format(args.input)) args.input_name_filter_2 = '*' + args.in_filter + '*' # use wildcards", "that \"\\\", really, but just in case...\"\"\" # for name in args.__dict__.keys(): #", "column will be used as a list of names [current]', required=req) parser.add_argument('-lf', '--list_filter',", "the same gross orientation applied. 
Final minor corrections will be performed individually.') parser.add_argument('--allpa',", "repeat NIfTI image mask', required=True) return parser def orient_args(self, parser): # Add specific", "extensions args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0] args.input_directory = os.path.dirname(args.input) else: raise Exception(\"Input not recognised or", "mm_seg_EM_group mm_seg_EM_group.go(self.args) def main(): mm = MouseMorph() # print(\"{0}\".format(mm.__dict__)) print(\"{0}\".format(mm.args)) if __name__ ==", "at the expense of accuracy.)', default=0, type=float) top_parser.add_argument('-par', '--parallel', dest='parallel', action=\"store_true\", help='Use multiple", "args.mask: args.mask = os.path.normpath(args.mask) if os.path.isdir(args.mask): args.mask_name_filter = '*' + args.mn_filter + '*'", "'--csv', dest='csv_path', metavar='<.CSV file path>', help='Path to .CSV file', required=req) parser.add_argument('-col', '--column', dest='column',", "+ args.in_filter + '*' # use wildcards if provided a directory alone args.input_name_filter_exact", "[0.8]', default=0.8, type=float) parser.add_argument('--significance', dest='significance', metavar='<0 < float < 1>', help='Desired significance level,", "[100]', default=100, type=int) parser.add_argument('--rf_rel', dest='rf_rel', metavar='<0 < float < 1>', help='Prior relaxation factor", "os.path.normpath(os.path.join(args.input_2)) elif os.path.isfile(args.input_2): # print (\" Input 2 {0} is a file ...\".format(args.input))", "dest='parallel', action=\"store_true\", help='Use multiple cores to process in parallel using multiprocessing [off]') top_parser.add_argument('-rev',", "recognised.\".format(first_arg.command)) command_parser.print_help() sys.exit(1) # Top-level parser with universal arguments top_parser = argparse.ArgumentParser(\"MouseMorph user", "is a directory, filter files ['']\", default='') # Filters top_parser.add_argument('-if', '--in_filter', dest='in_filter', metavar='<filter>',", "is a directory ...\".format(args.input)) args.output_directory = os.path.normpath(os.path.join(args.output)) else: print \"Specified output ({0}) is", "def bsi_args(self, parser): \"\"\"These are the somewhat-unique requirements for BSI.\"\"\" parser.add_argument('-b', '--baseline', dest='baseline_path',", "processor.\", usage = \"mousemorph <command> [<arguments>]\") # Input and output top_parser.add_argument('-i', '--input', dest='input',", "args.mask_directory = os.path.normpath(os.path.join(args.mask)) elif os.path.isfile(args.mask): # Get the filename, removing path and 1+", "from a directory, or from a given column of a .CSV file if", "or a single file.', required=False) parser.add_argument('-corr','--corresponding', dest='corresponding', metavar='<corresponding>', help='NIfTI-1 file, or directory of", "on each file', required=False) return parser def seg_EM_args(self, parser): # parser = self.add_arg_mask(parser,", "using multiprocessing [off]') top_parser.add_argument('-rev', '--reverse_input', dest='reverse_input', action=\"store_true\", help='Run through input files in reverse", "mouse MR image processor.\", usage = \"\"\"mousemorph <command> [<arguments>], where <command> can be", "mm_powercalc.go(self.args) def loop(self): import mm_loop mm_loop.go(self.args) def seg_EM(self): import mm_seg_EM_group mm_seg_EM_group.go(self.args) def main():", "output files [skip]') top_parser.add_argument('-dt', '--delete_temp', action='store_true', help='Delete temp files upon completion [False]') #", "filter will be 
oriented # Replace mm_multi.py with: run mousemorph.py multi action1 [-arg1", "alpha [0.05]', default=0.05, type=float) parser.add_argument('--detect_difference', dest='detect_difference', metavar='<0 < float < 1>', help='Fractional difference", "of histogram bins [256]', default=256, type=int) return parser def tails_type(self, str): acceptable =", "requirements for BSI.\"\"\" parser.add_argument('-b', '--baseline', dest='baseline_path', metavar='<baseline NIfTI file path>', help='Full path of", "raise argparse.ArgumentTypeError(\"--tails argument must be 'one' or 'two' (default is two, if omitted)\")", "provided a directory alone args.mask_name_filter_exact = args.mn_filter args.mask_directory = os.path.normpath(os.path.join(args.mask)) elif os.path.isfile(args.mask): #", "action='store_true', help='Flag to indicate that all brains are in approximately the same initial" ]
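The MouseMorph class above parses only the sub-command name first, and builds the full argument parser afterwards. A minimal, self-contained sketch of that two-stage argparse dispatch pattern; all names here are illustrative stand-ins, not MouseMorph's:

import argparse
import sys

class App(object):
    """Illustrative two-stage command dispatcher."""

    def __init__(self):
        # Stage 1: parse only the sub-command name (sys.argv[1]).
        command_parser = argparse.ArgumentParser(usage="app <command> [<arguments>]")
        command_parser.add_argument('command', help="Program to be run.")
        first_arg = command_parser.parse_args(sys.argv[1:2])
        if not hasattr(self, first_arg.command):
            command_parser.print_help()
            sys.exit(1)
        # Stage 2: build a parser of shared arguments, extend it with
        # command-specific ones via getattr(self, command + '_args'),
        # then parse the remaining argv and dispatch.
        top_parser = argparse.ArgumentParser()
        top_parser.add_argument('-i', '--input', dest='input', default=None)
        parser = getattr(self, first_arg.command + '_args')(top_parser)
        self.args = parser.parse_args(sys.argv[2:])
        getattr(self, first_arg.command)()

    def nuc_args(self, parser):
        parser.add_argument('-its', '--iterations', default=200, type=int)
        return parser

    def nuc(self):
        print("nuc would run with {0}".format(vars(self.args)))

if __name__ == '__main__':
    App()  # e.g. `python app.py nuc -its 50`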
[ "def test_KDClassifierRF(data): X, y = data clf = KDClassifierRF() assert hasattr(clf, 'approx') assert", "'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierSORF(data): X, y =", "= KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx", "'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert", "approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx) clf.fit(X, y) assert hasattr(clf,", "'exact': assert hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierSORF(data):", ".._RBFSamplerORF import RBFSamplerORF from .._RBFSamplerSORF import RBFSamplerSORF @pytest.fixture def data(): return load_iris(return_X_y=True) def", "test_KDClassifierRF(data): X, y = data clf = KDClassifierRF() assert hasattr(clf, 'approx') assert hasattr(clf,", "'exact']: clf = KDClassifierRF(approx=approx) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if", "test_KDClassifierSORF(data): X, y = data for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf", "if clf.approx != 'exact': assert hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape ==", "for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx) clf.fit(X, y) assert", "def test_KDClassifierORF(data): X, y = data for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']:", "y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierSORF(data): X, y = data", "KDClassifierRF from .._RBFSamplerORF import RBFSamplerORF from .._RBFSamplerSORF import RBFSamplerSORF @pytest.fixture def data(): return", "as np from sklearn.datasets import load_iris from numpy.testing import assert_array_equal from numpy.testing import", "assert hasattr(clf, 'normalize') assert hasattr(clf, 'gamma') assert hasattr(clf, 'n_components') for approx in ['rff+','rff',", "'approx') assert hasattr(clf, 'normalize') assert hasattr(clf, 'gamma') assert hasattr(clf, 'n_components') for approx in", "load_iris(return_X_y=True) def test_KDClassifierRF(data): X, y = data clf = KDClassifierRF() assert hasattr(clf, 'approx')", "'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx != 'exact': assert hasattr(clf, 'rbf_sampler_') y_pred =", "= data clf = KDClassifierRF() assert hasattr(clf, 'approx') assert hasattr(clf, 'normalize') assert hasattr(clf,", "y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierORF(data): X, y = data", "in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx) clf.fit(X, y) assert hasattr(clf, 'classes_')", "'n_components') for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx) clf.fit(X, y)", "from sklearn.datasets import load_iris from numpy.testing import assert_array_equal from numpy.testing import assert_allclose from", "np from sklearn.datasets import load_iris from numpy.testing import assert_array_equal from numpy.testing import assert_allclose", "assert_array_equal from numpy.testing import assert_allclose from .._kdclassifier import KDClassifierRF from .._RBFSamplerORF import RBFSamplerORF", "y = data clf = KDClassifierRF() assert hasattr(clf, 'approx') assert hasattr(clf, 'normalize') assert", "y) assert hasattr(clf, 'classes_') assert hasattr(clf, 
'Xtrain_') if clf.approx != 'exact': assert hasattr(clf,", "clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if", "test_KDClassifierORF(data): X, y = data for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf", "assert y_pred.shape == (X.shape[0],) def test_KDClassifierSORF(data): X, y = data for approx in", "data for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X,", "['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx) clf.fit(X, y) assert hasattr(clf, 'classes_') assert", "'gamma') assert hasattr(clf, 'n_components') for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf =", "X, y = data for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf =", "assert hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierSORF(data): X,", "sklearn.datasets import load_iris from numpy.testing import assert_array_equal from numpy.testing import assert_allclose from .._kdclassifier", "RBFSamplerSORF @pytest.fixture def data(): return load_iris(return_X_y=True) def test_KDClassifierRF(data): X, y = data clf", "def data(): return load_iris(return_X_y=True) def test_KDClassifierRF(data): X, y = data clf = KDClassifierRF()", "(X.shape[0],) def test_KDClassifierORF(data): X, y = data for approx in ['rff+','rff', 'lrff', 'lrff+',", "'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf,", "from .._kdclassifier import KDClassifierRF from .._RBFSamplerORF import RBFSamplerORF from .._RBFSamplerSORF import RBFSamplerSORF @pytest.fixture", "assert hasattr(clf, 'Xtrain_') if clf.approx != 'exact': assert hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X)", "hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierORF(data): X, y", "KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx !=", "'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf,", "numpy.testing import assert_allclose from .._kdclassifier import KDClassifierRF from .._RBFSamplerORF import RBFSamplerORF from .._RBFSamplerSORF", "clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx != 'exact': assert", "X, y = data clf = KDClassifierRF() assert hasattr(clf, 'approx') assert hasattr(clf, 'normalize')", "assert hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierORF(data): X,", "!= 'exact': assert hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def", "hasattr(clf, 'gamma') assert hasattr(clf, 'n_components') for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf", "y_pred.shape == (X.shape[0],) def test_KDClassifierORF(data): X, y = data for approx in ['rff+','rff',", "hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierSORF(data): X, y", "= clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierORF(data): X, y = data for", "import RBFSamplerORF from .._RBFSamplerSORF import RBFSamplerSORF @pytest.fixture def data(): return load_iris(return_X_y=True) def 
test_KDClassifierRF(data):", "RBFSamplerORF from .._RBFSamplerSORF import RBFSamplerSORF @pytest.fixture def data(): return load_iris(return_X_y=True) def test_KDClassifierRF(data): X,", "approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X, y) assert", "import load_iris from numpy.testing import assert_array_equal from numpy.testing import assert_allclose from .._kdclassifier import", "'exact': assert hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierORF(data):", "= KDClassifierRF(approx=approx) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx !=", "for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X, y)", "import numpy as np from sklearn.datasets import load_iris from numpy.testing import assert_array_equal from", "clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierSORF(data): X, y = data for approx", "= data for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF())", "in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X, y) assert hasattr(clf,", "data(): return load_iris(return_X_y=True) def test_KDClassifierRF(data): X, y = data clf = KDClassifierRF() assert", "clf = KDClassifierRF(approx=approx) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx", "return load_iris(return_X_y=True) def test_KDClassifierRF(data): X, y = data clf = KDClassifierRF() assert hasattr(clf,", "clf = KDClassifierRF() assert hasattr(clf, 'approx') assert hasattr(clf, 'normalize') assert hasattr(clf, 'gamma') assert", "'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierORF(data): X, y =", "load_iris from numpy.testing import assert_array_equal from numpy.testing import assert_allclose from .._kdclassifier import KDClassifierRF", "approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X, y) assert", "KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx !=", "in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X, y) assert hasattr(clf,", ".._RBFSamplerSORF import RBFSamplerSORF @pytest.fixture def data(): return load_iris(return_X_y=True) def test_KDClassifierRF(data): X, y =", "hasattr(clf, 'Xtrain_') if clf.approx != 'exact': assert hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert", "['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X, y) assert hasattr(clf, 'classes_')", "assert hasattr(clf, 'gamma') assert hasattr(clf, 'n_components') for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']:", "clf.approx != 'exact': assert hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape == (X.shape[0],)", "hasattr(clf, 'approx') assert hasattr(clf, 'normalize') assert hasattr(clf, 'gamma') assert hasattr(clf, 'n_components') for approx", "hasattr(clf, 'n_components') for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx) clf.fit(X,", "== (X.shape[0],) def test_KDClassifierORF(data): X, y = data for approx 
in ['rff+','rff', 'lrff',", "numpy.testing import assert_array_equal from numpy.testing import assert_allclose from .._kdclassifier import KDClassifierRF from .._RBFSamplerORF", "['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X, y) assert hasattr(clf, 'classes_')", "assert hasattr(clf, 'approx') assert hasattr(clf, 'normalize') assert hasattr(clf, 'gamma') assert hasattr(clf, 'n_components') for", "'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert", "(X.shape[0],) def test_KDClassifierSORF(data): X, y = data for approx in ['rff+','rff', 'lrff', 'lrff+',", "from .._RBFSamplerSORF import RBFSamplerSORF @pytest.fixture def data(): return load_iris(return_X_y=True) def test_KDClassifierRF(data): X, y", "y = data for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx,", "= clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierSORF(data): X, y = data for", "assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx != 'exact': assert hasattr(clf, 'rbf_sampler_')", "pytest import numpy as np from sklearn.datasets import load_iris from numpy.testing import assert_array_equal", "import pytest import numpy as np from sklearn.datasets import load_iris from numpy.testing import", "from numpy.testing import assert_array_equal from numpy.testing import assert_allclose from .._kdclassifier import KDClassifierRF from", "hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx != 'exact': assert hasattr(clf, 'rbf_sampler_') y_pred", "KDClassifierRF() assert hasattr(clf, 'approx') assert hasattr(clf, 'normalize') assert hasattr(clf, 'gamma') assert hasattr(clf, 'n_components')", "data clf = KDClassifierRF() assert hasattr(clf, 'approx') assert hasattr(clf, 'normalize') assert hasattr(clf, 'gamma')", "assert_allclose from .._kdclassifier import KDClassifierRF from .._RBFSamplerORF import RBFSamplerORF from .._RBFSamplerSORF import RBFSamplerSORF", ".._kdclassifier import KDClassifierRF from .._RBFSamplerORF import RBFSamplerORF from .._RBFSamplerSORF import RBFSamplerSORF @pytest.fixture def", "sampler=RBFSamplerORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx != 'exact':", "'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_')", "== (X.shape[0],) def test_KDClassifierSORF(data): X, y = data for approx in ['rff+','rff', 'lrff',", "'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_')", "import assert_allclose from .._kdclassifier import KDClassifierRF from .._RBFSamplerORF import RBFSamplerORF from .._RBFSamplerSORF import", "for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X, y)", "def test_KDClassifierSORF(data): X, y = data for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']:", "import KDClassifierRF from .._RBFSamplerORF import RBFSamplerORF from .._RBFSamplerSORF import RBFSamplerSORF @pytest.fixture def data():", "from .._RBFSamplerORF import RBFSamplerORF from .._RBFSamplerSORF import RBFSamplerSORF @pytest.fixture def data(): return load_iris(return_X_y=True)", "data for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = 
KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X,", "@pytest.fixture def data(): return load_iris(return_X_y=True) def test_KDClassifierRF(data): X, y = data clf =", "sampler=RBFSamplerSORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx != 'exact':", "'Xtrain_') if clf.approx != 'exact': assert hasattr(clf, 'rbf_sampler_') y_pred = clf.predict(X) assert y_pred.shape", "import RBFSamplerSORF @pytest.fixture def data(): return load_iris(return_X_y=True) def test_KDClassifierRF(data): X, y = data", "= data for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF())", "clf.predict(X) assert y_pred.shape == (X.shape[0],) def test_KDClassifierORF(data): X, y = data for approx", "= KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx", "clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF()) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if", "'normalize') assert hasattr(clf, 'gamma') assert hasattr(clf, 'n_components') for approx in ['rff+','rff', 'lrff', 'lrff+',", "import assert_array_equal from numpy.testing import assert_allclose from .._kdclassifier import KDClassifierRF from .._RBFSamplerORF import", "assert hasattr(clf, 'n_components') for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx)", "y_pred.shape == (X.shape[0],) def test_KDClassifierSORF(data): X, y = data for approx in ['rff+','rff',", "'lrff+', 'exact']: clf = KDClassifierRF(approx=approx) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_')", "KDClassifierRF(approx=approx) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf, 'Xtrain_') if clf.approx != 'exact':", "'lrff', 'lrff+', 'exact']: clf = KDClassifierRF(approx=approx) clf.fit(X, y) assert hasattr(clf, 'classes_') assert hasattr(clf,", "hasattr(clf, 'normalize') assert hasattr(clf, 'gamma') assert hasattr(clf, 'n_components') for approx in ['rff+','rff', 'lrff',", "assert y_pred.shape == (X.shape[0],) def test_KDClassifierORF(data): X, y = data for approx in", "numpy as np from sklearn.datasets import load_iris from numpy.testing import assert_array_equal from numpy.testing", "from numpy.testing import assert_allclose from .._kdclassifier import KDClassifierRF from .._RBFSamplerORF import RBFSamplerORF from", "= KDClassifierRF() assert hasattr(clf, 'approx') assert hasattr(clf, 'normalize') assert hasattr(clf, 'gamma') assert hasattr(clf," ]
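These tests exercise KDClassifierRF with interchangeable random-feature samplers (ORF and SORF variants), which appear to follow scikit-learn's fit/transform contract; that contract is an inference from the tests, not documented API. For comparison, scikit-learn's built-in RBFSampler implements the same random Fourier feature idea; a small sketch with illustrative settings:

from sklearn.datasets import load_iris
from sklearn.kernel_approximation import RBFSampler

X, y = load_iris(return_X_y=True)

# Random Fourier feature map approximating an RBF kernel: fit() draws the
# random projection, transform() maps X into an n_components-dimensional
# space where a linear model approximates the kernelized one.
sampler = RBFSampler(gamma=1.0, n_components=100, random_state=0)
Z = sampler.fit_transform(X)
print(Z.shape)  # (150, 100)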
[ "applications. Sample use cases include: Golden training sets management Reproducibility (dataset training snapshot)", "before any major process. For example, a snapshot can serve as a roll-back", "a reference to the original binary and not a new copy of the", "datasets on the same item. * Different datasets (not clones) with similar recipes", "outcome depends on how similar or different the datasets are. * Cloned Datasets", "you will see annotations from different datasets on the same item. * Different", "to original datasets in case of any error without losing the data. ##", "data versioning provides you with unique tools for data management - clone, merge,", "different recipes - Datasets with different default recipes cannot be merged. Use the", "can serve as a roll-back mechanism to original datasets in case of any", "the original. Files are actually a reference to the original binary and not", "* Datasets with different recipes - Datasets with different default recipes cannot be", "multiple versions for various applications. Sample use cases include: Golden training sets management", "recipes - items will be summed up, which will cause duplication of similar", "safe and protected. When cloning a dataset, you can add a destination dataset,", "def section2(): \"\"\" ## Merge Datasets Dataset merging outcome depends on how similar", "unique tools for data management - clone, merge, slice & dice your files,", "sets management Reproducibility (dataset training snapshot) Experimentation (creating subsets from different kinds) Task/Assignment", "dataset, remote file path, and more... \"\"\" def section2(): \"\"\" ## Merge Datasets", "cases include: Golden training sets management Reproducibility (dataset training snapshot) Experimentation (creating subsets", "destination dataset, remote file path, and more... \"\"\" def section2(): \"\"\" ## Merge", "to create multiple versions for various applications. Sample use cases include: Golden training", "Dataloop's powerful data versioning provides you with unique tools for data management -", "data. ## Clone Datasets Cloning a dataset creates a new dataset with the", "the same files as the original. Files are actually a reference to the", "Version \"Snapshot\" - Use our versioning feature as a way to save data", "with similar recipes - items will be summed up, which will cause duplication", "management Reproducibility (dataset training snapshot) Experimentation (creating subsets from different kinds) Task/Assignment management", "our versioning feature as a way to save data (items, annotations, metadata) before", "a destination dataset, remote file path, and more... \"\"\" def section2(): \"\"\" ##", "data (items, annotations, metadata) before any major process. For example, a snapshot can", "new dataset with the same files as the original. Files are actually a", "various applications. Sample use cases include: Golden training sets management Reproducibility (dataset training", "\"\"\" ## Merge Datasets Dataset merging outcome depends on how similar or different", "can add a destination dataset, remote file path, and more... \"\"\" def section2():", "will see annotations from different datasets on the same item. * Different datasets", "a new copy of the original, so your cloud data remains safe and", "action button) to match recipes between datasets and be able to merge them.", "snapshot can serve as a roll-back mechanism to original datasets in case of", "- items, annotations, and metadata will be merged. This means that you will", "merged. 
This means that you will see annotations from different datasets on the", "path, and more... \"\"\" def section2(): \"\"\" ## Merge Datasets Dataset merging outcome", "option on dataset level (3-dots action button) to match recipes between datasets and", "major process. For example, a snapshot can serve as a roll-back mechanism to", "Datasets with different recipes - Datasets with different default recipes cannot be merged.", "Merge Datasets Dataset merging outcome depends on how similar or different the datasets", "protected. When cloning a dataset, you can add a destination dataset, remote file", "will be summed up, which will cause duplication of similar items. * Datasets", "dataset level (3-dots action button) to match recipes between datasets and be able", "clone, merge, slice & dice your files, to create multiple versions for various", "section1(): \"\"\" # Data Versioning Dataloop's powerful data versioning provides you with unique", "Reproducibility (dataset training snapshot) Experimentation (creating subsets from different kinds) Task/Assignment management Data", "creates a new dataset with the same files as the original. Files are", "* Cloned Datasets - items, annotations, and metadata will be merged. This means", "Different datasets (not clones) with similar recipes - items will be summed up,", "and metadata will be merged. This means that you will see annotations from", "* Different datasets (not clones) with similar recipes - items will be summed", "## Clone Datasets Cloning a dataset creates a new dataset with the same", "button) to match recipes between datasets and be able to merge them. \"\"\"", "a way to save data (items, annotations, metadata) before any major process. For", "without losing the data. ## Clone Datasets Cloning a dataset creates a new", "Data Versioning Dataloop's powerful data versioning provides you with unique tools for data", "When cloning a dataset, you can add a destination dataset, remote file path,", "management - clone, merge, slice & dice your files, to create multiple versions", "with different default recipes cannot be merged. Use the 'Switch recipe' option on", "with unique tools for data management - clone, merge, slice & dice your", "(creating subsets from different kinds) Task/Assignment management Data Version \"Snapshot\" - Use our", "Files are actually a reference to the original binary and not a new", "for various applications. Sample use cases include: Golden training sets management Reproducibility (dataset", "recipe' option on dataset level (3-dots action button) to match recipes between datasets", "for data management - clone, merge, slice & dice your files, to create", "similar items. * Datasets with different recipes - Datasets with different default recipes", "# Data Versioning Dataloop's powerful data versioning provides you with unique tools for", "annotations from different datasets on the same item. * Different datasets (not clones)", "annotations, and metadata will be merged. This means that you will see annotations", "datasets in case of any error without losing the data. ## Clone Datasets", "Use our versioning feature as a way to save data (items, annotations, metadata)", "and protected. When cloning a dataset, you can add a destination dataset, remote", "dataset, you can add a destination dataset, remote file path, and more... 
\"\"\"", "original binary and not a new copy of the original, so your cloud", "the original binary and not a new copy of the original, so your", "Versioning Dataloop's powerful data versioning provides you with unique tools for data management", "the original, so your cloud data remains safe and protected. When cloning a", "powerful data versioning provides you with unique tools for data management - clone,", "binary and not a new copy of the original, so your cloud data", "and not a new copy of the original, so your cloud data remains", "files, to create multiple versions for various applications. Sample use cases include: Golden", "level (3-dots action button) to match recipes between datasets and be able to", "snapshot) Experimentation (creating subsets from different kinds) Task/Assignment management Data Version \"Snapshot\" -", "file path, and more... \"\"\" def section2(): \"\"\" ## Merge Datasets Dataset merging", "(not clones) with similar recipes - items will be summed up, which will", "will cause duplication of similar items. * Datasets with different recipes - Datasets", "items. * Datasets with different recipes - Datasets with different default recipes cannot", "dataset with the same files as the original. Files are actually a reference", "your files, to create multiple versions for various applications. Sample use cases include:", "process. For example, a snapshot can serve as a roll-back mechanism to original", "summed up, which will cause duplication of similar items. * Datasets with different", "reference to the original binary and not a new copy of the original,", "are. * Cloned Datasets - items, annotations, and metadata will be merged. This", "a new dataset with the same files as the original. Files are actually", "serve as a roll-back mechanism to original datasets in case of any error", "## Merge Datasets Dataset merging outcome depends on how similar or different the", "case of any error without losing the data. ## Clone Datasets Cloning a", "a snapshot can serve as a roll-back mechanism to original datasets in case", "new copy of the original, so your cloud data remains safe and protected.", "as a way to save data (items, annotations, metadata) before any major process.", "cause duplication of similar items. * Datasets with different recipes - Datasets with", "\"\"\" def section2(): \"\"\" ## Merge Datasets Dataset merging outcome depends on how", "actually a reference to the original binary and not a new copy of", "same item. * Different datasets (not clones) with similar recipes - items will", "versions for various applications. Sample use cases include: Golden training sets management Reproducibility", "annotations, metadata) before any major process. For example, a snapshot can serve as", "Cloned Datasets - items, annotations, and metadata will be merged. This means that", "\"\"\" # Data Versioning Dataloop's powerful data versioning provides you with unique tools", "or different the datasets are. * Cloned Datasets - items, annotations, and metadata", "For example, a snapshot can serve as a roll-back mechanism to original datasets", "from different kinds) Task/Assignment management Data Version \"Snapshot\" - Use our versioning feature", "losing the data. ## Clone Datasets Cloning a dataset creates a new dataset", "metadata will be merged. This means that you will see annotations from different", "so your cloud data remains safe and protected. 
When cloning a dataset, you", "mechanism to original datasets in case of any error without losing the data.", "how similar or different the datasets are. * Cloned Datasets - items, annotations,", "use cases include: Golden training sets management Reproducibility (dataset training snapshot) Experimentation (creating", "Datasets Dataset merging outcome depends on how similar or different the datasets are.", "any error without losing the data. ## Clone Datasets Cloning a dataset creates", "dataset creates a new dataset with the same files as the original. Files", "create multiple versions for various applications. Sample use cases include: Golden training sets", "Golden training sets management Reproducibility (dataset training snapshot) Experimentation (creating subsets from different", "original, so your cloud data remains safe and protected. When cloning a dataset,", "you can add a destination dataset, remote file path, and more... \"\"\" def", "remote file path, and more... \"\"\" def section2(): \"\"\" ## Merge Datasets Dataset", "training sets management Reproducibility (dataset training snapshot) Experimentation (creating subsets from different kinds)", "default recipes cannot be merged. Use the 'Switch recipe' option on dataset level", "section2(): \"\"\" ## Merge Datasets Dataset merging outcome depends on how similar or", "metadata) before any major process. For example, a snapshot can serve as a", "Datasets - items, annotations, and metadata will be merged. This means that you", "which will cause duplication of similar items. * Datasets with different recipes -", "different kinds) Task/Assignment management Data Version \"Snapshot\" - Use our versioning feature as", "way to save data (items, annotations, metadata) before any major process. For example,", "the 'Switch recipe' option on dataset level (3-dots action button) to match recipes", "dice your files, to create multiple versions for various applications. Sample use cases", "roll-back mechanism to original datasets in case of any error without losing the", "Datasets Cloning a dataset creates a new dataset with the same files as", "the datasets are. * Cloned Datasets - items, annotations, and metadata will be", "Data Version \"Snapshot\" - Use our versioning feature as a way to save", "Dataset merging outcome depends on how similar or different the datasets are. *", "copy of the original, so your cloud data remains safe and protected. When", "include: Golden training sets management Reproducibility (dataset training snapshot) Experimentation (creating subsets from", "Use the 'Switch recipe' option on dataset level (3-dots action button) to match", "Task/Assignment management Data Version \"Snapshot\" - Use our versioning feature as a way", "subsets from different kinds) Task/Assignment management Data Version \"Snapshot\" - Use our versioning", "be summed up, which will cause duplication of similar items. * Datasets with", "recipes - Datasets with different default recipes cannot be merged. Use the 'Switch", "your cloud data remains safe and protected. When cloning a dataset, you can", "This means that you will see annotations from different datasets on the same", "see annotations from different datasets on the same item. * Different datasets (not", "be merged. Use the 'Switch recipe' option on dataset level (3-dots action button)", "of any error without losing the data. ## Clone Datasets Cloning a dataset", "with the same files as the original. Files are actually a reference to", "files as the original. 
Files are actually a reference to the original binary and not a new copy of the original, so your cloud data remains safe and protected.
When cloning a dataset, you can add a destination dataset, remote file path, and more...
"""


def section2():
    """
    ## Merge Datasets
    Dataset merging outcome depends on how similar or different the datasets are.

    * Cloned Datasets - items, annotations, and metadata will be merged. This means that you will see annotations from different datasets on the same item.
    * Different datasets (not clones) with similar recipes - items will be summed up, which will cause duplication of similar items.
    * Datasets with different recipes - Datasets with different default recipes cannot be merged. Use the 'Switch recipe' option on dataset level (3-dots action button) to match recipes between datasets and be able to merge them.
    """
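# --- Editor's illustrative sketch (not part of the original example) ---
# A minimal clone flow with the dtlpy SDK. The project/dataset names are
# placeholders, and the exact keyword arguments may differ between SDK versions.
import dtlpy as dl

project = dl.projects.get(project_name='my-project')        # assumed name
dataset = project.datasets.get(dataset_name='my-dataset')   # assumed name

# Clone: the new dataset references the same binaries, so no cloud data is copied.
clone = dataset.clone(clone_name='my-dataset-v2')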
[ "ansible.utils as utils import ansible.errors as errors def flatten_hash_to_list(terms): ret = [] for", "distributed in the hope that it will be useful, # but WITHOUT ANY", "= basedir def run(self, terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if", "file is part of Ansible # # Ansible is free software: you can", "PURPOSE. See the # GNU General Public License for more details. # #", "either version 3 of the License, or # (at your option) any later", "by # the Free Software Foundation, either version 3 of the License, or", "= [] for key in terms: ret.append({'key': key, 'value': terms[key]}) return ret class", "version 3 of the License, or # (at your option) any later version.", "terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if not isinstance(terms, dict): raise errors.AnsibleError(\"with_dict expects a", "published by # the Free Software Foundation, either version 3 of the License,", "later version. # # Ansible is distributed in the hope that it will", "# # Ansible is free software: you can redistribute it and/or modify #", "it and/or modify # it under the terms of the GNU General Public", "as utils import ansible.errors as errors def flatten_hash_to_list(terms): ret = [] for key", "# it under the terms of the GNU General Public License as published", "terms of the GNU General Public License as published by # the Free", "GNU General Public License as published by # the Free Software Foundation, either", "it under the terms of the GNU General Public License as published by", "Ansible is distributed in the hope that it will be useful, # but", "see <http://www.gnu.org/licenses/>. from ansible.utils import safe_eval import ansible.utils as utils import ansible.errors as", "is part of Ansible # # Ansible is free software: you can redistribute", "License for more details. # # You should have received a copy of", "free software: you can redistribute it and/or modify # it under the terms", "return ret class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self,", "**kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if not isinstance(terms, dict): raise errors.AnsibleError(\"with_dict expects", "= utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if not isinstance(terms, dict): raise errors.AnsibleError(\"with_dict expects a dict\")", "<NAME> <<EMAIL>> # # This file is part of Ansible # # Ansible", "Ansible # # Ansible is free software: you can redistribute it and/or modify", "you can redistribute it and/or modify # it under the terms of the", "a copy of the GNU General Public License # along with Ansible. If", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General", "of the GNU General Public License as published by # the Free Software", "redistribute it and/or modify # it under the terms of the GNU General", "Public License as published by # the Free Software Foundation, either version 3", "modify # it under the terms of the GNU General Public License as", "import ansible.errors as errors def flatten_hash_to_list(terms): ret = [] for key in terms:", "of the License, or # (at your option) any later version. # #", "the Free Software Foundation, either version 3 of the License, or # (at", "should have received a copy of the GNU General Public License # along", "any later version. # # Ansible is distributed in the hope that it", "PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
#", "terms: ret.append({'key': key, 'value': terms[key]}) return ret class LookupModule(object): def __init__(self, basedir=None, **kwargs):", "safe_eval import ansible.utils as utils import ansible.errors as errors def flatten_hash_to_list(terms): ret =", "ret = [] for key in terms: ret.append({'key': key, 'value': terms[key]}) return ret", "Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible.utils import safe_eval import ansible.utils as utils", "terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if not isinstance(terms, dict): raise", "is free software: you can redistribute it and/or modify # it under the", "without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR", "def flatten_hash_to_list(terms): ret = [] for key in terms: ret.append({'key': key, 'value': terms[key]})", "or # (at your option) any later version. # # Ansible is distributed", "hope that it will be useful, # but WITHOUT ANY WARRANTY; without even", "from ansible.utils import safe_eval import ansible.utils as utils import ansible.errors as errors def", "utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if not isinstance(terms, dict): raise errors.AnsibleError(\"with_dict expects a dict\") return", "def run(self, terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if not isinstance(terms,", "part of Ansible # # Ansible is free software: you can redistribute it", "the GNU General Public License as published by # the Free Software Foundation,", "ansible.utils import safe_eval import ansible.utils as utils import ansible.errors as errors def flatten_hash_to_list(terms):", "inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if not isinstance(terms, dict): raise errors.AnsibleError(\"with_dict", "[] for key in terms: ret.append({'key': key, 'value': terms[key]}) return ret class LookupModule(object):", "version. # # Ansible is distributed in the hope that it will be", "warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #", "You should have received a copy of the GNU General Public License #", "# the Free Software Foundation, either version 3 of the License, or #", "See the # GNU General Public License for more details. # # You", "# (c) 2014, <NAME> <<EMAIL>> # # This file is part of Ansible", "def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, terms, inject=None, **kwargs): terms", "with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible.utils import safe_eval import ansible.utils as", "Foundation, either version 3 of the License, or # (at your option) any", "details. # # You should have received a copy of the GNU General", "GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
from", "This file is part of Ansible # # Ansible is free software: you", "WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A", "useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of #", "as errors def flatten_hash_to_list(terms): ret = [] for key in terms: ret.append({'key': key,", "<<EMAIL>> # # This file is part of Ansible # # Ansible is", "that it will be useful, # but WITHOUT ANY WARRANTY; without even the", "# This file is part of Ansible # # Ansible is free software:", "License as published by # the Free Software Foundation, either version 3 of", "utils import ansible.errors as errors def flatten_hash_to_list(terms): ret = [] for key in", "flatten_hash_to_list(terms): ret = [] for key in terms: ret.append({'key': key, 'value': terms[key]}) return", "the terms of the GNU General Public License as published by # the", "General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible.utils", "Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible.utils import", "Ansible is free software: you can redistribute it and/or modify # it under", "for key in terms: ret.append({'key': key, 'value': terms[key]}) return ret class LookupModule(object): def", "as published by # the Free Software Foundation, either version 3 of the", "even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, terms, inject=None,", "of Ansible # # Ansible is free software: you can redistribute it and/or", "implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the", "ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR", "key in terms: ret.append({'key': key, 'value': terms[key]}) return ret class LookupModule(object): def __init__(self,", "License, or # (at your option) any later version. # # Ansible is", "# # This file is part of Ansible # # Ansible is free", "the License, or # (at your option) any later version. # # Ansible", "it will be useful, # but WITHOUT ANY WARRANTY; without even the implied", "WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS", "terms[key]}) return ret class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir = basedir def", "the hope that it will be useful, # but WITHOUT ANY WARRANTY; without", "along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible.utils import safe_eval import ansible.utils", "(at your option) any later version. # # Ansible is distributed in the", "# # You should have received a copy of the GNU General Public", "(c) 2014, <NAME> <<EMAIL>> # # This file is part of Ansible #", "General Public License for more details. # # You should have received a", "FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more", "'value': terms[key]}) return ret class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir = basedir", "self.basedir = basedir def run(self, terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)", "be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of", "have received a copy of the GNU General Public License # along with", "the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See", "basedir=None, **kwargs): self.basedir = basedir def run(self, terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms,", "# You should have received a copy of the GNU General Public License", "Public License for more details. # # You should have received a copy", "__init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, terms, inject=None, **kwargs): terms =", "ret.append({'key': key, 'value': terms[key]}) return ret class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir", "in terms: ret.append({'key': key, 'value': terms[key]}) return ret class LookupModule(object): def __init__(self, basedir=None,", "copy of the GNU General Public License # along with Ansible. If not,", "is distributed in the hope that it will be useful, # but WITHOUT", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public", "import ansible.utils as utils import ansible.errors as errors def flatten_hash_to_list(terms): ret = []", "received a copy of the GNU General Public License # along with Ansible.", "in the hope that it will be useful, # but WITHOUT ANY WARRANTY;", "<http://www.gnu.org/licenses/>. from ansible.utils import safe_eval import ansible.utils as utils import ansible.errors as errors", "basedir def run(self, terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if not", "FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for", "the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>.", "and/or modify # it under the terms of the GNU General Public License", "self.basedir, inject) if not isinstance(terms, dict): raise errors.AnsibleError(\"with_dict expects a dict\") return flatten_hash_to_list(terms)", "License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible.utils import safe_eval", "the # GNU General Public License for more details. # # You should", "3 of the License, or # (at your option) any later version. #", "# # Ansible is distributed in the hope that it will be useful,", "A PARTICULAR PURPOSE. See the # GNU General Public License for more details.", "or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License", "for more details. # # You should have received a copy of the", "LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, terms, inject=None, **kwargs):", "run(self, terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if not isinstance(terms, dict):", "<gh_stars>1-10 # (c) 2014, <NAME> <<EMAIL>> # # This file is part of", "option) any later version. # # Ansible is distributed in the hope that", "# Ansible is free software: you can redistribute it and/or modify # it", "key, 'value': terms[key]}) return ret class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir =", "**kwargs): self.basedir = basedir def run(self, terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir,", "import safe_eval import ansible.utils as utils import ansible.errors as errors def flatten_hash_to_list(terms): ret", "# Ansible is distributed in the hope that it will be useful, #", "will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty", "of the GNU General Public License # along with Ansible. 
If not, see", "General Public License as published by # the Free Software Foundation, either version", "If not, see <http://www.gnu.org/licenses/>. from ansible.utils import safe_eval import ansible.utils as utils import", "software: you can redistribute it and/or modify # it under the terms of", "your option) any later version. # # Ansible is distributed in the hope", "more details. # # You should have received a copy of the GNU", "# along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible.utils import safe_eval import", "of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU", "not, see <http://www.gnu.org/licenses/>. from ansible.utils import safe_eval import ansible.utils as utils import ansible.errors", "GNU General Public License for more details. # # You should have received", "under the terms of the GNU General Public License as published by #", "# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY", "ret class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, terms,", "# GNU General Public License for more details. # # You should have", "# (at your option) any later version. # # Ansible is distributed in", "can redistribute it and/or modify # it under the terms of the GNU", "Free Software Foundation, either version 3 of the License, or # (at your", "Software Foundation, either version 3 of the License, or # (at your option)", "errors def flatten_hash_to_list(terms): ret = [] for key in terms: ret.append({'key': key, 'value':", "2014, <NAME> <<EMAIL>> # # This file is part of Ansible # #", "ansible.errors as errors def flatten_hash_to_list(terms): ret = [] for key in terms: ret.append({'key':", "but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or" ]
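# --- Editor's illustrative sketch (not part of the original plugin) ---
# What `with_dict` yields: the helper flattens a mapping into {'key': ..., 'value': ...}
# pairs that a task can loop over. Plain-Python demo of the helper above:
users = {'alice': {'uid': 1001}, 'bob': {'uid': 1002}}
print(flatten_hash_to_list(users))
# -> [{'key': 'alice', 'value': {'uid': 1001}}, {'key': 'bob', 'value': {'uid': 1002}}]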
[ "seconds.', line) reindeer, speed, time, rest = m.groups() distances[reindeer] = cycle([int(speed)] * int(time)", "r'but then must rest for (\\d+) seconds.', line) reindeer, speed, time, rest =", "fly (\\d+) km/s for (\\d+) seconds, ' r'but then must rest for (\\d+)", "(\\d+) seconds, ' r'but then must rest for (\\d+) seconds.', line) reindeer, speed,", "itertools import cycle, islice import re distances = {} for line in open('input.txt'):", "m.groups() distances[reindeer] = cycle([int(speed)] * int(time) + [0] * int(rest)) print(max(sum(islice(seconds, 0, 2503))", "= m.groups() distances[reindeer] = cycle([int(speed)] * int(time) + [0] * int(rest)) print(max(sum(islice(seconds, 0,", "cycle, islice import re distances = {} for line in open('input.txt'): m =", "seconds, ' r'but then must rest for (\\d+) seconds.', line) reindeer, speed, time,", "(\\d+) km/s for (\\d+) seconds, ' r'but then must rest for (\\d+) seconds.',", "from itertools import cycle, islice import re distances = {} for line in", "= cycle([int(speed)] * int(time) + [0] * int(rest)) print(max(sum(islice(seconds, 0, 2503)) for seconds", "{} for line in open('input.txt'): m = re.match(r'(\\w+) can fly (\\d+) km/s for", "for line in open('input.txt'): m = re.match(r'(\\w+) can fly (\\d+) km/s for (\\d+)", "re.match(r'(\\w+) can fly (\\d+) km/s for (\\d+) seconds, ' r'but then must rest", "m = re.match(r'(\\w+) can fly (\\d+) km/s for (\\d+) seconds, ' r'but then", "for (\\d+) seconds, ' r'but then must rest for (\\d+) seconds.', line) reindeer,", "in open('input.txt'): m = re.match(r'(\\w+) can fly (\\d+) km/s for (\\d+) seconds, '", "then must rest for (\\d+) seconds.', line) reindeer, speed, time, rest = m.groups()", "speed, time, rest = m.groups() distances[reindeer] = cycle([int(speed)] * int(time) + [0] *", "islice import re distances = {} for line in open('input.txt'): m = re.match(r'(\\w+)", "km/s for (\\d+) seconds, ' r'but then must rest for (\\d+) seconds.', line)", "re distances = {} for line in open('input.txt'): m = re.match(r'(\\w+) can fly", "line) reindeer, speed, time, rest = m.groups() distances[reindeer] = cycle([int(speed)] * int(time) +", "time, rest = m.groups() distances[reindeer] = cycle([int(speed)] * int(time) + [0] * int(rest))", "rest for (\\d+) seconds.', line) reindeer, speed, time, rest = m.groups() distances[reindeer] =", "* int(time) + [0] * int(rest)) print(max(sum(islice(seconds, 0, 2503)) for seconds in distances.values()))", "cycle([int(speed)] * int(time) + [0] * int(rest)) print(max(sum(islice(seconds, 0, 2503)) for seconds in", "can fly (\\d+) km/s for (\\d+) seconds, ' r'but then must rest for", "reindeer, speed, time, rest = m.groups() distances[reindeer] = cycle([int(speed)] * int(time) + [0]", "rest = m.groups() distances[reindeer] = cycle([int(speed)] * int(time) + [0] * int(rest)) print(max(sum(islice(seconds,", "= re.match(r'(\\w+) can fly (\\d+) km/s for (\\d+) seconds, ' r'but then must", "line in open('input.txt'): m = re.match(r'(\\w+) can fly (\\d+) km/s for (\\d+) seconds,", "distances[reindeer] = cycle([int(speed)] * int(time) + [0] * int(rest)) print(max(sum(islice(seconds, 0, 2503)) for", "' r'but then must rest for (\\d+) seconds.', line) reindeer, speed, time, rest", "open('input.txt'): m = re.match(r'(\\w+) can fly (\\d+) km/s for (\\d+) seconds, ' r'but", "distances = {} for line in open('input.txt'): m = re.match(r'(\\w+) can fly (\\d+)", "must rest for (\\d+) seconds.', line) reindeer, speed, time, rest = m.groups() distances[reindeer]", "= {} for line in 
open('input.txt'): m = re.match(r'(\\w+) can fly (\\d+) km/s", "(\\d+) seconds.', line) reindeer, speed, time, rest = m.groups() distances[reindeer] = cycle([int(speed)] *", "for (\\d+) seconds.', line) reindeer, speed, time, rest = m.groups() distances[reindeer] = cycle([int(speed)]", "import re distances = {} for line in open('input.txt'): m = re.match(r'(\\w+) can", "import cycle, islice import re distances = {} for line in open('input.txt'): m" ]
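# --- Editor's illustrative sketch (not part of the original solution) ---
# How the cycle trick works: each reindeer becomes an infinite stream of
# per-second distances. With the puzzle's worked example (Comet: 14 km/s for
# 10 s, then 127 s of rest), the distance after 1000 s comes out to 1120 km:
comet = cycle([14] * 10 + [0] * 127)
print(sum(islice(comet, 0, 1000)))  # -> 1120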
[ "# Uncomment the next two lines to enable the admin: from django.contrib import", "admin: from django.contrib import admin from django.conf.urls.static import static from django.conf import settings", "url from dolove import views # Uncomment the next two lines to enable", "next two lines to enable the admin: from django.contrib import admin from django.conf.urls.static", "two lines to enable the admin: from django.contrib import admin from django.conf.urls.static import", "url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$', views.yoga_price, name='yoga_price'), url(r'^admin/doc/', include('django.contrib.admindocs.urls')), url(r'^admin/', include(admin.site.urls))", "Uncomment the next two lines to enable the admin: from django.contrib import admin", "views.doula_description, name='doula_description'), url(r'doula_services$', views.doula_services, name='doula_services'), url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'), url(r'photo_price$', views.photo_price, name='photo_price'), url(r'yoga_class$', views.yoga_class,", "django.conf.urls.static import static from django.conf import settings admin.autodiscover() urlpatterns = [ url(r'^$', views.home,", "name='yoga_class'), url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$', views.yoga_price, name='yoga_price'), url(r'^admin/doc/', include('django.contrib.admindocs.urls')), url(r'^admin/',", "import admin from django.conf.urls.static import static from django.conf import settings admin.autodiscover() urlpatterns =", "import include, url from dolove import views # Uncomment the next two lines", "the admin: from django.contrib import admin from django.conf.urls.static import static from django.conf import", "url(r'about$', views.about, name='about'), url(r'doula_description$', views.doula_description, name='doula_description'), url(r'doula_services$', views.doula_services, name='doula_services'), url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'), url(r'photo_price$',", "name='photo_gallery'), url(r'photo_price$', views.photo_price, name='photo_price'), url(r'yoga_class$', views.yoga_class, name='yoga_class'), url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'),", "django.conf.urls import include, url from dolove import views # Uncomment the next two", "from django.conf.urls.static import static from django.conf import settings admin.autodiscover() urlpatterns = [ url(r'^$',", "url(r'doula_services$', views.doula_services, name='doula_services'), url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'), url(r'photo_price$', views.photo_price, name='photo_price'), url(r'yoga_class$', views.yoga_class, name='yoga_class'), url(r'yoga_locations$',", "views.doula_services, name='doula_services'), url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'), url(r'photo_price$', views.photo_price, name='photo_price'), url(r'yoga_class$', views.yoga_class, name='yoga_class'), url(r'yoga_locations$', views.yoga_locations,", "enable the admin: from django.contrib import admin from django.conf.urls.static import static from django.conf", "url(r'doula_description$', views.doula_description, name='doula_description'), url(r'doula_services$', views.doula_services, name='doula_services'), url(r'photo_gallery$', 
views.photo_gallery, name='photo_gallery'), url(r'photo_price$', views.photo_price, name='photo_price'), url(r'yoga_class$',", "name='home'), url(r'about$', views.about, name='about'), url(r'doula_description$', views.doula_description, name='doula_description'), url(r'doula_services$', views.doula_services, name='doula_services'), url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'),", "lines to enable the admin: from django.contrib import admin from django.conf.urls.static import static", "import static from django.conf import settings admin.autodiscover() urlpatterns = [ url(r'^$', views.home, name='home'),", "name='about'), url(r'doula_description$', views.doula_description, name='doula_description'), url(r'doula_services$', views.doula_services, name='doula_services'), url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'), url(r'photo_price$', views.photo_price, name='photo_price'),", "url(r'yoga_class$', views.yoga_class, name='yoga_class'), url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$', views.yoga_price, name='yoga_price'), url(r'^admin/doc/',", "to enable the admin: from django.contrib import admin from django.conf.urls.static import static from", "urlpatterns = [ url(r'^$', views.home, name='home'), url(r'about$', views.about, name='about'), url(r'doula_description$', views.doula_description, name='doula_description'), url(r'doula_services$',", "dolove import views # Uncomment the next two lines to enable the admin:", "static from django.conf import settings admin.autodiscover() urlpatterns = [ url(r'^$', views.home, name='home'), url(r'about$',", "from django.conf.urls import include, url from dolove import views # Uncomment the next", "import views # Uncomment the next two lines to enable the admin: from", "views.photo_price, name='photo_price'), url(r'yoga_class$', views.yoga_class, name='yoga_class'), url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$', views.yoga_price,", "admin from django.conf.urls.static import static from django.conf import settings admin.autodiscover() urlpatterns = [", "url(r'photo_price$', views.photo_price, name='photo_price'), url(r'yoga_class$', views.yoga_class, name='yoga_class'), url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$',", "the next two lines to enable the admin: from django.contrib import admin from", "name='photo_price'), url(r'yoga_class$', views.yoga_class, name='yoga_class'), url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$', views.yoga_price, name='yoga_price'),", "views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$', views.yoga_price, name='yoga_price'), url(r'^admin/doc/', include('django.contrib.admindocs.urls')), url(r'^admin/', include(admin.site.urls)) ]", "<reponame>jugovich/teresajugovich from django.conf.urls import include, url from dolove import views # Uncomment the", "from django.conf import settings admin.autodiscover() urlpatterns = [ url(r'^$', views.home, name='home'), url(r'about$', views.about,", "include, url from dolove import views # Uncomment the next two lines to", "views.about, 
name='about'), url(r'doula_description$', views.doula_description, name='doula_description'), url(r'doula_services$', views.doula_services, name='doula_services'), url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'), url(r'photo_price$', views.photo_price,", "import settings admin.autodiscover() urlpatterns = [ url(r'^$', views.home, name='home'), url(r'about$', views.about, name='about'), url(r'doula_description$',", "url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$', views.yoga_price, name='yoga_price'), url(r'^admin/doc/', include('django.contrib.admindocs.urls')), url(r'^admin/', include(admin.site.urls)) ] + static(settings.STATIC_URL,", "views.photo_gallery, name='photo_gallery'), url(r'photo_price$', views.photo_price, name='photo_price'), url(r'yoga_class$', views.yoga_class, name='yoga_class'), url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule,", "django.conf import settings admin.autodiscover() urlpatterns = [ url(r'^$', views.home, name='home'), url(r'about$', views.about, name='about'),", "name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$', views.yoga_price, name='yoga_price'), url(r'^admin/doc/', include('django.contrib.admindocs.urls')), url(r'^admin/', include(admin.site.urls)) ] +", "views.yoga_class, name='yoga_class'), url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$', views.yoga_price, name='yoga_price'), url(r'^admin/doc/', include('django.contrib.admindocs.urls')),", "name='doula_description'), url(r'doula_services$', views.doula_services, name='doula_services'), url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'), url(r'photo_price$', views.photo_price, name='photo_price'), url(r'yoga_class$', views.yoga_class, name='yoga_class'),", "admin.autodiscover() urlpatterns = [ url(r'^$', views.home, name='home'), url(r'about$', views.about, name='about'), url(r'doula_description$', views.doula_description, name='doula_description'),", "from django.contrib import admin from django.conf.urls.static import static from django.conf import settings admin.autodiscover()", "views.yoga_schedule, name='yoga_schedule'), url(r'yoga_price$', views.yoga_price, name='yoga_price'), url(r'^admin/doc/', include('django.contrib.admindocs.urls')), url(r'^admin/', include(admin.site.urls)) ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)", "views # Uncomment the next two lines to enable the admin: from django.contrib", "name='doula_services'), url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'), url(r'photo_price$', views.photo_price, name='photo_price'), url(r'yoga_class$', views.yoga_class, name='yoga_class'), url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'),", "settings admin.autodiscover() urlpatterns = [ url(r'^$', views.home, name='home'), url(r'about$', views.about, name='about'), url(r'doula_description$', views.doula_description,", "views.home, name='home'), url(r'about$', views.about, name='about'), url(r'doula_description$', views.doula_description, name='doula_description'), url(r'doula_services$', views.doula_services, name='doula_services'), url(r'photo_gallery$', views.photo_gallery,", "url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'), url(r'photo_price$', views.photo_price, name='photo_price'), url(r'yoga_class$', 
views.yoga_class, name='yoga_class'), url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'), url(r'yoga_schedule$',", "from dolove import views # Uncomment the next two lines to enable the", "= [ url(r'^$', views.home, name='home'), url(r'about$', views.about, name='about'), url(r'doula_description$', views.doula_description, name='doula_description'), url(r'doula_services$', views.doula_services,", "[ url(r'^$', views.home, name='home'), url(r'about$', views.about, name='about'), url(r'doula_description$', views.doula_description, name='doula_description'), url(r'doula_services$', views.doula_services, name='doula_services'),", "django.contrib import admin from django.conf.urls.static import static from django.conf import settings admin.autodiscover() urlpatterns", "url(r'^$', views.home, name='home'), url(r'about$', views.about, name='about'), url(r'doula_description$', views.doula_description, name='doula_description'), url(r'doula_services$', views.doula_services, name='doula_services'), url(r'photo_gallery$'," ]
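# --- Editor's illustrative note (not part of the original urls.py) ---
# All patterns above except r'^$' and the admin routes omit the '^' anchor, and
# Django matches URL patterns by regex search, so e.g. r'about$' also matches any
# path that merely ends in "about":
import re
print(bool(re.search(r'about$', 'about')))       # -> True
print(bool(re.search(r'about$', 'yoga/about')))  # -> True (likely unintended)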
[ "PASSWORD = \"<PASSWORD>\" today = dt.datetime.now() today_tuple = (today.month, today.day) data = pandas.read_csv(\"birthdays.csv\")", "contents = contents.replace(\"[NAME]\", birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\") as connection: connection.login(MY_EMAIL, PASSWORD) connection.sendmail(from_addr=MY_EMAIL, to_addrs=birthday_person[\"email\"], msg=f\"Subject:", "import smtplib from astroid import Pass MY_EMAIL = \"your_email\" PASSWORD = \"<PASSWORD>\" today", "letter.read() contents = contents.replace(\"[NAME]\", birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\") as connection: connection.login(MY_EMAIL, PASSWORD) connection.sendmail(from_addr=MY_EMAIL, to_addrs=birthday_person[\"email\"],", "pandas import random import smtplib from astroid import Pass MY_EMAIL = \"your_email\" PASSWORD", "for (index, data_row) in data.iterrows()} if today_tuple in birthday_dict: birthday_person = birthday_dict[today_tuple] file_path", "Pass MY_EMAIL = \"your_email\" PASSWORD = \"<PASSWORD>\" today = dt.datetime.now() today_tuple = (today.month,", "data = pandas.read_csv(\"birthdays.csv\") birthday_dict = {(data_row.month, data_row.day): data_row for (index, data_row) in data.iterrows()}", "data_row.day): data_row for (index, data_row) in data.iterrows()} if today_tuple in birthday_dict: birthday_person =", "today_tuple in birthday_dict: birthday_person = birthday_dict[today_tuple] file_path = f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with open(file_path) as", "\"<PASSWORD>\" today = dt.datetime.now() today_tuple = (today.month, today.day) data = pandas.read_csv(\"birthdays.csv\") birthday_dict =", "contents.replace(\"[NAME]\", birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\") as connection: connection.login(MY_EMAIL, PASSWORD) connection.sendmail(from_addr=MY_EMAIL, to_addrs=birthday_person[\"email\"], msg=f\"Subject: Happy Birthday!", "import random import smtplib from astroid import Pass MY_EMAIL = \"your_email\" PASSWORD =", "= {(data_row.month, data_row.day): data_row for (index, data_row) in data.iterrows()} if today_tuple in birthday_dict:", "today_tuple = (today.month, today.day) data = pandas.read_csv(\"birthdays.csv\") birthday_dict = {(data_row.month, data_row.day): data_row for", "astroid import Pass MY_EMAIL = \"your_email\" PASSWORD = \"<PASSWORD>\" today = dt.datetime.now() today_tuple", "letter: contents = letter.read() contents = contents.replace(\"[NAME]\", birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\") as connection: connection.login(MY_EMAIL,", "datetime as dt import pandas import random import smtplib from astroid import Pass", "birthday_dict: birthday_person = birthday_dict[today_tuple] file_path = f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with open(file_path) as letter: contents", "= f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with open(file_path) as letter: contents = letter.read() contents = contents.replace(\"[NAME]\",", "with smtplib.SMTP_SSL(\"smtp.gmail.com\") as connection: connection.login(MY_EMAIL, PASSWORD) connection.sendmail(from_addr=MY_EMAIL, to_addrs=birthday_person[\"email\"], msg=f\"Subject: Happy Birthday! 
\\n\\n\" f\"{contents}\")", "import pandas import random import smtplib from astroid import Pass MY_EMAIL = \"your_email\"", "pandas.read_csv(\"birthdays.csv\") birthday_dict = {(data_row.month, data_row.day): data_row for (index, data_row) in data.iterrows()} if today_tuple", "as letter: contents = letter.read() contents = contents.replace(\"[NAME]\", birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\") as connection:", "(today.month, today.day) data = pandas.read_csv(\"birthdays.csv\") birthday_dict = {(data_row.month, data_row.day): data_row for (index, data_row)", "from astroid import Pass MY_EMAIL = \"your_email\" PASSWORD = \"<PASSWORD>\" today = dt.datetime.now()", "birthday_dict = {(data_row.month, data_row.day): data_row for (index, data_row) in data.iterrows()} if today_tuple in", "data_row for (index, data_row) in data.iterrows()} if today_tuple in birthday_dict: birthday_person = birthday_dict[today_tuple]", "birthday_dict[today_tuple] file_path = f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with open(file_path) as letter: contents = letter.read() contents", "f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with open(file_path) as letter: contents = letter.read() contents = contents.replace(\"[NAME]\", birthday_person[\"name\"])", "today.day) data = pandas.read_csv(\"birthdays.csv\") birthday_dict = {(data_row.month, data_row.day): data_row for (index, data_row) in", "open(file_path) as letter: contents = letter.read() contents = contents.replace(\"[NAME]\", birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\") as", "birthday_person = birthday_dict[today_tuple] file_path = f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with open(file_path) as letter: contents =", "in data.iterrows()} if today_tuple in birthday_dict: birthday_person = birthday_dict[today_tuple] file_path = f\"letter_templates/letter_{random.randint(1, 3)}.txt\"", "smtplib from astroid import Pass MY_EMAIL = \"your_email\" PASSWORD = \"<PASSWORD>\" today =", "with open(file_path) as letter: contents = letter.read() contents = contents.replace(\"[NAME]\", birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\")", "MY_EMAIL = \"your_email\" PASSWORD = \"<PASSWORD>\" today = dt.datetime.now() today_tuple = (today.month, today.day)", "data_row) in data.iterrows()} if today_tuple in birthday_dict: birthday_person = birthday_dict[today_tuple] file_path = f\"letter_templates/letter_{random.randint(1,", "= \"your_email\" PASSWORD = \"<PASSWORD>\" today = dt.datetime.now() today_tuple = (today.month, today.day) data", "= birthday_dict[today_tuple] file_path = f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with open(file_path) as letter: contents = letter.read()", "import Pass MY_EMAIL = \"your_email\" PASSWORD = \"<PASSWORD>\" today = dt.datetime.now() today_tuple =", "dt import pandas import random import smtplib from astroid import Pass MY_EMAIL =", "as dt import pandas import random import smtplib from astroid import Pass MY_EMAIL", "= pandas.read_csv(\"birthdays.csv\") birthday_dict = {(data_row.month, data_row.day): data_row for (index, data_row) in data.iterrows()} if", "3)}.txt\" with open(file_path) as letter: contents = letter.read() contents = contents.replace(\"[NAME]\", birthday_person[\"name\"]) with", "\"your_email\" PASSWORD = \"<PASSWORD>\" today = dt.datetime.now() today_tuple = (today.month, today.day) data =", "dt.datetime.now() today_tuple = (today.month, today.day) data = pandas.read_csv(\"birthdays.csv\") birthday_dict = 
{(data_row.month, data_row.day): data_row", "if today_tuple in birthday_dict: birthday_person = birthday_dict[today_tuple] file_path = f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with open(file_path)", "contents = letter.read() contents = contents.replace(\"[NAME]\", birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\") as connection: connection.login(MY_EMAIL, PASSWORD)", "{(data_row.month, data_row.day): data_row for (index, data_row) in data.iterrows()} if today_tuple in birthday_dict: birthday_person", "(index, data_row) in data.iterrows()} if today_tuple in birthday_dict: birthday_person = birthday_dict[today_tuple] file_path =", "file_path = f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with open(file_path) as letter: contents = letter.read() contents =", "birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\") as connection: connection.login(MY_EMAIL, PASSWORD) connection.sendmail(from_addr=MY_EMAIL, to_addrs=birthday_person[\"email\"], msg=f\"Subject: Happy Birthday! \\n\\n\"", "= (today.month, today.day) data = pandas.read_csv(\"birthdays.csv\") birthday_dict = {(data_row.month, data_row.day): data_row for (index,", "today = dt.datetime.now() today_tuple = (today.month, today.day) data = pandas.read_csv(\"birthdays.csv\") birthday_dict = {(data_row.month,", "= contents.replace(\"[NAME]\", birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\") as connection: connection.login(MY_EMAIL, PASSWORD) connection.sendmail(from_addr=MY_EMAIL, to_addrs=birthday_person[\"email\"], msg=f\"Subject: Happy", "= dt.datetime.now() today_tuple = (today.month, today.day) data = pandas.read_csv(\"birthdays.csv\") birthday_dict = {(data_row.month, data_row.day):", "= \"<PASSWORD>\" today = dt.datetime.now() today_tuple = (today.month, today.day) data = pandas.read_csv(\"birthdays.csv\") birthday_dict", "import datetime as dt import pandas import random import smtplib from astroid import", "random import smtplib from astroid import Pass MY_EMAIL = \"your_email\" PASSWORD = \"<PASSWORD>\"", "= letter.read() contents = contents.replace(\"[NAME]\", birthday_person[\"name\"]) with smtplib.SMTP_SSL(\"smtp.gmail.com\") as connection: connection.login(MY_EMAIL, PASSWORD) connection.sendmail(from_addr=MY_EMAIL,", "in birthday_dict: birthday_person = birthday_dict[today_tuple] file_path = f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with open(file_path) as letter:", "data.iterrows()} if today_tuple in birthday_dict: birthday_person = birthday_dict[today_tuple] file_path = f\"letter_templates/letter_{random.randint(1, 3)}.txt\" with" ]
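# --- Editor's illustrative sketch (not part of the original script) ---
# Self-contained check of the (month, day) lookup built above; the CSV is assumed
# to carry at least name/email/month/day columns, mirrored inline here:
import pandas

df = pandas.DataFrame([{"name": "Ada", "email": "ada@example.com", "month": 12, "day": 10}])
lookup = {(row.month, row.day): row for _, row in df.iterrows()}
print((12, 10) in lookup)  # -> True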
[ "(subsequent calls will use xout from first integration). y0 : array_like Passed on", "array_like Positive, monotonically increasing 1D array. x : array_like Passed on to ``odesys.integrate``", "ValueError(\"Neither atol nor rtol are allowed in kwargs\") if not np.all(atols > 0)", "to obey strict positive monotonicity\") if atols.size < 4: raise ValueError(\"Pointless doing linear", "0): raise ValueError(\"atols & rtols need to obey strict positive monotonicity\") if atols.size", "= [] for iy in range(result0.yout.shape[-1]): if np.all(diffs[:, iy] == 0): yerrs.append(0) else:", "doing linear interpolation on less than 3 points\") if atols.size < 6: warnings.warn(\"Statistics", "np def fit_factory(discard=1): def fit(x, y): p = np.polyfit(x, y, 1) v =", "from first integration). y0 : array_like Passed on to ``odesys.integrate``. params : array_like", "if rtols is None: rtols = atols atols, rtols = map(np.asarray, (atols, rtols))", "1) return fit def integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(), fit=lambda x, y:", "math import exp import numpy as np def fit_factory(discard=1): def fit(x, y): p", "np.all(rtols > 0): raise ValueError(\"atols & rtols need to > 0\") if not", "+ rtol*np.abs(r.yout[ix, :]) for r, atol, rtol in zip([result0] + results, atols, rtols)])", "import numpy as np def fit_factory(discard=1): def fit(x, y): p = np.polyfit(x, y,", "of Result instances extra : dict errest : 2D array of error estimates", "> 0): raise ValueError(\"atols & rtols need to obey strict positive monotonicity\") if", "0) or not np.all(np.diff(rtols) > 0): raise ValueError(\"atols & rtols need to obey", "errest : 2D array of error estimates for result0.yout \"\"\" if atols is", "fit : callable val : callable \\\\*\\\\*kwargs: Passed on to ``odesys.integrate``. Returns -------", "integration). y0 : array_like Passed on to ``odesys.integrate``. params : array_like Passed on", "= np.array([atol + rtol*np.abs(r.yout[ix, :]) for r, atol, rtol in zip([result0] + results,", "np.all(diffs[:, iy] == 0): yerrs.append(0) else: p = fit(ln_tols[1:, iy], ln_absd[:, iy]) yerrs.append(exp(val(p,", "ntols)] errest = [] for ix, vx in enumerate(result0.xout): diffs = np.array([result0.yout[ix, :]", "fit(x, y): p = np.polyfit(x, y, 1) v = np.polyval(p, x) e =", "to ``odesys.integrate`` for first set of tolerances. (subsequent calls will use xout from", "y, 1) v = np.polyval(p, x) e = np.abs(y - v) drop_idxs =", "not np.all(atols > 0) or not np.all(rtols > 0): raise ValueError(\"atols & rtols", "iy] == 0): yerrs.append(0) else: p = fit(ln_tols[1:, iy], ln_absd[:, iy]) yerrs.append(exp(val(p, ln_tols[0,", "calls will use xout from first integration). y0 : array_like Passed on to", "[] for iy in range(result0.yout.shape[-1]): if np.all(diffs[:, iy] == 0): yerrs.append(0) else: p", "\\\\*\\\\*kwargs: Passed on to ``odesys.integrate``. Returns ------- result0 : Result results : list", "Result instances extra : dict errest : 2D array of error estimates for", "be of same length\") if 'atol' in kwargs or 'rtol' in kwargs: raise", "np.all(atols > 0) or not np.all(rtols > 0): raise ValueError(\"atols & rtols need", "to > 0\") if not np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols) > 0):", "in kwargs\") if not np.all(atols > 0) or not np.all(rtols > 0): raise", "6: warnings.warn(\"Statistics will be (very) shaky when doing linear \" \"interpolation on less", "on to ``odesys.integrate``. 
fit : callable val : callable \\\\*\\\\*kwargs: Passed on to", "**kwargs) results = [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs) for i in range(1,", "same length\") if 'atol' in kwargs or 'rtol' in kwargs: raise ValueError(\"Neither atol", "from __future__ import (absolute_import, division, print_function) import warnings from math import exp import", "1) v = np.polyval(p, x) e = np.abs(y - v) drop_idxs = np.argsort(e)[-discard]", "**kwargs): \"\"\" Parameters ---------- odesys : :class:`ODESys` atols : array_like Positive, monotonically increasing", "= [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs) for i in range(1, ntols)] errest", "increasing 1D array. x : array_like Passed on to ``odesys.integrate`` for first set", "rtols.shape: raise ValueError(\"atols & rtols need to be of same length\") if 'atol'", "shaky when doing linear \" \"interpolation on less than 5 points.\") ntols =", "\"\"\" Parameters ---------- odesys : :class:`ODESys` atols : array_like Positive, monotonically increasing 1D", "import warnings from math import exp import numpy as np def fit_factory(discard=1): def", "array_like Positive, monotonically increasing 1D array. rtols : array_like Positive, monotonically increasing 1D", ": array_like Passed on to ``odesys.integrate``. fit : callable val : callable \\\\*\\\\*kwargs:", "\"\"\" if atols is None: atols = rtols if rtols is None: rtols", "rtol*np.abs(r.yout[ix, :]) for r, atol, rtol in zip([result0] + results, atols, rtols)]) ln_tols", ": Result results : list of Result instances extra : dict errest :", "raise NotImplementedError(\"Assuming 1-dimensional array\") if atols.shape != rtols.shape: raise ValueError(\"atols & rtols need", "need to > 0\") if not np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols) >", "if 'atol' in kwargs or 'rtol' in kwargs: raise ValueError(\"Neither atol nor rtol", "1: raise NotImplementedError(\"Assuming 1-dimensional array\") if atols.shape != rtols.shape: raise ValueError(\"atols & rtols", "v = np.polyval(p, x) e = np.abs(y - v) drop_idxs = np.argsort(e)[-discard] return", "y: np.polyfit(x, y, 1), val=np.polyval, **kwargs): \"\"\" Parameters ---------- odesys : :class:`ODESys` atols", "p = np.polyfit(x, y, 1) v = np.polyval(p, x) e = np.abs(y -", "rtols, x, y0, params=(), fit=lambda x, y: np.polyfit(x, y, 1), val=np.polyval, **kwargs): \"\"\"", "results : list of Result instances extra : dict errest : 2D array", "array_like Passed on to ``odesys.integrate``. params : array_like Passed on to ``odesys.integrate``. fit", "rtols need to be of same length\") if 'atol' in kwargs or 'rtol'", "less than 3 points\") if atols.size < 6: warnings.warn(\"Statistics will be (very) shaky", "raise ValueError(\"Pointless doing linear interpolation on less than 3 points\") if atols.size <", "!= 1: raise NotImplementedError(\"Assuming 1-dimensional array\") if atols.shape != rtols.shape: raise ValueError(\"atols &", "& rtols need to obey strict positive monotonicity\") if atols.size < 4: raise", "drop_idxs), np.delete(y, drop_idxs), 1) return fit def integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(),", ": array_like Passed on to ``odesys.integrate``. 
params : array_like Passed on to ``odesys.integrate``.", "np.polyval(p, x) e = np.abs(y - v) drop_idxs = np.argsort(e)[-discard] return np.polyfit(np.delete(x, drop_idxs),", "points\") if atols.size < 6: warnings.warn(\"Statistics will be (very) shaky when doing linear", "(absolute_import, division, print_function) import warnings from math import exp import numpy as np", "fit=lambda x, y: np.polyfit(x, y, 1), val=np.polyval, **kwargs): \"\"\" Parameters ---------- odesys :", "- v) drop_idxs = np.argsort(e)[-discard] return np.polyfit(np.delete(x, drop_idxs), np.delete(y, drop_idxs), 1) return fit", "[odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs) for i in range(1, ntols)] errest =", "kwargs or 'rtol' in kwargs: raise ValueError(\"Neither atol nor rtol are allowed in", "kwargs: raise ValueError(\"Neither atol nor rtol are allowed in kwargs\") if not np.all(atols", "monotonically increasing 1D array. x : array_like Passed on to ``odesys.integrate`` for first", "4: raise ValueError(\"Pointless doing linear interpolation on less than 3 points\") if atols.size", "rtol in zip([result0] + results, atols, rtols)]) ln_tols = np.log(tols).astype(np.float64) ln_absd = np.log(np.abs(diffs)).astype(np.float64)", "np.array([atol + rtol*np.abs(r.yout[ix, :]) for r, atol, rtol in zip([result0] + results, atols,", "is None: rtols = atols atols, rtols = map(np.asarray, (atols, rtols)) if atols.ndim", "params=(), fit=lambda x, y: np.polyfit(x, y, 1), val=np.polyval, **kwargs): \"\"\" Parameters ---------- odesys", "kwargs\") if not np.all(atols > 0) or not np.all(rtols > 0): raise ValueError(\"atols", "atol, rtol in zip([result0] + results, atols, rtols)]) ln_tols = np.log(tols).astype(np.float64) ln_absd =", ": array_like Passed on to ``odesys.integrate`` for first set of tolerances. (subsequent calls", "np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols) > 0): raise ValueError(\"atols & rtols need", "for i in range(1, ntols)] errest = [] for ix, vx in enumerate(result0.xout):", "list of Result instances extra : dict errest : 2D array of error", "atols, rtols, x, y0, params=(), fit=lambda x, y: np.polyfit(x, y, 1), val=np.polyval, **kwargs):", "< 6: warnings.warn(\"Statistics will be (very) shaky when doing linear \" \"interpolation on", "vx in enumerate(result0.xout): diffs = np.array([result0.yout[ix, :] - r.yout[ix, :] for r in", "range(result0.yout.shape[-1]): if np.all(diffs[:, iy] == 0): yerrs.append(0) else: p = fit(ln_tols[1:, iy], ln_absd[:,", "results = [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs) for i in range(1, ntols)]", "if np.all(diffs[:, iy] == 0): yerrs.append(0) else: p = fit(ln_tols[1:, iy], ln_absd[:, iy])", "rtols)) if atols.ndim != 1: raise NotImplementedError(\"Assuming 1-dimensional array\") if atols.shape != rtols.shape:", "or 'rtol' in kwargs: raise ValueError(\"Neither atol nor rtol are allowed in kwargs\")", "in kwargs: raise ValueError(\"Neither atol nor rtol are allowed in kwargs\") if not", "for r, atol, rtol in zip([result0] + results, atols, rtols)]) ln_tols = np.log(tols).astype(np.float64)", ": :class:`ODESys` atols : array_like Positive, monotonically increasing 1D array. 
rtols : array_like", "'rtol' in kwargs: raise ValueError(\"Neither atol nor rtol are allowed in kwargs\") if", "r, atol, rtol in zip([result0] + results, atols, rtols)]) ln_tols = np.log(tols).astype(np.float64) ln_absd", "result0 = odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs) results = [odesys.integrate(result0.xout, y0, params,", "as np def fit_factory(discard=1): def fit(x, y): p = np.polyfit(x, y, 1) v", "= rtols if rtols is None: rtols = atols atols, rtols = map(np.asarray,", "extra : dict errest : 2D array of error estimates for result0.yout \"\"\"", "def integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(), fit=lambda x, y: np.polyfit(x, y, 1),", "atols.size < 6: warnings.warn(\"Statistics will be (very) shaky when doing linear \" \"interpolation", "params : array_like Passed on to ``odesys.integrate``. fit : callable val : callable", "return np.polyfit(np.delete(x, drop_idxs), np.delete(y, drop_idxs), 1) return fit def integrate_tolerance_series(odesys, atols, rtols, x,", "np.log(np.abs(diffs)).astype(np.float64) yerrs = [] for iy in range(result0.yout.shape[-1]): if np.all(diffs[:, iy] == 0):", "1D array. x : array_like Passed on to ``odesys.integrate`` for first set of", "result0 : Result results : list of Result instances extra : dict errest", "return fit def integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(), fit=lambda x, y: np.polyfit(x,", "np.argsort(e)[-discard] return np.polyfit(np.delete(x, drop_idxs), np.delete(y, drop_idxs), 1) return fit def integrate_tolerance_series(odesys, atols, rtols,", "or not np.all(rtols > 0): raise ValueError(\"atols & rtols need to > 0\")", "atols is None: atols = rtols if rtols is None: rtols = atols", "in kwargs or 'rtol' in kwargs: raise ValueError(\"Neither atol nor rtol are allowed", "for ix, vx in enumerate(result0.xout): diffs = np.array([result0.yout[ix, :] - r.yout[ix, :] for", "array. x : array_like Passed on to ``odesys.integrate`` for first set of tolerances.", "to be of same length\") if 'atol' in kwargs or 'rtol' in kwargs:", "linear \" \"interpolation on less than 5 points.\") ntols = atols.size result0 =", "y0, params, atol=atols[0], rtol=rtols[0], **kwargs) results = [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs)", "x : array_like Passed on to ``odesys.integrate`` for first set of tolerances. (subsequent", "x, y: np.polyfit(x, y, 1), val=np.polyval, **kwargs): \"\"\" Parameters ---------- odesys : :class:`ODESys`", "rtols need to obey strict positive monotonicity\") if atols.size < 4: raise ValueError(\"Pointless", "allowed in kwargs\") if not np.all(atols > 0) or not np.all(rtols > 0):", "enumerate(result0.xout): diffs = np.array([result0.yout[ix, :] - r.yout[ix, :] for r in results]) tols", "on to ``odesys.integrate``. params : array_like Passed on to ``odesys.integrate``. fit : callable", "---------- odesys : :class:`ODESys` atols : array_like Positive, monotonically increasing 1D array. 
rtols", "if atols.size < 4: raise ValueError(\"Pointless doing linear interpolation on less than 3", "atols.size < 4: raise ValueError(\"Pointless doing linear interpolation on less than 3 points\")", "array\") if atols.shape != rtols.shape: raise ValueError(\"atols & rtols need to be of", "else: p = fit(ln_tols[1:, iy], ln_absd[:, iy]) yerrs.append(exp(val(p, ln_tols[0, iy]))) errest.append(yerrs) return result0,", "= [] for ix, vx in enumerate(result0.xout): diffs = np.array([result0.yout[ix, :] - r.yout[ix,", "params, atol=atols[i], rtol=rtols[i], **kwargs) for i in range(1, ntols)] errest = [] for", "``odesys.integrate`` for first set of tolerances. (subsequent calls will use xout from first", "\"interpolation on less than 5 points.\") ntols = atols.size result0 = odesys.integrate(x, y0,", "x) e = np.abs(y - v) drop_idxs = np.argsort(e)[-discard] return np.polyfit(np.delete(x, drop_idxs), np.delete(y,", "in range(result0.yout.shape[-1]): if np.all(diffs[:, iy] == 0): yerrs.append(0) else: p = fit(ln_tols[1:, iy],", "``odesys.integrate``. fit : callable val : callable \\\\*\\\\*kwargs: Passed on to ``odesys.integrate``. Returns", "of same length\") if 'atol' in kwargs or 'rtol' in kwargs: raise ValueError(\"Neither", "warnings.warn(\"Statistics will be (very) shaky when doing linear \" \"interpolation on less than", "division, print_function) import warnings from math import exp import numpy as np def", "params, atol=atols[0], rtol=rtols[0], **kwargs) results = [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs) for", "atols atols, rtols = map(np.asarray, (atols, rtols)) if atols.ndim != 1: raise NotImplementedError(\"Assuming", "array_like Passed on to ``odesys.integrate``. fit : callable val : callable \\\\*\\\\*kwargs: Passed", ": list of Result instances extra : dict errest : 2D array of", "& rtols need to > 0\") if not np.all(np.diff(atols) > 0) or not", "fit def integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(), fit=lambda x, y: np.polyfit(x, y,", "rtols need to > 0\") if not np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols)", "coding: utf-8 -*- from __future__ import (absolute_import, division, print_function) import warnings from math", "not np.all(rtols > 0): raise ValueError(\"atols & rtols need to > 0\") if", "first set of tolerances. (subsequent calls will use xout from first integration). y0", "or not np.all(np.diff(rtols) > 0): raise ValueError(\"atols & rtols need to obey strict", "drop_idxs = np.argsort(e)[-discard] return np.polyfit(np.delete(x, drop_idxs), np.delete(y, drop_idxs), 1) return fit def integrate_tolerance_series(odesys,", "than 5 points.\") ntols = atols.size result0 = odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0],", "atols, rtols)]) ln_tols = np.log(tols).astype(np.float64) ln_absd = np.log(np.abs(diffs)).astype(np.float64) yerrs = [] for iy", "odesys : :class:`ODESys` atols : array_like Positive, monotonically increasing 1D array. 
rtols :", "= np.log(tols).astype(np.float64) ln_absd = np.log(np.abs(diffs)).astype(np.float64) yerrs = [] for iy in range(result0.yout.shape[-1]): if", "== 0): yerrs.append(0) else: p = fit(ln_tols[1:, iy], ln_absd[:, iy]) yerrs.append(exp(val(p, ln_tols[0, iy])))", "!= rtols.shape: raise ValueError(\"atols & rtols need to be of same length\") if", "ValueError(\"atols & rtols need to > 0\") if not np.all(np.diff(atols) > 0) or", "ntols = atols.size result0 = odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs) results =", "length\") if 'atol' in kwargs or 'rtol' in kwargs: raise ValueError(\"Neither atol nor", "be (very) shaky when doing linear \" \"interpolation on less than 5 points.\")", ": array_like Positive, monotonically increasing 1D array. rtols : array_like Positive, monotonically increasing", "atols, rtols = map(np.asarray, (atols, rtols)) if atols.ndim != 1: raise NotImplementedError(\"Assuming 1-dimensional", "iy in range(result0.yout.shape[-1]): if np.all(diffs[:, iy] == 0): yerrs.append(0) else: p = fit(ln_tols[1:,", ":class:`ODESys` atols : array_like Positive, monotonically increasing 1D array. rtols : array_like Positive,", "y0, params=(), fit=lambda x, y: np.polyfit(x, y, 1), val=np.polyval, **kwargs): \"\"\" Parameters ----------", "if atols.ndim != 1: raise NotImplementedError(\"Assuming 1-dimensional array\") if atols.shape != rtols.shape: raise", "obey strict positive monotonicity\") if atols.size < 4: raise ValueError(\"Pointless doing linear interpolation", "= np.argsort(e)[-discard] return np.polyfit(np.delete(x, drop_idxs), np.delete(y, drop_idxs), 1) return fit def integrate_tolerance_series(odesys, atols,", "use xout from first integration). y0 : array_like Passed on to ``odesys.integrate``. params", "array of error estimates for result0.yout \"\"\" if atols is None: atols =", "if atols is None: atols = rtols if rtols is None: rtols =", "atols.shape != rtols.shape: raise ValueError(\"atols & rtols need to be of same length\")", "rtols : array_like Positive, monotonically increasing 1D array. x : array_like Passed on", "on less than 3 points\") if atols.size < 6: warnings.warn(\"Statistics will be (very)", "import (absolute_import, division, print_function) import warnings from math import exp import numpy as", "& rtols need to be of same length\") if 'atol' in kwargs or", "in range(1, ntols)] errest = [] for ix, vx in enumerate(result0.xout): diffs =", "yerrs = [] for iy in range(result0.yout.shape[-1]): if np.all(diffs[:, iy] == 0): yerrs.append(0)", "1-dimensional array\") if atols.shape != rtols.shape: raise ValueError(\"atols & rtols need to be", "Passed on to ``odesys.integrate`` for first set of tolerances. 
(subsequent calls will use", "doing linear \" \"interpolation on less than 5 points.\") ntols = atols.size result0", "[] for ix, vx in enumerate(result0.xout): diffs = np.array([result0.yout[ix, :] - r.yout[ix, :]", "rtols is None: rtols = atols atols, rtols = map(np.asarray, (atols, rtols)) if", "ln_absd = np.log(np.abs(diffs)).astype(np.float64) yerrs = [] for iy in range(result0.yout.shape[-1]): if np.all(diffs[:, iy]", "- r.yout[ix, :] for r in results]) tols = np.array([atol + rtol*np.abs(r.yout[ix, :])", "when doing linear \" \"interpolation on less than 5 points.\") ntols = atols.size", "linear interpolation on less than 3 points\") if atols.size < 6: warnings.warn(\"Statistics will", "integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(), fit=lambda x, y: np.polyfit(x, y, 1), val=np.polyval,", "> 0) or not np.all(rtols > 0): raise ValueError(\"atols & rtols need to", "odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs) results = [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i],", "rtols = map(np.asarray, (atols, rtols)) if atols.ndim != 1: raise NotImplementedError(\"Assuming 1-dimensional array\")", "> 0): raise ValueError(\"atols & rtols need to > 0\") if not np.all(np.diff(atols)", "atols.ndim != 1: raise NotImplementedError(\"Assuming 1-dimensional array\") if atols.shape != rtols.shape: raise ValueError(\"atols", "yerrs.append(0) else: p = fit(ln_tols[1:, iy], ln_absd[:, iy]) yerrs.append(exp(val(p, ln_tols[0, iy]))) errest.append(yerrs) return", ": 2D array of error estimates for result0.yout \"\"\" if atols is None:", "range(1, ntols)] errest = [] for ix, vx in enumerate(result0.xout): diffs = np.array([result0.yout[ix,", "y0 : array_like Passed on to ``odesys.integrate``. params : array_like Passed on to", "> 0\") if not np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols) > 0): raise", "array_like Passed on to ``odesys.integrate`` for first set of tolerances. (subsequent calls will", "zip([result0] + results, atols, rtols)]) ln_tols = np.log(tols).astype(np.float64) ln_absd = np.log(np.abs(diffs)).astype(np.float64) yerrs =", "exp import numpy as np def fit_factory(discard=1): def fit(x, y): p = np.polyfit(x,", "nor rtol are allowed in kwargs\") if not np.all(atols > 0) or not", "in results]) tols = np.array([atol + rtol*np.abs(r.yout[ix, :]) for r, atol, rtol in", "of tolerances. (subsequent calls will use xout from first integration). y0 : array_like", "if not np.all(atols > 0) or not np.all(rtols > 0): raise ValueError(\"atols &", "to ``odesys.integrate``. fit : callable val : callable \\\\*\\\\*kwargs: Passed on to ``odesys.integrate``.", "rtols = atols atols, rtols = map(np.asarray, (atols, rtols)) if atols.ndim != 1:", "< 4: raise ValueError(\"Pointless doing linear interpolation on less than 3 points\") if", "x, y0, params=(), fit=lambda x, y: np.polyfit(x, y, 1), val=np.polyval, **kwargs): \"\"\" Parameters", "r.yout[ix, :] for r in results]) tols = np.array([atol + rtol*np.abs(r.yout[ix, :]) for", "r in results]) tols = np.array([atol + rtol*np.abs(r.yout[ix, :]) for r, atol, rtol", "results, atols, rtols)]) ln_tols = np.log(tols).astype(np.float64) ln_absd = np.log(np.abs(diffs)).astype(np.float64) yerrs = [] for", "\" \"interpolation on less than 5 points.\") ntols = atols.size result0 = odesys.integrate(x,", "raise ValueError(\"atols & rtols need to obey strict positive monotonicity\") if atols.size <", "1D array. rtols : array_like Positive, monotonically increasing 1D array. 
x : array_like", "ln_tols = np.log(tols).astype(np.float64) ln_absd = np.log(np.abs(diffs)).astype(np.float64) yerrs = [] for iy in range(result0.yout.shape[-1]):", "instances extra : dict errest : 2D array of error estimates for result0.yout", "atols = rtols if rtols is None: rtols = atols atols, rtols =", "= atols.size result0 = odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs) results = [odesys.integrate(result0.xout,", "raise ValueError(\"atols & rtols need to > 0\") if not np.all(np.diff(atols) > 0)", "3 points\") if atols.size < 6: warnings.warn(\"Statistics will be (very) shaky when doing", "for result0.yout \"\"\" if atols is None: atols = rtols if rtols is", "error estimates for result0.yout \"\"\" if atols is None: atols = rtols if", "``odesys.integrate``. Returns ------- result0 : Result results : list of Result instances extra", "rtol=rtols[0], **kwargs) results = [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs) for i in", "v) drop_idxs = np.argsort(e)[-discard] return np.polyfit(np.delete(x, drop_idxs), np.delete(y, drop_idxs), 1) return fit def", "need to obey strict positive monotonicity\") if atols.size < 4: raise ValueError(\"Pointless doing", "atol=atols[i], rtol=rtols[i], **kwargs) for i in range(1, ntols)] errest = [] for ix,", "to ``odesys.integrate``. params : array_like Passed on to ``odesys.integrate``. fit : callable val", "(atols, rtols)) if atols.ndim != 1: raise NotImplementedError(\"Assuming 1-dimensional array\") if atols.shape !=", "Passed on to ``odesys.integrate``. Returns ------- result0 : Result results : list of", "# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function) import warnings", "on to ``odesys.integrate`` for first set of tolerances. (subsequent calls will use xout", "in enumerate(result0.xout): diffs = np.array([result0.yout[ix, :] - r.yout[ix, :] for r in results])", "np.abs(y - v) drop_idxs = np.argsort(e)[-discard] return np.polyfit(np.delete(x, drop_idxs), np.delete(y, drop_idxs), 1) return", "y): p = np.polyfit(x, y, 1) v = np.polyval(p, x) e = np.abs(y", "'atol' in kwargs or 'rtol' in kwargs: raise ValueError(\"Neither atol nor rtol are", "monotonically increasing 1D array. rtols : array_like Positive, monotonically increasing 1D array. x", "warnings from math import exp import numpy as np def fit_factory(discard=1): def fit(x,", "np.log(tols).astype(np.float64) ln_absd = np.log(np.abs(diffs)).astype(np.float64) yerrs = [] for iy in range(result0.yout.shape[-1]): if np.all(diffs[:,", "than 3 points\") if atols.size < 6: warnings.warn(\"Statistics will be (very) shaky when", ":]) for r, atol, rtol in zip([result0] + results, atols, rtols)]) ln_tols =", "= np.abs(y - v) drop_idxs = np.argsort(e)[-discard] return np.polyfit(np.delete(x, drop_idxs), np.delete(y, drop_idxs), 1)", "on less than 5 points.\") ntols = atols.size result0 = odesys.integrate(x, y0, params,", "points.\") ntols = atols.size result0 = odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs) results", "from math import exp import numpy as np def fit_factory(discard=1): def fit(x, y):", "= np.log(np.abs(diffs)).astype(np.float64) yerrs = [] for iy in range(result0.yout.shape[-1]): if np.all(diffs[:, iy] ==", "numpy as np def fit_factory(discard=1): def fit(x, y): p = np.polyfit(x, y, 1)", "atol nor rtol are allowed in kwargs\") if not np.all(atols > 0) or", "on to ``odesys.integrate``. 
Returns ------- result0 : Result results : list of Result", "Returns ------- result0 : Result results : list of Result instances extra :", "val=np.polyval, **kwargs): \"\"\" Parameters ---------- odesys : :class:`ODESys` atols : array_like Positive, monotonically", "Passed on to ``odesys.integrate``. fit : callable val : callable \\\\*\\\\*kwargs: Passed on", "2D array of error estimates for result0.yout \"\"\" if atols is None: atols", "Result results : list of Result instances extra : dict errest : 2D", "__future__ import (absolute_import, division, print_function) import warnings from math import exp import numpy", "atols : array_like Positive, monotonically increasing 1D array. rtols : array_like Positive, monotonically", "+ results, atols, rtols)]) ln_tols = np.log(tols).astype(np.float64) ln_absd = np.log(np.abs(diffs)).astype(np.float64) yerrs = []", "atol=atols[0], rtol=rtols[0], **kwargs) results = [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs) for i", "fit(ln_tols[1:, iy], ln_absd[:, iy]) yerrs.append(exp(val(p, ln_tols[0, iy]))) errest.append(yerrs) return result0, results, {'errest': np.array(errest)}", "NotImplementedError(\"Assuming 1-dimensional array\") if atols.shape != rtols.shape: raise ValueError(\"atols & rtols need to", "0) or not np.all(rtols > 0): raise ValueError(\"atols & rtols need to >", "print_function) import warnings from math import exp import numpy as np def fit_factory(discard=1):", "need to be of same length\") if 'atol' in kwargs or 'rtol' in", "errest = [] for ix, vx in enumerate(result0.xout): diffs = np.array([result0.yout[ix, :] -", "p = fit(ln_tols[1:, iy], ln_absd[:, iy]) yerrs.append(exp(val(p, ln_tols[0, iy]))) errest.append(yerrs) return result0, results,", "None: rtols = atols atols, rtols = map(np.asarray, (atols, rtols)) if atols.ndim !=", "------- result0 : Result results : list of Result instances extra : dict", "if not np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols) > 0): raise ValueError(\"atols &", "if atols.shape != rtols.shape: raise ValueError(\"atols & rtols need to be of same", ": dict errest : 2D array of error estimates for result0.yout \"\"\" if", "Parameters ---------- odesys : :class:`ODESys` atols : array_like Positive, monotonically increasing 1D array.", "map(np.asarray, (atols, rtols)) if atols.ndim != 1: raise NotImplementedError(\"Assuming 1-dimensional array\") if atols.shape", "will use xout from first integration). y0 : array_like Passed on to ``odesys.integrate``.", "np.polyfit(x, y, 1), val=np.polyval, **kwargs): \"\"\" Parameters ---------- odesys : :class:`ODESys` atols :", "is None: atols = rtols if rtols is None: rtols = atols atols,", "def fit_factory(discard=1): def fit(x, y): p = np.polyfit(x, y, 1) v = np.polyval(p,", "ValueError(\"Pointless doing linear interpolation on less than 3 points\") if atols.size < 6:", "in zip([result0] + results, atols, rtols)]) ln_tols = np.log(tols).astype(np.float64) ln_absd = np.log(np.abs(diffs)).astype(np.float64) yerrs", "utf-8 -*- from __future__ import (absolute_import, division, print_function) import warnings from math import", "Passed on to ``odesys.integrate``. params : array_like Passed on to ``odesys.integrate``. fit :", "raise ValueError(\"Neither atol nor rtol are allowed in kwargs\") if not np.all(atols >", "first integration). y0 : array_like Passed on to ``odesys.integrate``. 
params : array_like Passed", "are allowed in kwargs\") if not np.all(atols > 0) or not np.all(rtols >", "0): yerrs.append(0) else: p = fit(ln_tols[1:, iy], ln_absd[:, iy]) yerrs.append(exp(val(p, ln_tols[0, iy]))) errest.append(yerrs)", "np.polyfit(np.delete(x, drop_idxs), np.delete(y, drop_idxs), 1) return fit def integrate_tolerance_series(odesys, atols, rtols, x, y0,", "monotonicity\") if atols.size < 4: raise ValueError(\"Pointless doing linear interpolation on less than", "None: atols = rtols if rtols is None: rtols = atols atols, rtols", "estimates for result0.yout \"\"\" if atols is None: atols = rtols if rtols", "results]) tols = np.array([atol + rtol*np.abs(r.yout[ix, :]) for r, atol, rtol in zip([result0]", "Positive, monotonically increasing 1D array. rtols : array_like Positive, monotonically increasing 1D array.", "diffs = np.array([result0.yout[ix, :] - r.yout[ix, :] for r in results]) tols =", "to ``odesys.integrate``. Returns ------- result0 : Result results : list of Result instances", "0): raise ValueError(\"atols & rtols need to > 0\") if not np.all(np.diff(atols) >", "np.delete(y, drop_idxs), 1) return fit def integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(), fit=lambda", "tols = np.array([atol + rtol*np.abs(r.yout[ix, :]) for r, atol, rtol in zip([result0] +", "= odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs) results = [odesys.integrate(result0.xout, y0, params, atol=atols[i],", "result0.yout \"\"\" if atols is None: atols = rtols if rtols is None:", "-*- from __future__ import (absolute_import, division, print_function) import warnings from math import exp", "y0, params, atol=atols[i], rtol=rtols[i], **kwargs) for i in range(1, ntols)] errest = []", "for first set of tolerances. (subsequent calls will use xout from first integration).", "= map(np.asarray, (atols, rtols)) if atols.ndim != 1: raise NotImplementedError(\"Assuming 1-dimensional array\") if", "np.polyfit(x, y, 1) v = np.polyval(p, x) e = np.abs(y - v) drop_idxs", "= np.array([result0.yout[ix, :] - r.yout[ix, :] for r in results]) tols = np.array([atol", "(very) shaky when doing linear \" \"interpolation on less than 5 points.\") ntols", "callable val : callable \\\\*\\\\*kwargs: Passed on to ``odesys.integrate``. Returns ------- result0 :", "val : callable \\\\*\\\\*kwargs: Passed on to ``odesys.integrate``. Returns ------- result0 : Result", "increasing 1D array. rtols : array_like Positive, monotonically increasing 1D array. x :", "1), val=np.polyval, **kwargs): \"\"\" Parameters ---------- odesys : :class:`ODESys` atols : array_like Positive,", "of error estimates for result0.yout \"\"\" if atols is None: atols = rtols", "np.all(np.diff(rtols) > 0): raise ValueError(\"atols & rtols need to obey strict positive monotonicity\")", "interpolation on less than 3 points\") if atols.size < 6: warnings.warn(\"Statistics will be", "= atols atols, rtols = map(np.asarray, (atols, rtols)) if atols.ndim != 1: raise", ":] for r in results]) tols = np.array([atol + rtol*np.abs(r.yout[ix, :]) for r,", "rtol=rtols[i], **kwargs) for i in range(1, ntols)] errest = [] for ix, vx", "callable \\\\*\\\\*kwargs: Passed on to ``odesys.integrate``. Returns ------- result0 : Result results :", "not np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols) > 0): raise ValueError(\"atols & rtols", "set of tolerances. (subsequent calls will use xout from first integration). 
y0 :", "-*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function) import warnings from", "> 0) or not np.all(np.diff(rtols) > 0): raise ValueError(\"atols & rtols need to", "ix, vx in enumerate(result0.xout): diffs = np.array([result0.yout[ix, :] - r.yout[ix, :] for r", "0\") if not np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols) > 0): raise ValueError(\"atols", "ValueError(\"atols & rtols need to be of same length\") if 'atol' in kwargs", "``odesys.integrate``. params : array_like Passed on to ``odesys.integrate``. fit : callable val :", "e = np.abs(y - v) drop_idxs = np.argsort(e)[-discard] return np.polyfit(np.delete(x, drop_idxs), np.delete(y, drop_idxs),", ": array_like Positive, monotonically increasing 1D array. x : array_like Passed on to", "rtol are allowed in kwargs\") if not np.all(atols > 0) or not np.all(rtols", "y, 1), val=np.polyval, **kwargs): \"\"\" Parameters ---------- odesys : :class:`ODESys` atols : array_like", "strict positive monotonicity\") if atols.size < 4: raise ValueError(\"Pointless doing linear interpolation on", "tolerances. (subsequent calls will use xout from first integration). y0 : array_like Passed", "Positive, monotonically increasing 1D array. x : array_like Passed on to ``odesys.integrate`` for", "rtols if rtols is None: rtols = atols atols, rtols = map(np.asarray, (atols,", "import exp import numpy as np def fit_factory(discard=1): def fit(x, y): p =", "for r in results]) tols = np.array([atol + rtol*np.abs(r.yout[ix, :]) for r, atol,", "= fit(ln_tols[1:, iy], ln_absd[:, iy]) yerrs.append(exp(val(p, ln_tols[0, iy]))) errest.append(yerrs) return result0, results, {'errest':", "rtols)]) ln_tols = np.log(tols).astype(np.float64) ln_absd = np.log(np.abs(diffs)).astype(np.float64) yerrs = [] for iy in", "xout from first integration). y0 : array_like Passed on to ``odesys.integrate``. params :", "positive monotonicity\") if atols.size < 4: raise ValueError(\"Pointless doing linear interpolation on less", "i in range(1, ntols)] errest = [] for ix, vx in enumerate(result0.xout): diffs", "array. rtols : array_like Positive, monotonically increasing 1D array. x : array_like Passed", ":] - r.yout[ix, :] for r in results]) tols = np.array([atol + rtol*np.abs(r.yout[ix,", "atols.size result0 = odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs) results = [odesys.integrate(result0.xout, y0,", "not np.all(np.diff(rtols) > 0): raise ValueError(\"atols & rtols need to obey strict positive", "dict errest : 2D array of error estimates for result0.yout \"\"\" if atols", "fit_factory(discard=1): def fit(x, y): p = np.polyfit(x, y, 1) v = np.polyval(p, x)", "drop_idxs), 1) return fit def integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(), fit=lambda x,", "if atols.size < 6: warnings.warn(\"Statistics will be (very) shaky when doing linear \"", ": callable val : callable \\\\*\\\\*kwargs: Passed on to ``odesys.integrate``. Returns ------- result0", "will be (very) shaky when doing linear \" \"interpolation on less than 5", ": callable \\\\*\\\\*kwargs: Passed on to ``odesys.integrate``. 
Returns ------- result0 : Result results", "raise ValueError(\"atols & rtols need to be of same length\") if 'atol' in", "**kwargs) for i in range(1, ntols)] errest = [] for ix, vx in", "np.array([result0.yout[ix, :] - r.yout[ix, :] for r in results]) tols = np.array([atol +", "def fit(x, y): p = np.polyfit(x, y, 1) v = np.polyval(p, x) e", "less than 5 points.\") ntols = atols.size result0 = odesys.integrate(x, y0, params, atol=atols[0],", "for iy in range(result0.yout.shape[-1]): if np.all(diffs[:, iy] == 0): yerrs.append(0) else: p =", "= np.polyval(p, x) e = np.abs(y - v) drop_idxs = np.argsort(e)[-discard] return np.polyfit(np.delete(x,", "= np.polyfit(x, y, 1) v = np.polyval(p, x) e = np.abs(y - v)", "5 points.\") ntols = atols.size result0 = odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs)", "ValueError(\"atols & rtols need to obey strict positive monotonicity\") if atols.size < 4:" ]
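
The function above is easiest to understand from a call site. Below is a minimal usage sketch, not part of the original module: it assumes pyodesys's SymbolicSys.from_callback constructor and the 'scipy' integrator backend are available, and the ODE (one-species exponential decay), tolerance ladder, and parameter value are arbitrary choices for illustration.

import numpy as np
from pyodesys.symbolic import SymbolicSys

# dy/dt = -k*y, with k passed as the single parameter
decay = SymbolicSys.from_callback(lambda x, y, p: [-p[0]*y[0]], 1, 1)
atols = rtols = np.array([1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6])
result0, results, extra = integrate_tolerance_series(
    decay, atols, rtols, [0, 5], [1.0], [0.3], integrator='scipy')
print(extra['errest'].shape)  # one estimate per (output point, dependent variable)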
[ "PremiumizeMe(MultiAccount): __name__ = \"PremiumizeMe\" __type__ = \"account\" __version__ = \"0.30\" __status__ = \"testing\"", "= \"0.5\" __config__ = [ (\"mh_mode\", \"all;listed;unlisted\", \"Filter hosters to use\", \"all\"), (\"mh_list\",", "= \"0.30\" __status__ = \"testing\" __pyload_version__ = \"0.5\" __config__ = [ (\"mh_mode\", \"all;listed;unlisted\",", "10) if res[\"result\"][\"type\"] != \"free\": premium = True return { \"validuntil\": validuntil, \"trafficleft\":", "signin(self, user, password, data): res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"] != 200:", "\"params[pass]\": password} for key, val in kwargs.items(): get_params[\"params[{}]\".format(key)] = val json_data = self.load(self.API_URL,", "res[\"result\"][\"tldlist\"] def grab_info(self, user, password, data): validuntil = None trafficleft = None premium", "= \"account\" __version__ = \"0.30\" __status__ = \"testing\" __pyload_version__ = \"0.5\" __config__ =", "user, password, data): res = self.api_respond(\"hosterlist\", user, password) if res[\"status\"] != 200: return", "return res[\"result\"][\"tldlist\"] def grab_info(self, user, password, data): validuntil = None trafficleft = None", "True return { \"validuntil\": validuntil, \"trafficleft\": trafficleft, \"premium\": premium, } def signin(self, user,", "validuntil, \"trafficleft\": trafficleft, \"premium\": premium, } def signin(self, user, password, data): res =", "get_params = {\"method\": method, \"params[login]\": user, \"params[pass]\": password} for key, val in kwargs.items():", "\"Reload interval in hours\", 12), ] __description__ = \"\"\"Premiumize.me account plugin\"\"\" __license__ =", "\"Filter hosters to use\", \"all\"), (\"mh_list\", \"str\", \"Hoster list (comma separated)\", \"\"), (\"mh_interval\",", "] # See https://www.premiumize.me/static/api/api.html API_URL = \"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self, method, user, password, **kwargs):", "} def signin(self, user, password, data): res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"]", "val in kwargs.items(): get_params[\"params[{}]\".format(key)] = val json_data = self.load(self.API_URL, get=get_params) return json.loads(json_data) def", "res[\"result\"][\"type\"] != \"free\": premium = True return { \"validuntil\": validuntil, \"trafficleft\": trafficleft, \"premium\":", "max(0, res[\"result\"][\"trafficleft_bytes\"] >> 10) if res[\"result\"][\"type\"] != \"free\": premium = True return {", "password, **kwargs): get_params = {\"method\": method, \"params[login]\": user, \"params[pass]\": password} for key, val", "self.api_respond(\"accountstatus\", user, password) if res[\"status\"] == 200: validuntil = float(res[\"result\"][\"expires\"]) # TODO: Remove", "\"\"\"Premiumize.me account plugin\"\"\" __license__ = \"GPLv3\" __authors__ = [ (\"<NAME>\", \"<EMAIL>\"), (\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"),", "user, password) if res[\"status\"] == 200: validuntil = float(res[\"result\"][\"expires\"]) # TODO: Remove `>>", "res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"] == 200: validuntil = float(res[\"result\"][\"expires\"]) #", "**kwargs): get_params = {\"method\": method, \"params[login]\": user, \"params[pass]\": password} for key, val in", "== 200: validuntil = float(res[\"result\"][\"expires\"]) # TODO: Remove `>> 10` in 0.6.x trafficleft", "\"testing\" __pyload_version__ = \"0.5\" __config__ = [ (\"mh_mode\", \"all;listed;unlisted\", \"Filter hosters to use\",", "None 
premium = False res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"] == 200:", "key, val in kwargs.items(): get_params[\"params[{}]\".format(key)] = val json_data = self.load(self.API_URL, get=get_params) return json.loads(json_data)", "] __description__ = \"\"\"Premiumize.me account plugin\"\"\" __license__ = \"GPLv3\" __authors__ = [ (\"<NAME>\",", "= \"\"\"Premiumize.me account plugin\"\"\" __license__ = \"GPLv3\" __authors__ = [ (\"<NAME>\", \"<EMAIL>\"), (\"GammaC0de\",", "to use\", \"all\"), (\"mh_list\", \"str\", \"Hoster list (comma separated)\", \"\"), (\"mh_interval\", \"int\", \"Reload", "!= 200: return [] return res[\"result\"][\"tldlist\"] def grab_info(self, user, password, data): validuntil =", "= False res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"] == 200: validuntil =", ">> 10) if res[\"result\"][\"type\"] != \"free\": premium = True return { \"validuntil\": validuntil,", "12), ] __description__ = \"\"\"Premiumize.me account plugin\"\"\" __license__ = \"GPLv3\" __authors__ = [", "get_params[\"params[{}]\".format(key)] = val json_data = self.load(self.API_URL, get=get_params) return json.loads(json_data) def grab_hosters(self, user, password,", "return json.loads(json_data) def grab_hosters(self, user, password, data): res = self.api_respond(\"hosterlist\", user, password) if", "password, data): res = self.api_respond(\"hosterlist\", user, password) if res[\"status\"] != 200: return []", "[ (\"<NAME>\", \"<EMAIL>\"), (\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"), ] # See https://www.premiumize.me/static/api/api.html API_URL = \"https://api.premiumize.me/pm-api/v1.php\" def", "def grab_hosters(self, user, password, data): res = self.api_respond(\"hosterlist\", user, password) if res[\"status\"] !=", "= {\"method\": method, \"params[login]\": user, \"params[pass]\": password} for key, val in kwargs.items(): get_params[\"params[{}]\".format(key)]", "for key, val in kwargs.items(): get_params[\"params[{}]\".format(key)] = val json_data = self.load(self.API_URL, get=get_params) return", "class PremiumizeMe(MultiAccount): __name__ = \"PremiumizeMe\" __type__ = \"account\" __version__ = \"0.30\" __status__ =", "res[\"status\"] != 200: return [] return res[\"result\"][\"tldlist\"] def grab_info(self, user, password, data): validuntil", "validuntil = float(res[\"result\"][\"expires\"]) # TODO: Remove `>> 10` in 0.6.x trafficleft = max(0,", "password, data): validuntil = None trafficleft = None premium = False res =", "(\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"), ] # See https://www.premiumize.me/static/api/api.html API_URL = \"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self, method, user,", "kwargs.items(): get_params[\"params[{}]\".format(key)] = val json_data = self.load(self.API_URL, get=get_params) return json.loads(json_data) def grab_hosters(self, user,", "res = self.api_respond(\"hosterlist\", user, password) if res[\"status\"] != 200: return [] return res[\"result\"][\"tldlist\"]", "def signin(self, user, password, data): res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"] !=", "= True return { \"validuntil\": validuntil, \"trafficleft\": trafficleft, \"premium\": premium, } def signin(self,", "if res[\"status\"] == 200: validuntil = float(res[\"result\"][\"expires\"]) # TODO: Remove `>> 10` in", "= \"testing\" __pyload_version__ = \"0.5\" __config__ = [ (\"mh_mode\", \"all;listed;unlisted\", \"Filter hosters to", "\"GPLv3\" __authors__ = [ (\"<NAME>\", \"<EMAIL>\"), 
(\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"), ] # See https://www.premiumize.me/static/api/api.html API_URL", "# TODO: Remove `>> 10` in 0.6.x trafficleft = max(0, res[\"result\"][\"trafficleft_bytes\"] >> 10)", "data): res = self.api_respond(\"hosterlist\", user, password) if res[\"status\"] != 200: return [] return", "separated)\", \"\"), (\"mh_interval\", \"int\", \"Reload interval in hours\", 12), ] __description__ = \"\"\"Premiumize.me", "(\"mh_interval\", \"int\", \"Reload interval in hours\", 12), ] __description__ = \"\"\"Premiumize.me account plugin\"\"\"", "TODO: Remove `>> 10` in 0.6.x trafficleft = max(0, res[\"result\"][\"trafficleft_bytes\"] >> 10) if", "password} for key, val in kwargs.items(): get_params[\"params[{}]\".format(key)] = val json_data = self.load(self.API_URL, get=get_params)", "def grab_info(self, user, password, data): validuntil = None trafficleft = None premium =", "\"account\" __version__ = \"0.30\" __status__ = \"testing\" __pyload_version__ = \"0.5\" __config__ = [", "__name__ = \"PremiumizeMe\" __type__ = \"account\" __version__ = \"0.30\" __status__ = \"testing\" __pyload_version__", "user, password, data): res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"] != 200: self.fail_login(res[\"statusmessage\"])", "password) if res[\"status\"] == 200: validuntil = float(res[\"result\"][\"expires\"]) # TODO: Remove `>> 10`", "= self.api_respond(\"hosterlist\", user, password) if res[\"status\"] != 200: return [] return res[\"result\"][\"tldlist\"] def", "\"0.30\" __status__ = \"testing\" __pyload_version__ = \"0.5\" __config__ = [ (\"mh_mode\", \"all;listed;unlisted\", \"Filter", "MultiAccount class PremiumizeMe(MultiAccount): __name__ = \"PremiumizeMe\" __type__ = \"account\" __version__ = \"0.30\" __status__", "account plugin\"\"\" __license__ = \"GPLv3\" __authors__ = [ (\"<NAME>\", \"<EMAIL>\"), (\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"), ]", "from ..base.multi_account import MultiAccount class PremiumizeMe(MultiAccount): __name__ = \"PremiumizeMe\" __type__ = \"account\" __version__", "utf-8 -*- import json from ..base.multi_account import MultiAccount class PremiumizeMe(MultiAccount): __name__ = \"PremiumizeMe\"", "method, user, password, **kwargs): get_params = {\"method\": method, \"params[login]\": user, \"params[pass]\": password} for", "{\"method\": method, \"params[login]\": user, \"params[pass]\": password} for key, val in kwargs.items(): get_params[\"params[{}]\".format(key)] =", "[ (\"mh_mode\", \"all;listed;unlisted\", \"Filter hosters to use\", \"all\"), (\"mh_list\", \"str\", \"Hoster list (comma", "method, \"params[login]\": user, \"params[pass]\": password} for key, val in kwargs.items(): get_params[\"params[{}]\".format(key)] = val", "= [ (\"mh_mode\", \"all;listed;unlisted\", \"Filter hosters to use\", \"all\"), (\"mh_list\", \"str\", \"Hoster list", "def api_respond(self, method, user, password, **kwargs): get_params = {\"method\": method, \"params[login]\": user, \"params[pass]\":", "API_URL = \"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self, method, user, password, **kwargs): get_params = {\"method\": method,", "`>> 10` in 0.6.x trafficleft = max(0, res[\"result\"][\"trafficleft_bytes\"] >> 10) if res[\"result\"][\"type\"] !=", "import json from ..base.multi_account import MultiAccount class PremiumizeMe(MultiAccount): __name__ = \"PremiumizeMe\" __type__ =", "import MultiAccount class PremiumizeMe(MultiAccount): __name__ = \"PremiumizeMe\" __type__ = \"account\" __version__ = \"0.30\"", 
"user, password, **kwargs): get_params = {\"method\": method, \"params[login]\": user, \"params[pass]\": password} for key,", "api_respond(self, method, user, password, **kwargs): get_params = {\"method\": method, \"params[login]\": user, \"params[pass]\": password}", "<gh_stars>1-10 # -*- coding: utf-8 -*- import json from ..base.multi_account import MultiAccount class", "__type__ = \"account\" __version__ = \"0.30\" __status__ = \"testing\" __pyload_version__ = \"0.5\" __config__", "= val json_data = self.load(self.API_URL, get=get_params) return json.loads(json_data) def grab_hosters(self, user, password, data):", "user, \"params[pass]\": password} for key, val in kwargs.items(): get_params[\"params[{}]\".format(key)] = val json_data =", "in hours\", 12), ] __description__ = \"\"\"Premiumize.me account plugin\"\"\" __license__ = \"GPLv3\" __authors__", "plugin\"\"\" __license__ = \"GPLv3\" __authors__ = [ (\"<NAME>\", \"<EMAIL>\"), (\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"), ] #", "__pyload_version__ = \"0.5\" __config__ = [ (\"mh_mode\", \"all;listed;unlisted\", \"Filter hosters to use\", \"all\"),", "user, password) if res[\"status\"] != 200: return [] return res[\"result\"][\"tldlist\"] def grab_info(self, user,", "return [] return res[\"result\"][\"tldlist\"] def grab_info(self, user, password, data): validuntil = None trafficleft", "= max(0, res[\"result\"][\"trafficleft_bytes\"] >> 10) if res[\"result\"][\"type\"] != \"free\": premium = True return", "!= \"free\": premium = True return { \"validuntil\": validuntil, \"trafficleft\": trafficleft, \"premium\": premium,", "\"trafficleft\": trafficleft, \"premium\": premium, } def signin(self, user, password, data): res = self.api_respond(\"accountstatus\",", "if res[\"result\"][\"type\"] != \"free\": premium = True return { \"validuntil\": validuntil, \"trafficleft\": trafficleft,", "= \"PremiumizeMe\" __type__ = \"account\" __version__ = \"0.30\" __status__ = \"testing\" __pyload_version__ =", "get=get_params) return json.loads(json_data) def grab_hosters(self, user, password, data): res = self.api_respond(\"hosterlist\", user, password)", "-*- import json from ..base.multi_account import MultiAccount class PremiumizeMe(MultiAccount): __name__ = \"PremiumizeMe\" __type__", "= self.load(self.API_URL, get=get_params) return json.loads(json_data) def grab_hosters(self, user, password, data): res = self.api_respond(\"hosterlist\",", "if res[\"status\"] != 200: return [] return res[\"result\"][\"tldlist\"] def grab_info(self, user, password, data):", "interval in hours\", 12), ] __description__ = \"\"\"Premiumize.me account plugin\"\"\" __license__ = \"GPLv3\"", "list (comma separated)\", \"\"), (\"mh_interval\", \"int\", \"Reload interval in hours\", 12), ] __description__", "\"PremiumizeMe\" __type__ = \"account\" __version__ = \"0.30\" __status__ = \"testing\" __pyload_version__ = \"0.5\"", "200: return [] return res[\"result\"][\"tldlist\"] def grab_info(self, user, password, data): validuntil = None", "\"free\": premium = True return { \"validuntil\": validuntil, \"trafficleft\": trafficleft, \"premium\": premium, }", "grab_hosters(self, user, password, data): res = self.api_respond(\"hosterlist\", user, password) if res[\"status\"] != 200:", "..base.multi_account import MultiAccount class PremiumizeMe(MultiAccount): __name__ = \"PremiumizeMe\" __type__ = \"account\" __version__ =", "json_data = self.load(self.API_URL, get=get_params) return json.loads(json_data) def grab_hosters(self, user, password, data): res =", "= None 
trafficleft = None premium = False res = self.api_respond(\"accountstatus\", user, password)", "__status__ = \"testing\" __pyload_version__ = \"0.5\" __config__ = [ (\"mh_mode\", \"all;listed;unlisted\", \"Filter hosters", "__description__ = \"\"\"Premiumize.me account plugin\"\"\" __license__ = \"GPLv3\" __authors__ = [ (\"<NAME>\", \"<EMAIL>\"),", "See https://www.premiumize.me/static/api/api.html API_URL = \"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self, method, user, password, **kwargs): get_params =", "validuntil = None trafficleft = None premium = False res = self.api_respond(\"accountstatus\", user,", "coding: utf-8 -*- import json from ..base.multi_account import MultiAccount class PremiumizeMe(MultiAccount): __name__ =", "res[\"status\"] == 200: validuntil = float(res[\"result\"][\"expires\"]) # TODO: Remove `>> 10` in 0.6.x", "10` in 0.6.x trafficleft = max(0, res[\"result\"][\"trafficleft_bytes\"] >> 10) if res[\"result\"][\"type\"] != \"free\":", "# -*- coding: utf-8 -*- import json from ..base.multi_account import MultiAccount class PremiumizeMe(MultiAccount):", "(comma separated)\", \"\"), (\"mh_interval\", \"int\", \"Reload interval in hours\", 12), ] __description__ =", "\"nitzo2001[AT]yahoo[DOT]com\"), ] # See https://www.premiumize.me/static/api/api.html API_URL = \"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self, method, user, password,", "-*- coding: utf-8 -*- import json from ..base.multi_account import MultiAccount class PremiumizeMe(MultiAccount): __name__", "200: validuntil = float(res[\"result\"][\"expires\"]) # TODO: Remove `>> 10` in 0.6.x trafficleft =", "0.6.x trafficleft = max(0, res[\"result\"][\"trafficleft_bytes\"] >> 10) if res[\"result\"][\"type\"] != \"free\": premium =", "trafficleft, \"premium\": premium, } def signin(self, user, password, data): res = self.api_respond(\"accountstatus\", user,", "https://www.premiumize.me/static/api/api.html API_URL = \"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self, method, user, password, **kwargs): get_params = {\"method\":", "(\"<NAME>\", \"<EMAIL>\"), (\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"), ] # See https://www.premiumize.me/static/api/api.html API_URL = \"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self,", "\"str\", \"Hoster list (comma separated)\", \"\"), (\"mh_interval\", \"int\", \"Reload interval in hours\", 12),", "(\"mh_mode\", \"all;listed;unlisted\", \"Filter hosters to use\", \"all\"), (\"mh_list\", \"str\", \"Hoster list (comma separated)\",", "False res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"] == 200: validuntil = float(res[\"result\"][\"expires\"])", "data): validuntil = None trafficleft = None premium = False res = self.api_respond(\"accountstatus\",", "\"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self, method, user, password, **kwargs): get_params = {\"method\": method, \"params[login]\": user,", "Remove `>> 10` in 0.6.x trafficleft = max(0, res[\"result\"][\"trafficleft_bytes\"] >> 10) if res[\"result\"][\"type\"]", "= \"GPLv3\" __authors__ = [ (\"<NAME>\", \"<EMAIL>\"), (\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"), ] # See https://www.premiumize.me/static/api/api.html", "\"validuntil\": validuntil, \"trafficleft\": trafficleft, \"premium\": premium, } def signin(self, user, password, data): res", "user, password, data): validuntil = None trafficleft = None premium = False res", "__license__ = \"GPLv3\" __authors__ = [ (\"<NAME>\", \"<EMAIL>\"), (\"GammaC0de\", 
\"nitzo2001[AT]yahoo[DOT]com\"), ] # See", "res[\"result\"][\"trafficleft_bytes\"] >> 10) if res[\"result\"][\"type\"] != \"free\": premium = True return { \"validuntil\":", "\"all;listed;unlisted\", \"Filter hosters to use\", \"all\"), (\"mh_list\", \"str\", \"Hoster list (comma separated)\", \"\"),", "= [ (\"<NAME>\", \"<EMAIL>\"), (\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"), ] # See https://www.premiumize.me/static/api/api.html API_URL = \"https://api.premiumize.me/pm-api/v1.php\"", "self.api_respond(\"hosterlist\", user, password) if res[\"status\"] != 200: return [] return res[\"result\"][\"tldlist\"] def grab_info(self,", "\"<EMAIL>\"), (\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"), ] # See https://www.premiumize.me/static/api/api.html API_URL = \"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self, method,", "hours\", 12), ] __description__ = \"\"\"Premiumize.me account plugin\"\"\" __license__ = \"GPLv3\" __authors__ =", "in kwargs.items(): get_params[\"params[{}]\".format(key)] = val json_data = self.load(self.API_URL, get=get_params) return json.loads(json_data) def grab_hosters(self,", "(\"mh_list\", \"str\", \"Hoster list (comma separated)\", \"\"), (\"mh_interval\", \"int\", \"Reload interval in hours\",", "val json_data = self.load(self.API_URL, get=get_params) return json.loads(json_data) def grab_hosters(self, user, password, data): res", "None trafficleft = None premium = False res = self.api_respond(\"accountstatus\", user, password) if", "return { \"validuntil\": validuntil, \"trafficleft\": trafficleft, \"premium\": premium, } def signin(self, user, password,", "__config__ = [ (\"mh_mode\", \"all;listed;unlisted\", \"Filter hosters to use\", \"all\"), (\"mh_list\", \"str\", \"Hoster", "= \"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self, method, user, password, **kwargs): get_params = {\"method\": method, \"params[login]\":", "premium = True return { \"validuntil\": validuntil, \"trafficleft\": trafficleft, \"premium\": premium, } def", "\"params[login]\": user, \"params[pass]\": password} for key, val in kwargs.items(): get_params[\"params[{}]\".format(key)] = val json_data", "{ \"validuntil\": validuntil, \"trafficleft\": trafficleft, \"premium\": premium, } def signin(self, user, password, data):", "premium, } def signin(self, user, password, data): res = self.api_respond(\"accountstatus\", user, password) if", "\"int\", \"Reload interval in hours\", 12), ] __description__ = \"\"\"Premiumize.me account plugin\"\"\" __license__", "= self.api_respond(\"accountstatus\", user, password) if res[\"status\"] == 200: validuntil = float(res[\"result\"][\"expires\"]) # TODO:", "__authors__ = [ (\"<NAME>\", \"<EMAIL>\"), (\"GammaC0de\", \"nitzo2001[AT]yahoo[DOT]com\"), ] # See https://www.premiumize.me/static/api/api.html API_URL =", "json from ..base.multi_account import MultiAccount class PremiumizeMe(MultiAccount): __name__ = \"PremiumizeMe\" __type__ = \"account\"", "password) if res[\"status\"] != 200: return [] return res[\"result\"][\"tldlist\"] def grab_info(self, user, password,", "hosters to use\", \"all\"), (\"mh_list\", \"str\", \"Hoster list (comma separated)\", \"\"), (\"mh_interval\", \"int\",", "grab_info(self, user, password, data): validuntil = None trafficleft = None premium = False", "json.loads(json_data) def grab_hosters(self, user, password, data): res = self.api_respond(\"hosterlist\", user, password) if res[\"status\"]", "\"Hoster list (comma separated)\", \"\"), (\"mh_interval\", \"int\", \"Reload interval in hours\", 
12), ]", "trafficleft = max(0, res[\"result\"][\"trafficleft_bytes\"] >> 10) if res[\"result\"][\"type\"] != \"free\": premium = True", "__version__ = \"0.30\" __status__ = \"testing\" __pyload_version__ = \"0.5\" __config__ = [ (\"mh_mode\",", "use\", \"all\"), (\"mh_list\", \"str\", \"Hoster list (comma separated)\", \"\"), (\"mh_interval\", \"int\", \"Reload interval", "# See https://www.premiumize.me/static/api/api.html API_URL = \"https://api.premiumize.me/pm-api/v1.php\" def api_respond(self, method, user, password, **kwargs): get_params", "= None premium = False res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"] ==", "\"all\"), (\"mh_list\", \"str\", \"Hoster list (comma separated)\", \"\"), (\"mh_interval\", \"int\", \"Reload interval in", "\"0.5\" __config__ = [ (\"mh_mode\", \"all;listed;unlisted\", \"Filter hosters to use\", \"all\"), (\"mh_list\", \"str\",", "\"\"), (\"mh_interval\", \"int\", \"Reload interval in hours\", 12), ] __description__ = \"\"\"Premiumize.me account", "\"premium\": premium, } def signin(self, user, password, data): res = self.api_respond(\"accountstatus\", user, password)", "float(res[\"result\"][\"expires\"]) # TODO: Remove `>> 10` in 0.6.x trafficleft = max(0, res[\"result\"][\"trafficleft_bytes\"] >>", "self.load(self.API_URL, get=get_params) return json.loads(json_data) def grab_hosters(self, user, password, data): res = self.api_respond(\"hosterlist\", user,", "in 0.6.x trafficleft = max(0, res[\"result\"][\"trafficleft_bytes\"] >> 10) if res[\"result\"][\"type\"] != \"free\": premium", "[] return res[\"result\"][\"tldlist\"] def grab_info(self, user, password, data): validuntil = None trafficleft =", "= float(res[\"result\"][\"expires\"]) # TODO: Remove `>> 10` in 0.6.x trafficleft = max(0, res[\"result\"][\"trafficleft_bytes\"]", "trafficleft = None premium = False res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"]", "premium = False res = self.api_respond(\"accountstatus\", user, password) if res[\"status\"] == 200: validuntil" ]
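
self.load in api_respond above is pyLoad's HTTP helper, so the plugin only runs inside the framework. As a standalone illustration of the same request scheme, here is a stdlib-only sketch; the endpoint URL and "params[...]" naming come from the plugin, while the credentials are placeholders.

import json
from urllib.parse import urlencode
from urllib.request import urlopen

API_URL = "https://api.premiumize.me/pm-api/v1.php"

def api_respond(method, user, password, **kwargs):
    # same query-parameter scheme the plugin builds via self.load(..., get=...)
    params = {"method": method, "params[login]": user, "params[pass]": password}
    params.update({"params[{}]".format(k): v for k, v in kwargs.items()})
    with urlopen(API_URL + "?" + urlencode(params)) as resp:
        return json.loads(resp.read().decode("utf-8"))

# e.g. api_respond("accountstatus", "user@example.com", "hunter2")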
[ "from django.urls import path from . import views urlpatterns = [ path('item/', views.ItemListCreate.as_view()),", "django.urls import path from . import views urlpatterns = [ path('item/', views.ItemListCreate.as_view()), ]", "<gh_stars>0 from django.urls import path from . import views urlpatterns = [ path('item/'," ]
[ "os.path.join('.', 'requirements.txt') with open(reqs_path, 'r') as f: requirements = [line.rstrip() for line in", "version='0.1.0', description='Sample package for Python-Guide.org', long_description=readme, author='<NAME>', author_email='<EMAIL>', install_requires=read_requirements(), url='test', license=license, packages=find_packages(exclude=('tests', 'docs'))", "requirements setup( name='sample', version='0.1.0', description='Sample package for Python-Guide.org', long_description=readme, author='<NAME>', author_email='<EMAIL>', install_requires=read_requirements(), url='test',", "requirements from requirements.txt.\"\"\" reqs_path = os.path.join('.', 'requirements.txt') with open(reqs_path, 'r') as f: requirements", "in f] return requirements setup( name='sample', version='0.1.0', description='Sample package for Python-Guide.org', long_description=readme, author='<NAME>',", "List: \"\"\"Parse requirements from requirements.txt.\"\"\" reqs_path = os.path.join('.', 'requirements.txt') with open(reqs_path, 'r') as", "'r') as f: requirements = [line.rstrip() for line in f] return requirements setup(", "description='Sample package for Python-Guide.org', long_description=readme, author='<NAME>', author_email='<EMAIL>', install_requires=read_requirements(), url='test', license=license, packages=find_packages(exclude=('tests', 'docs')) )", "'requirements.txt') with open(reqs_path, 'r') as f: requirements = [line.rstrip() for line in f]", "with open(reqs_path, 'r') as f: requirements = [line.rstrip() for line in f] return", "\"\"\"Parse requirements from requirements.txt.\"\"\" reqs_path = os.path.join('.', 'requirements.txt') with open(reqs_path, 'r') as f:", "# -*- coding: utf-8 -*- # Learn more: https://github.com/kennethreitz/setup.py import os, sys from", "open(reqs_path, 'r') as f: requirements = [line.rstrip() for line in f] return requirements", "-*- coding: utf-8 -*- # Learn more: https://github.com/kennethreitz/setup.py import os, sys from setuptools", "[line.rstrip() for line in f] return requirements setup( name='sample', version='0.1.0', description='Sample package for", "requirements = [line.rstrip() for line in f] return requirements setup( name='sample', version='0.1.0', description='Sample", "requirements.txt.\"\"\" reqs_path = os.path.join('.', 'requirements.txt') with open(reqs_path, 'r') as f: requirements = [line.rstrip()", "reqs_path = os.path.join('.', 'requirements.txt') with open(reqs_path, 'r') as f: requirements = [line.rstrip() for", "return requirements setup( name='sample', version='0.1.0', description='Sample package for Python-Guide.org', long_description=readme, author='<NAME>', author_email='<EMAIL>', install_requires=read_requirements(),", "-*- # Learn more: https://github.com/kennethreitz/setup.py import os, sys from setuptools import setup, find_packages", "os, sys from setuptools import setup, find_packages def read_requirements() -> List: \"\"\"Parse requirements", "coding: utf-8 -*- # Learn more: https://github.com/kennethreitz/setup.py import os, sys from setuptools import", "from requirements.txt.\"\"\" reqs_path = os.path.join('.', 'requirements.txt') with open(reqs_path, 'r') as f: requirements =", "setuptools import setup, find_packages def read_requirements() -> List: \"\"\"Parse requirements from requirements.txt.\"\"\" reqs_path", "from setuptools import setup, find_packages def read_requirements() -> List: \"\"\"Parse requirements from requirements.txt.\"\"\"", "# Learn more: https://github.com/kennethreitz/setup.py import os, sys from 
setuptools import setup, find_packages def", "https://github.com/kennethreitz/setup.py import os, sys from setuptools import setup, find_packages def read_requirements() -> List:", "-> List: \"\"\"Parse requirements from requirements.txt.\"\"\" reqs_path = os.path.join('.', 'requirements.txt') with open(reqs_path, 'r')", "more: https://github.com/kennethreitz/setup.py import os, sys from setuptools import setup, find_packages def read_requirements() ->", "read_requirements() -> List: \"\"\"Parse requirements from requirements.txt.\"\"\" reqs_path = os.path.join('.', 'requirements.txt') with open(reqs_path,", "utf-8 -*- # Learn more: https://github.com/kennethreitz/setup.py import os, sys from setuptools import setup,", "import setup, find_packages def read_requirements() -> List: \"\"\"Parse requirements from requirements.txt.\"\"\" reqs_path =", "for line in f] return requirements setup( name='sample', version='0.1.0', description='Sample package for Python-Guide.org',", "= os.path.join('.', 'requirements.txt') with open(reqs_path, 'r') as f: requirements = [line.rstrip() for line", "def read_requirements() -> List: \"\"\"Parse requirements from requirements.txt.\"\"\" reqs_path = os.path.join('.', 'requirements.txt') with", "name='sample', version='0.1.0', description='Sample package for Python-Guide.org', long_description=readme, author='<NAME>', author_email='<EMAIL>', install_requires=read_requirements(), url='test', license=license, packages=find_packages(exclude=('tests',", "line in f] return requirements setup( name='sample', version='0.1.0', description='Sample package for Python-Guide.org', long_description=readme,", "setup, find_packages def read_requirements() -> List: \"\"\"Parse requirements from requirements.txt.\"\"\" reqs_path = os.path.join('.',", "= [line.rstrip() for line in f] return requirements setup( name='sample', version='0.1.0', description='Sample package", "import os, sys from setuptools import setup, find_packages def read_requirements() -> List: \"\"\"Parse", "find_packages def read_requirements() -> List: \"\"\"Parse requirements from requirements.txt.\"\"\" reqs_path = os.path.join('.', 'requirements.txt')", "f: requirements = [line.rstrip() for line in f] return requirements setup( name='sample', version='0.1.0',", "Learn more: https://github.com/kennethreitz/setup.py import os, sys from setuptools import setup, find_packages def read_requirements()", "f] return requirements setup( name='sample', version='0.1.0', description='Sample package for Python-Guide.org', long_description=readme, author='<NAME>', author_email='<EMAIL>',", "setup( name='sample', version='0.1.0', description='Sample package for Python-Guide.org', long_description=readme, author='<NAME>', author_email='<EMAIL>', install_requires=read_requirements(), url='test', license=license,", "sys from setuptools import setup, find_packages def read_requirements() -> List: \"\"\"Parse requirements from", "as f: requirements = [line.rstrip() for line in f] return requirements setup( name='sample'," ]
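
Note that read_requirements keeps every line of requirements.txt verbatim, so blank lines or '#' comments would flow into install_requires. A filtered variant, offered as an alternative sketch rather than the original author's code:

def read_requirements_filtered() -> List[str]:
    with open(os.path.join('.', 'requirements.txt')) as f:
        return [ln.strip() for ln in f
                if ln.strip() and not ln.lstrip().startswith('#')]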
[ "model.to_json() assert json == REGISTRY def test_registry_model_add_image(): model = RegistryModel.from_json(REGISTRY) image = ImageModel.from_json(IMAGE1)", "\"value1\"}, \"Architecture\": \"ppc64le\", \"Digest\": \"sha256:beebee\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\":", "'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'],", "'2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'], } def test_registry_model(): model", "'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1] } FLATPAK_BUILD = {", "} LIST1 = { \"Digest\": \"sha256:booboo\", \"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1, IMAGE2], \"Tags\": [\"latest\"],", "['baobab-1.2.3-1'], } def test_registry_model(): model = RegistryModel.from_json(REGISTRY) json = model.to_json() assert json ==", "RegistryModel IMAGE1 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"amd64\", \"Digest\": \"sha256:baabaa\", \"Labels\": {\"key2\":", "'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'], } def", "'UserName': 'jdoe', 'Images': [IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'], } def test_registry_model(): model =", "[IMAGE1, IMAGE2], \"Tags\": [\"latest\"], } REGISTRY = { \"Repositories\": [ { \"Name\": \"aisleriot\",", "\"Repositories\": [ { \"Name\": \"aisleriot\", \"Images\": [ IMAGE1, IMAGE2, ], \"Lists\": [ LIST1", "\"Images\": [ IMAGE1, IMAGE2, ], \"Lists\": [ LIST1 ], } ] } IMAGE_BUILD", "{ \"Digest\": \"sha256:booboo\", \"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1, IMAGE2], \"Tags\": [\"latest\"], } REGISTRY =", "model = RegistryModel.from_json(REGISTRY) json = model.to_json() assert json == REGISTRY def test_registry_model_add_image(): model", "'baobab' def test_image_build_from_json(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert isinstance(image, ImageBuildModel) flatpak = ImageBuildModel.from_json(FLATPAK_BUILD) assert", "} IMAGE2 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"ppc64le\", \"Digest\": \"sha256:beebee\", \"Labels\": {\"key2\":", "['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'], } def test_registry_model(): model = RegistryModel.from_json(REGISTRY) json = model.to_json() assert", "\"Tags\": [\"tag2\"] } LIST1 = { \"Digest\": \"sha256:booboo\", \"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1, IMAGE2],", "\"Digest\": \"sha256:baabaa\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345'", "json == REGISTRY def test_registry_model_add_image(): model = 
RegistryModel.from_json(REGISTRY) image = ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image)", "'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1] } FLATPAK_BUILD =", "} def test_registry_model(): model = RegistryModel.from_json(REGISTRY) json = model.to_json() assert json == REGISTRY", "\"Lists\": [ LIST1 ], } ] } IMAGE_BUILD = { 'BuildId': 12345, 'Nvr':", "== 'baobab' def test_image_build_from_json(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert isinstance(image, ImageBuildModel) flatpak = ImageBuildModel.from_json(FLATPAK_BUILD)", "test_registry_model(): model = RegistryModel.from_json(REGISTRY) json = model.to_json() assert json == REGISTRY def test_registry_model_add_image():", "def test_image_build_repository(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository == 'baobab' def test_image_build_from_json(): image =", "\"Tags\": [\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"ppc64le\",", "\"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag2\"] } LIST1 = { \"Digest\": \"sha256:booboo\",", "REGISTRY = { \"Repositories\": [ { \"Name\": \"aisleriot\", \"Images\": [ IMAGE1, IMAGE2, ],", "\"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag2\"] } LIST1 = { \"Digest\": \"sha256:booboo\", \"MediaType\":", "{ 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1]", "\"Digest\": \"sha256:beebee\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag2\"] } LIST1", "} FLATPAK_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName':", "[ { \"Name\": \"aisleriot\", \"Images\": [ IMAGE1, IMAGE2, ], \"Lists\": [ LIST1 ],", "'UserName': 'jdoe', 'Images': [IMAGE1] } FLATPAK_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source':", "model = RegistryModel.from_json(REGISTRY) image = ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image) assert model.repositories['aisleriot2'].images[image.digest] == image def", "model.add_image('aisleriot2', image) assert model.repositories['aisleriot2'].images[image.digest] == image def test_image_build_repository(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository", "= { \"Repositories\": [ { \"Name\": \"aisleriot\", \"Images\": [ IMAGE1, IMAGE2, ], \"Lists\":", "\"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"ppc64le\", \"Digest\": \"sha256:beebee\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\":", "test_registry_model_add_image(): model = RegistryModel.from_json(REGISTRY) image = ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image) assert model.repositories['aisleriot2'].images[image.digest] == image", "'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1] } 
FLATPAK_BUILD = { 'BuildId': 12345,", "RegistryModel.from_json(REGISTRY) json = model.to_json() assert json == REGISTRY def test_registry_model_add_image(): model = RegistryModel.from_json(REGISTRY)", "\"Tags\": [\"latest\"], } REGISTRY = { \"Repositories\": [ { \"Name\": \"aisleriot\", \"Images\": [", "\"linux\", \"Tags\": [\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\":", "RegistryModel.from_json(REGISTRY) image = ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image) assert model.repositories['aisleriot2'].images[image.digest] == image def test_image_build_repository(): image", "{ 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1],", "\"Name\": \"aisleriot\", \"Images\": [ IMAGE1, IMAGE2, ], \"Lists\": [ LIST1 ], } ]", "\"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2 = { \"Annotations\":", "= RegistryModel.from_json(REGISTRY) json = model.to_json() assert json == REGISTRY def test_registry_model_add_image(): model =", "[ IMAGE1, IMAGE2, ], \"Lists\": [ LIST1 ], } ] } IMAGE_BUILD =", "'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds':", "\"ppc64le\", \"Digest\": \"sha256:beebee\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag2\"] }", "'jdoe', 'Images': [IMAGE1] } FLATPAK_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST',", "\"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag2\"] } LIST1 = {", "'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"ppc64le\", \"Digest\": \"sha256:beebee\", \"Labels\":", "\"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2 = {", "12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'],", "image.repository == 'baobab' def test_image_build_from_json(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert isinstance(image, ImageBuildModel) flatpak =", "model.repositories['aisleriot2'].images[image.digest] == image def test_image_build_repository(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository == 'baobab' def", "12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1] } FLATPAK_BUILD", "} REGISTRY = { \"Repositories\": [ { \"Name\": \"aisleriot\", \"Images\": [ IMAGE1, IMAGE2,", 
"test_image_build_from_json(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert isinstance(image, ImageBuildModel) flatpak = ImageBuildModel.from_json(FLATPAK_BUILD) assert isinstance(flatpak, FlatpakBuildModel)", "ImageBuildModel, RegistryModel IMAGE1 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"amd64\", \"Digest\": \"sha256:baabaa\", \"Labels\":", "\"Architecture\": \"ppc64le\", \"Digest\": \"sha256:beebee\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag2\"]", "import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel IMAGE1 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"amd64\",", "{\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag2\"] } LIST1 = { \"Digest\":", "IMAGE1, IMAGE2, ], \"Lists\": [ LIST1 ], } ] } IMAGE_BUILD = {", "== REGISTRY def test_registry_model_add_image(): model = RegistryModel.from_json(REGISTRY) image = ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image) assert", "assert model.repositories['aisleriot2'].images[image.digest] == image def test_image_build_repository(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository == 'baobab'", "'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'], }", "assert json == REGISTRY def test_registry_model_add_image(): model = RegistryModel.from_json(REGISTRY) image = ImageModel.from_json(IMAGE1) model.add_image('aisleriot2',", "def test_image_build_from_json(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert isinstance(image, ImageBuildModel) flatpak = ImageBuildModel.from_json(FLATPAK_BUILD) assert isinstance(flatpak,", "= { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"ppc64le\", \"Digest\": \"sha256:beebee\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\":", "\"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1, IMAGE2], \"Tags\": [\"latest\"], } REGISTRY = { \"Repositories\": [", "'PackageBuilds': ['baobab-1.2.3-1'], } def test_registry_model(): model = RegistryModel.from_json(REGISTRY) json = model.to_json() assert json", "[\"latest\"], } REGISTRY = { \"Repositories\": [ { \"Name\": \"aisleriot\", \"Images\": [ IMAGE1,", "], \"Lists\": [ LIST1 ], } ] } IMAGE_BUILD = { 'BuildId': 12345,", "= { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"amd64\", \"Digest\": \"sha256:baabaa\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\":", "def test_registry_model(): model = RegistryModel.from_json(REGISTRY) json = model.to_json() assert json == REGISTRY def", "\"Architecture\": \"amd64\", \"Digest\": \"sha256:baabaa\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag1\"],", "image = ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image) assert model.repositories['aisleriot2'].images[image.digest] == image def test_image_build_repository(): image =", "[\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"ppc64le\", \"Digest\":", "\"aisleriot\", \"Images\": [ IMAGE1, IMAGE2, ], \"Lists\": [ LIST1 
], } ] }", "{ \"Name\": \"aisleriot\", \"Images\": [ IMAGE1, IMAGE2, ], \"Lists\": [ LIST1 ], }", "{\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2 =", "IMAGE1 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"amd64\", \"Digest\": \"sha256:baabaa\", \"Labels\": {\"key2\": \"value2\"},", "\"value1\"}, \"Architecture\": \"amd64\", \"Digest\": \"sha256:baabaa\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\":", "image = ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository == 'baobab' def test_image_build_from_json(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert", "ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image) assert model.repositories['aisleriot2'].images[image.digest] == image def test_image_build_repository(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert", "== image def test_image_build_repository(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository == 'baobab' def test_image_build_from_json():", "IMAGE2], \"Tags\": [\"latest\"], } REGISTRY = { \"Repositories\": [ { \"Name\": \"aisleriot\", \"Images\":", "[ LIST1 ], } ] } IMAGE_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1',", "} IMAGE_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName':", "], } ] } IMAGE_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST',", "'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'], } def test_registry_model(): model = RegistryModel.from_json(REGISTRY) json = model.to_json()", "IMAGE2 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"ppc64le\", \"Digest\": \"sha256:beebee\", \"Labels\": {\"key2\": \"value2\"},", "{ \"Repositories\": [ { \"Name\": \"aisleriot\", \"Images\": [ IMAGE1, IMAGE2, ], \"Lists\": [", "{\"key1\": \"value1\"}, \"Architecture\": \"ppc64le\", \"Digest\": \"sha256:beebee\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\",", "'Images': [IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'], } def test_registry_model(): model = RegistryModel.from_json(REGISTRY) json", "'2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1] } FLATPAK_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1',", "IMAGE_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe',", "test_image_build_repository(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository == 'baobab' def test_image_build_from_json(): image = ImageBuildModel.from_json(IMAGE_BUILD)", "LIST1 ], } ] } IMAGE_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source':", "= model.to_json() assert json == REGISTRY def test_registry_model_add_image(): model = RegistryModel.from_json(REGISTRY) image =", "\"Digest\": \"sha256:booboo\", \"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1, IMAGE2], \"Tags\": 
[\"latest\"], } REGISTRY = {", "\"OS\": \"linux\", \"Tags\": [\"tag2\"] } LIST1 = { \"Digest\": \"sha256:booboo\", \"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\":", "image) assert model.repositories['aisleriot2'].images[image.digest] == image def test_image_build_repository(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository ==", "\"sha256:baabaa\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' }", "= { \"Digest\": \"sha256:booboo\", \"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1, IMAGE2], \"Tags\": [\"latest\"], } REGISTRY", "\"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1, IMAGE2], \"Tags\": [\"latest\"], } REGISTRY = { \"Repositories\": [ {", "FLATPAK_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe',", "def test_registry_model_add_image(): model = RegistryModel.from_json(REGISTRY) image = ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image) assert model.repositories['aisleriot2'].images[image.digest] ==", "FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel IMAGE1 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"amd64\", \"Digest\":", "} ] } IMAGE_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime':", "= { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images':", "assert image.repository == 'baobab' def test_image_build_from_json(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert isinstance(image, ImageBuildModel) flatpak", "image def test_image_build_repository(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository == 'baobab' def test_image_build_from_json(): image", "'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1], 'ModuleBuilds':", "\"linux\", \"Tags\": [\"tag2\"] } LIST1 = { \"Digest\": \"sha256:booboo\", \"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1,", "from flatpak_indexer.models import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel IMAGE1 = { \"Annotations\": {\"key1\": \"value1\"},", "ImageModel, ImageBuildModel, RegistryModel IMAGE1 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"amd64\", \"Digest\": \"sha256:baabaa\",", "\"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2 = { \"Annotations\": {\"key1\":", "\"OS\": \"linux\", \"Tags\": [\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2 = { \"Annotations\": {\"key1\": \"value1\"},", "\"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag2\"] } LIST1 = { \"Digest\": \"sha256:booboo\", \"MediaType\": 
\"application/vnd.docker.distribution.manifest.list.v2+json\",", "LIST1 = { \"Digest\": \"sha256:booboo\", \"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1, IMAGE2], \"Tags\": [\"latest\"], }", "\"Images\": [IMAGE1, IMAGE2], \"Tags\": [\"latest\"], } REGISTRY = { \"Repositories\": [ { \"Name\":", "\"sha256:booboo\", \"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1, IMAGE2], \"Tags\": [\"latest\"], } REGISTRY = { \"Repositories\":", "'jdoe', 'Images': [IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'], } def test_registry_model(): model = RegistryModel.from_json(REGISTRY)", "'Images': [IMAGE1] } FLATPAK_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime':", "= RegistryModel.from_json(REGISTRY) image = ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image) assert model.repositories['aisleriot2'].images[image.digest] == image def test_image_build_repository():", "flatpak_indexer.models import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel IMAGE1 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\":", "[IMAGE1] } FLATPAK_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00',", "'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1] } FLATPAK_BUILD = { 'BuildId':", "] } IMAGE_BUILD = { 'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00',", "{\"key1\": \"value1\"}, \"Architecture\": \"amd64\", \"Digest\": \"sha256:baabaa\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\",", "\"sha256:beebee\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag2\"] } LIST1 =", "'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'], } def test_registry_model():", "= ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image) assert model.repositories['aisleriot2'].images[image.digest] == image def test_image_build_repository(): image = ImageBuildModel.from_json(IMAGE_BUILD)", "'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2 = { \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"ppc64le\", \"Digest\": \"sha256:beebee\",", "{ \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"amd64\", \"Digest\": \"sha256:baabaa\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\",", "REGISTRY def test_registry_model_add_image(): model = RegistryModel.from_json(REGISTRY) image = ImageModel.from_json(IMAGE1) model.add_image('aisleriot2', image) assert model.repositories['aisleriot2'].images[image.digest]", "{ \"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"ppc64le\", \"Digest\": \"sha256:beebee\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\",", 
"ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository == 'baobab' def test_image_build_from_json(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert isinstance(image, ImageBuildModel)", "= ImageBuildModel.from_json(IMAGE_BUILD) assert image.repository == 'baobab' def test_image_build_from_json(): image = ImageBuildModel.from_json(IMAGE_BUILD) assert isinstance(image,", "\"amd64\", \"Digest\": \"sha256:baabaa\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag1\"], 'PullSpec':", "\"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\": \"linux\", \"Tags\": [\"tag1\"], 'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345' } IMAGE2", "'BuildId': 12345, 'Nvr': 'testrepo-1.2.3-1', 'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST', 'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1] }", "'CompletionTime': '2020-07-31T16:26:22+00:00', 'UserName': 'jdoe', 'Images': [IMAGE1] } FLATPAK_BUILD = { 'BuildId': 12345, 'Nvr':", "[IMAGE1], 'ModuleBuilds': ['baobab-1.2.3-3020190603102507'], 'PackageBuilds': ['baobab-1.2.3-1'], } def test_registry_model(): model = RegistryModel.from_json(REGISTRY) json =", "json = model.to_json() assert json == REGISTRY def test_registry_model_add_image(): model = RegistryModel.from_json(REGISTRY) image", "IMAGE2, ], \"Lists\": [ LIST1 ], } ] } IMAGE_BUILD = { 'BuildId':", "\"Annotations\": {\"key1\": \"value1\"}, \"Architecture\": \"amd64\", \"Digest\": \"sha256:baabaa\", \"Labels\": {\"key2\": \"value2\"}, \"MediaType\": \"application/vnd.docker.distribution.manifest.v2+json\", \"OS\":", "[\"tag2\"] } LIST1 = { \"Digest\": \"sha256:booboo\", \"MediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\", \"Images\": [IMAGE1, IMAGE2], \"Tags\":" ]
[ "return None for path in scriptPaths: testPath = path +'/' + _filename if", "return cmds.file(q=True, sn=True).split('/')[-1] def getPathFindFile(_filename): u''' MAYA_SCRIPT_PATH안에 파일이 있으면 경로값을, 없으면 None을 반환", "한칸 읽어오기(unicode) return cmds.file(q=True, sn=True).split('/')[-1] def getPathFindFile(_filename): u''' MAYA_SCRIPT_PATH안에 파일이 있으면 경로값을, 없으면", "방식 scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if not scriptPaths: return None for path in", "[] for r, d, f in os.walk(_path): for file in f: #패스가 같을", "이쁘게 바꿔준다 아니면 경로 앞에 r을 붙여줘라 ''' return _path.replace(\"\\\\\",\"/\") def clearOutput(): u'''", "잘 먹힘 User/Documents/Maya 폴더의 경로를 반환 ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] if", "!= '': return os.path.join(maya_app_dir, path) else: return maya_app_dir if os.environ.get('HOME'): home = os.environ['HOME']", "os.path import abspath import maya.app.general.resourceBrowser as resourceBrowser # 기능 ############################################################################# def convertSlash(_path): u'''", "r을 붙여줘라 ''' return _path.replace(\"\\\\\",\"/\") def clearOutput(): u''' Clear History for Script Editor", "def getNameMayaFile(): u''' Maya파일의 이름 ''' #파일이름 얻어오기 /로 나눠서 맨끝에서 한칸 읽어오기(unicode)", "읽어오기(unicode) return cmds.file(q=True, sn=True).split('/')[-1] def getPathFindFile(_filename): u''' MAYA_SCRIPT_PATH안에 파일이 있으면 경로값을, 없으면 None을", "같을 경우 만( 고로 같은 폴더 녀석들만 골라쥼) if r == _path: file_list.append(os.path.join(r,file).replace('\\\\','/'))", "# scriptPath = os.environ['MAYA_SCRIPT_PATH'] 환경변수값 가져오는 다른 방식 scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if", "def getMayaVersion(): u''' 마야의 버전을 반환합니다. 2018 이런식으로 int 값임 ''' return int(cmds.about(v", "file in f: #패스가 같을 경우 만( 고로 같은 폴더 녀석들만 골라쥼) if", "-1: return path.replace('prefs/markingMenus','scripts') def getPathIMTool(): u''' C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\") ''' #scriptPath = scriptPath.replace('/IMUtility/common.py','') return", "아니면 경로 앞에 r을 붙여줘라 ''' return _path.replace(\"\\\\\",\"/\") def clearOutput(): u''' Clear History", "return file_list # 경로 ############################################################################# def getPathMayaFile(): u''' Maya파일의 절대경로 ''' #파일이름 포함", "return cmds.internalVar(userAppDir=True) def getPathDocumentMaya(): u''' * Dos에서도 잘 먹힘 C:/Users/rationalcat/Documents/maya/ ''' if os.environ.get('MAYA_APP_DIR'):", "else: return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathMaya2018Script(): u''' maya/2018/Script 경로 반환 ''' scriptPaths =", "u''' MAYA_SCRIPT_PATH안에 파일이 있으면 경로값을, 없으면 None을 반환 Script안 서브폴더까지는 검색하지 않음 '''", "get_dirfiles_sub(_path): ''' sub 폴더 포함 ''' file_list = [] for r, d, f", "''' sub 폴더 포함 ''' file_list = [] for r, d, f in", "os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] if path != '': return os.path.join(maya_app_dir, path) else: return", "resourceBrowser # 기능 ############################################################################# def convertSlash(_path): u''' 파일 경로를 윈도우에서 바로 복사할 경우", "# 파일 리스트 얻기 def get_dirfiles(_path): file_list = [] for r, d, f", "얻어오기 /로 나눠서 맨끝에서 한칸 읽어오기(unicode) return cmds.file(q=True, sn=True).split('/')[-1] def getPathFindFile(_filename): u''' MAYA_SCRIPT_PATH안에", "file in f: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list # 경로 ############################################################################# def getPathMayaFile(): 
u''' Maya파일의", "'': return os.path.join(os.path.realpath(os.path.join(home, 'Documents/maya')),path) else: return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathMaya2018Script(): u''' maya/2018/Script 경로", "u''' * Dos에서도 잘 먹힘 User/Documents/Maya 폴더의 경로를 반환 ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir", "return os.path.join(maya_app_dir, path) else: return maya_app_dir if os.environ.get('HOME'): home = os.environ['HOME'] else: home", "maya_app_dir = os.environ['MAYA_APP_DIR'] return maya_app_dir if os.environ.get('HOME'): home = os.environ['HOME'] else: home =", "for Script Editor ''' cmds.scriptEditorInfo(clearHistory=True) def forPrint(_list): u''' 그냥 리스트를 나열해줍니다 ''' for", "반환합니다. 2018 이런식으로 int 값임 ''' return int(cmds.about(v = True).split(\"-\")[0].split(\" \")[0]) def get_resource_path():", "이런식으로 int 값임 ''' return int(cmds.about(v = True).split(\"-\")[0].split(\" \")[0]) def get_resource_path(): resource_browser =", "!= -1: return path.replace('prefs/markingMenus','scripts') def getPathIMTool(): u''' C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\") ''' #scriptPath = scriptPath.replace('/IMUtility/common.py','')", "None for path in scriptPaths: testPath = path +'/' + _filename if os.path.exists(testPath):", "from os.path import abspath import maya.app.general.resourceBrowser as resourceBrowser # 기능 ############################################################################# def convertSlash(_path):", "path) else: return maya_app_dir if os.environ.get('HOME'): home = os.environ['HOME'] else: home = os.environ['USERPROFILE']", "얻기 def get_dirfiles(_path): file_list = [] for r, d, f in os.walk(_path): for", "sub 폴더 포함 ''' file_list = [] for r, d, f in os.walk(_path):", "/로 이쁘게 바꿔준다 아니면 경로 앞에 r을 붙여줘라 ''' return _path.replace(\"\\\\\",\"/\") def clearOutput():", "값임 ''' return int(cmds.about(v = True).split(\"-\")[0].split(\" \")[0]) def get_resource_path(): resource_browser = resourceBrowser.resourceBrowser() resource_path", "* Dos에서도 잘 먹힘 C:/Users/rationalcat/Documents/maya/ ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] return maya_app_dir", "get_dirfiles(_path): file_list = [] for r, d, f in os.walk(_path): for file in", "d, f in os.walk(_path): for file in f: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list # 경로", "file_list # 경로 ############################################################################# def getPathMayaFile(): u''' Maya파일의 절대경로 ''' #파일이름 포함 풀경로", "_list: print item # 파일 리스트 얻기 def get_dirfiles(_path): file_list = [] for", "forPrint(_list): u''' 그냥 리스트를 나열해줍니다 ''' for item in _list: print item #", "서브폴더까지는 검색하지 않음 ''' # scriptPath = os.environ['MAYA_SCRIPT_PATH'] 환경변수값 가져오는 다른 방식 scriptPaths", "골라쥼) if r == _path: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list def get_dirfiles_sub(_path): ''' sub 폴더", "경로 ############################################################################# def getPathMayaFile(): u''' Maya파일의 절대경로 ''' #파일이름 포함 풀경로 얻어오기 return", "scriptPath = os.environ['MAYA_SCRIPT_PATH'] 환경변수값 가져오는 다른 방식 scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if not", "''' #파일이름 얻어오기 /로 나눠서 맨끝에서 한칸 읽어오기(unicode) return cmds.file(q=True, sn=True).split('/')[-1] def getPathFindFile(_filename):", "item in _list: print item # 파일 리스트 얻기 def get_dirfiles(_path): file_list =", "else: home = os.environ['USERPROFILE'] if path != '': return 
os.path.join(os.path.realpath(os.path.join(home, 'Documents/maya')),path) else: return", "get_module_dir_path(): ''' IMTool 폴더 :return: ''' return (os.path.dirname(__file__).replace(\"\\\\\",\"/\")) # 정보 ############################################################################# def getMayaVersion():", "파일 경로를 윈도우에서 바로 복사할 경우 \\를 /로 이쁘게 바꿔준다 아니면 경로 앞에", "for file in f: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list # 경로 ############################################################################# def getPathMayaFile(): u'''", "in os.walk(_path): for file in f: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list # 경로 ############################################################################# def", "= os.environ['MAYA_APP_DIR'] if path != '': return os.path.join(maya_app_dir, path) else: return maya_app_dir if", "abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\") ''' #scriptPath = scriptPath.replace('/IMUtility/common.py','') return abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','') def get_module_dir_path(): ''' IMTool 폴더 :return:", "utf-8 -*- import sys, os import maya.cmds as cmds import maya.mel as mel", "scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if not scriptPaths: return None for path in scriptPaths:", "testPath return None def get_maya_AppDir(): ''' C:/Users/rationalcat/Documents/maya/ :return: ''' return cmds.internalVar(userAppDir=True) def getPathDocumentMaya():", "return testPath return None def get_maya_AppDir(): ''' C:/Users/rationalcat/Documents/maya/ :return: ''' return cmds.internalVar(userAppDir=True) def", "path.replace('prefs/markingMenus','scripts') def getPathIMTool(): u''' C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\") ''' #scriptPath = scriptPath.replace('/IMUtility/common.py','') return abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','') def", "바로 복사할 경우 \\를 /로 이쁘게 바꿔준다 아니면 경로 앞에 r을 붙여줘라 '''", "path +'/' + _filename if os.path.exists(testPath): return testPath return None def get_maya_AppDir(): '''", "u''' maya/2018/Script 경로 반환 ''' scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") for path in scriptPaths:", "기능 ############################################################################# def convertSlash(_path): u''' 파일 경로를 윈도우에서 바로 복사할 경우 \\를 /로", "def get_dirfiles(_path): file_list = [] for r, d, f in os.walk(_path): for file", "f in os.walk(_path): for file in f: #패스가 같을 경우 만( 고로 같은", "os.environ.get('HOME'): home = os.environ['HOME'] else: home = os.environ['USERPROFILE'] return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathDocumentMaya_Add(path):", "cmds.file(q=True, sn=True) def getNameMayaFile(): u''' Maya파일의 이름 ''' #파일이름 얻어오기 /로 나눠서 맨끝에서", "* Dos에서도 잘 먹힘 User/Documents/Maya 폴더의 경로를 반환 ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir =", "마야의 버전을 반환합니다. 
2018 이런식으로 int 값임 ''' return int(cmds.about(v = True).split(\"-\")[0].split(\" \")[0])", "file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list def get_dirfiles_sub(_path): ''' sub 폴더 포함 ''' file_list = []", "cmds.scriptEditorInfo(clearHistory=True) def forPrint(_list): u''' 그냥 리스트를 나열해줍니다 ''' for item in _list: print", "maya.cmds as cmds import maya.mel as mel from inspect import getsourcefile from os.path", "+'/' + _filename if os.path.exists(testPath): return testPath return None def get_maya_AppDir(): ''' C:/Users/rationalcat/Documents/maya/", "r, d, f in os.walk(_path): for file in f: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list #", "u''' 마야의 버전을 반환합니다. 2018 이런식으로 int 값임 ''' return int(cmds.about(v = True).split(\"-\")[0].split(\"", "바꿔준다 아니면 경로 앞에 r을 붙여줘라 ''' return _path.replace(\"\\\\\",\"/\") def clearOutput(): u''' Clear", "getPathDocumentMaya(): u''' * Dos에서도 잘 먹힘 C:/Users/rationalcat/Documents/maya/ ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR']", "Script안 서브폴더까지는 검색하지 않음 ''' # scriptPath = os.environ['MAYA_SCRIPT_PATH'] 환경변수값 가져오는 다른 방식", "= os.environ['MAYA_APP_DIR'] return maya_app_dir if os.environ.get('HOME'): home = os.environ['HOME'] else: home = os.environ['USERPROFILE']", "os.path.join(maya_app_dir, path) else: return maya_app_dir if os.environ.get('HOME'): home = os.environ['HOME'] else: home =", "== _path: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list def get_dirfiles_sub(_path): ''' sub 폴더 포함 ''' file_list", "int(cmds.about(v = True).split(\"-\")[0].split(\" \")[0]) def get_resource_path(): resource_browser = resourceBrowser.resourceBrowser() resource_path = resource_browser.run() return", "for item in _list: print item # 파일 리스트 얻기 def get_dirfiles(_path): file_list", "inspect import getsourcefile from os.path import abspath import maya.app.general.resourceBrowser as resourceBrowser # 기능", "+ _filename if os.path.exists(testPath): return testPath return None def get_maya_AppDir(): ''' C:/Users/rationalcat/Documents/maya/ :return:", "붙여줘라 ''' return _path.replace(\"\\\\\",\"/\") def clearOutput(): u''' Clear History for Script Editor '''", "= os.environ['MAYA_SCRIPT_PATH'] 환경변수값 가져오는 다른 방식 scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if not scriptPaths:", "''' #파일이름 포함 풀경로 얻어오기 return cmds.file(q=True, sn=True) def getNameMayaFile(): u''' Maya파일의 이름", "return None def get_maya_AppDir(): ''' C:/Users/rationalcat/Documents/maya/ :return: ''' return cmds.internalVar(userAppDir=True) def getPathDocumentMaya(): u'''", "return file_list def get_dirfiles_sub(_path): ''' sub 폴더 포함 ''' file_list = [] for", "get_maya_AppDir(): ''' C:/Users/rationalcat/Documents/maya/ :return: ''' return cmds.internalVar(userAppDir=True) def getPathDocumentMaya(): u''' * Dos에서도 잘", "이름 ''' #파일이름 얻어오기 /로 나눠서 맨끝에서 한칸 읽어오기(unicode) return cmds.file(q=True, sn=True).split('/')[-1] def", "리스트 얻기 def get_dirfiles(_path): file_list = [] for r, d, f in os.walk(_path):", "u''' Maya파일의 이름 ''' #파일이름 얻어오기 /로 나눠서 맨끝에서 한칸 읽어오기(unicode) return cmds.file(q=True,", "return maya_app_dir if os.environ.get('HOME'): home = os.environ['HOME'] else: home = os.environ['USERPROFILE'] if path", "return int(cmds.about(v = True).split(\"-\")[0].split(\" \")[0]) def get_resource_path(): resource_browser = resourceBrowser.resourceBrowser() resource_path = resource_browser.run()", "경로를 윈도우에서 바로 복사할 경우 \\를 /로 이쁘게 바꿔준다 아니면 경로 앞에 r을", "f: 
file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list # 경로 ############################################################################# def getPathMayaFile(): u''' Maya파일의 절대경로 '''", "f in os.walk(_path): for file in f: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list # 경로 #############################################################################", ":return: ''' return cmds.internalVar(userAppDir=True) def getPathDocumentMaya(): u''' * Dos에서도 잘 먹힘 C:/Users/rationalcat/Documents/maya/ '''", "maya.app.general.resourceBrowser as resourceBrowser # 기능 ############################################################################# def convertSlash(_path): u''' 파일 경로를 윈도우에서 바로", "반환 ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] if path != '': return os.path.join(maya_app_dir,", "import abspath import maya.app.general.resourceBrowser as resourceBrowser # 기능 ############################################################################# def convertSlash(_path): u''' 파일", "그냥 리스트를 나열해줍니다 ''' for item in _list: print item # 파일 리스트", "_path: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list def get_dirfiles_sub(_path): ''' sub 폴더 포함 ''' file_list =", "u''' Maya파일의 절대경로 ''' #파일이름 포함 풀경로 얻어오기 return cmds.file(q=True, sn=True) def getNameMayaFile():", "if path != '': return os.path.join(maya_app_dir, path) else: return maya_app_dir if os.environ.get('HOME'): home", "return abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','') def get_module_dir_path(): ''' IMTool 폴더 :return: ''' return (os.path.dirname(__file__).replace(\"\\\\\",\"/\")) # 정보", "path != '': return os.path.join(maya_app_dir, path) else: return maya_app_dir if os.environ.get('HOME'): home =", "mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if not scriptPaths: return None for path in scriptPaths: testPath =", "# 기능 ############################################################################# def convertSlash(_path): u''' 파일 경로를 윈도우에서 바로 복사할 경우 \\를", "in scriptPaths: if path.find('markingMenus') != -1: return path.replace('prefs/markingMenus','scripts') def getPathIMTool(): u''' C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\")", "u''' C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\") ''' #scriptPath = scriptPath.replace('/IMUtility/common.py','') return abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','') def get_module_dir_path(): ''' IMTool", "먹힘 User/Documents/Maya 폴더의 경로를 반환 ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] if path", "import getsourcefile from os.path import abspath import maya.app.general.resourceBrowser as resourceBrowser # 기능 #############################################################################", "else: home = os.environ['USERPROFILE'] return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathDocumentMaya_Add(path): u''' * Dos에서도 잘", "(os.path.dirname(__file__).replace(\"\\\\\",\"/\")) # 정보 ############################################################################# def getMayaVersion(): u''' 마야의 버전을 반환합니다. 
2018 이런식으로 int", "maya/2018/Script 경로 반환 ''' scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") for path in scriptPaths: if", "if not scriptPaths: return None for path in scriptPaths: testPath = path +'/'", "os.environ['MAYA_APP_DIR'] if path != '': return os.path.join(maya_app_dir, path) else: return maya_app_dir if os.environ.get('HOME'):", "폴더 녀석들만 골라쥼) if r == _path: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list def get_dirfiles_sub(_path): '''", "return _path.replace(\"\\\\\",\"/\") def clearOutput(): u''' Clear History for Script Editor ''' cmds.scriptEditorInfo(clearHistory=True) def", "\"MAYA_SCRIPT_PATH\"').split(\";\") for path in scriptPaths: if path.find('markingMenus') != -1: return path.replace('prefs/markingMenus','scripts') def getPathIMTool():", "정보 ############################################################################# def getMayaVersion(): u''' 마야의 버전을 반환합니다. 2018 이런식으로 int 값임 '''", "''' # scriptPath = os.environ['MAYA_SCRIPT_PATH'] 환경변수값 가져오는 다른 방식 scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\")", "not scriptPaths: return None for path in scriptPaths: testPath = path +'/' +", "return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathMaya2018Script(): u''' maya/2018/Script 경로 반환 ''' scriptPaths = mel.eval('getenv", "os.walk(_path): for file in f: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list # 경로 ############################################################################# def getPathMayaFile():", "scriptPath.replace('/IMUtility/common.py','') return abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','') def get_module_dir_path(): ''' IMTool 폴더 :return: ''' return (os.path.dirname(__file__).replace(\"\\\\\",\"/\")) #", "u''' 그냥 리스트를 나열해줍니다 ''' for item in _list: print item # 파일", "''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] if path != '': return os.path.join(maya_app_dir, path)", "maya_app_dir = os.environ['MAYA_APP_DIR'] if path != '': return os.path.join(maya_app_dir, path) else: return maya_app_dir", "def getPathIMTool(): u''' C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\") ''' #scriptPath = scriptPath.replace('/IMUtility/common.py','') return abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','') def get_module_dir_path():", "sn=True).split('/')[-1] def getPathFindFile(_filename): u''' MAYA_SCRIPT_PATH안에 파일이 있으면 경로값을, 없으면 None을 반환 Script안 서브폴더까지는", "''' #scriptPath = scriptPath.replace('/IMUtility/common.py','') return abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','') def get_module_dir_path(): ''' IMTool 폴더 :return: '''", "경로값을, 없으면 None을 반환 Script안 서브폴더까지는 검색하지 않음 ''' # scriptPath = os.environ['MAYA_SCRIPT_PATH']", "복사할 경우 \\를 /로 이쁘게 바꿔준다 아니면 경로 앞에 r을 붙여줘라 ''' return", "if path != '': return os.path.join(os.path.realpath(os.path.join(home, 'Documents/maya')),path) else: return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathMaya2018Script():", "sys, os import maya.cmds as cmds import maya.mel as mel from inspect import", "[] for r, d, f in os.walk(_path): for file in f: file_list.append(os.path.join(r,file).replace('\\\\','/')) return", "mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") for path in scriptPaths: if path.find('markingMenus') != -1: return path.replace('prefs/markingMenus','scripts') def", "경로 반환 ''' scriptPaths = 
mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") for path in scriptPaths: if path.find('markingMenus')", "maya.mel as mel from inspect import getsourcefile from os.path import abspath import maya.app.general.resourceBrowser", "getPathMaya2018Script(): u''' maya/2018/Script 경로 반환 ''' scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") for path in", "os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathMaya2018Script(): u''' maya/2018/Script 경로 반환 ''' scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\")", "os.environ['HOME'] else: home = os.environ['USERPROFILE'] return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathDocumentMaya_Add(path): u''' * Dos에서도", "def get_maya_AppDir(): ''' C:/Users/rationalcat/Documents/maya/ :return: ''' return cmds.internalVar(userAppDir=True) def getPathDocumentMaya(): u''' * Dos에서도", "C:/Users/rationalcat/Documents/maya/ ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] return maya_app_dir if os.environ.get('HOME'): home =", "폴더 포함 ''' file_list = [] for r, d, f in os.walk(_path): for", "file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list # 경로 ############################################################################# def getPathMayaFile(): u''' Maya파일의 절대경로 ''' #파일이름", "u''' * Dos에서도 잘 먹힘 C:/Users/rationalcat/Documents/maya/ ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] return", "return cmds.file(q=True, sn=True) def getNameMayaFile(): u''' Maya파일의 이름 ''' #파일이름 얻어오기 /로 나눠서", "cmds.file(q=True, sn=True).split('/')[-1] def getPathFindFile(_filename): u''' MAYA_SCRIPT_PATH안에 파일이 있으면 경로값을, 없으면 None을 반환 Script안", "_path.replace(\"\\\\\",\"/\") def clearOutput(): u''' Clear History for Script Editor ''' cmds.scriptEditorInfo(clearHistory=True) def forPrint(_list):", "scriptPaths: return None for path in scriptPaths: testPath = path +'/' + _filename", "sn=True) def getNameMayaFile(): u''' Maya파일의 이름 ''' #파일이름 얻어오기 /로 나눠서 맨끝에서 한칸", "MAYA_SCRIPT_PATH안에 파일이 있으면 경로값을, 없으면 None을 반환 Script안 서브폴더까지는 검색하지 않음 ''' #", "얻어오기 return cmds.file(q=True, sn=True) def getNameMayaFile(): u''' Maya파일의 이름 ''' #파일이름 얻어오기 /로", "Script Editor ''' cmds.scriptEditorInfo(clearHistory=True) def forPrint(_list): u''' 그냥 리스트를 나열해줍니다 ''' for item", "os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathDocumentMaya_Add(path): u''' * Dos에서도 잘 먹힘 User/Documents/Maya 폴더의 경로를 반환", "def getPathDocumentMaya_Add(path): u''' * Dos에서도 잘 먹힘 User/Documents/Maya 폴더의 경로를 반환 ''' if", "경우 만( 고로 같은 폴더 녀석들만 골라쥼) if r == _path: file_list.append(os.path.join(r,file).replace('\\\\','/')) return", "= os.environ['USERPROFILE'] return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathDocumentMaya_Add(path): u''' * Dos에서도 잘 먹힘 User/Documents/Maya", "getPathMayaFile(): u''' Maya파일의 절대경로 ''' #파일이름 포함 풀경로 얻어오기 return cmds.file(q=True, sn=True) def", "파일 리스트 얻기 def get_dirfiles(_path): file_list = [] for r, d, f in", "윈도우에서 바로 복사할 경우 \\를 /로 이쁘게 바꿔준다 아니면 경로 앞에 r을 붙여줘라", "import maya.mel as mel from inspect import getsourcefile from os.path import abspath import", "= os.environ['HOME'] else: home = os.environ['USERPROFILE'] if path != '': return os.path.join(os.path.realpath(os.path.join(home, 'Documents/maya')),path)", "''' C:/Users/rationalcat/Documents/maya/ :return: ''' return cmds.internalVar(userAppDir=True) def getPathDocumentMaya(): u''' * Dos에서도 잘 먹힘", "History for Script Editor ''' cmds.scriptEditorInfo(clearHistory=True) def 
forPrint(_list): u''' 그냥 리스트를 나열해줍니다 '''", "for r, d, f in os.walk(_path): for file in f: #패스가 같을 경우", "않음 ''' # scriptPath = os.environ['MAYA_SCRIPT_PATH'] 환경변수값 가져오는 다른 방식 scriptPaths = mel.eval('getenv", "''' for item in _list: print item # 파일 리스트 얻기 def get_dirfiles(_path):", "############################################################################# def getMayaVersion(): u''' 마야의 버전을 반환합니다. 2018 이런식으로 int 값임 ''' return", "없으면 None을 반환 Script안 서브폴더까지는 검색하지 않음 ''' # scriptPath = os.environ['MAYA_SCRIPT_PATH'] 환경변수값", "# 정보 ############################################################################# def getMayaVersion(): u''' 마야의 버전을 반환합니다. 2018 이런식으로 int 값임", "def get_dirfiles_sub(_path): ''' sub 폴더 포함 ''' file_list = [] for r, d,", "_filename if os.path.exists(testPath): return testPath return None def get_maya_AppDir(): ''' C:/Users/rationalcat/Documents/maya/ :return: '''", "############################################################################# def getPathMayaFile(): u''' Maya파일의 절대경로 ''' #파일이름 포함 풀경로 얻어오기 return cmds.file(q=True,", "u''' 파일 경로를 윈도우에서 바로 복사할 경우 \\를 /로 이쁘게 바꿔준다 아니면 경로", "파일이 있으면 경로값을, 없으면 None을 반환 Script안 서브폴더까지는 검색하지 않음 ''' # scriptPath", "같은 폴더 녀석들만 골라쥼) if r == _path: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list def get_dirfiles_sub(_path):", "if os.environ.get('HOME'): home = os.environ['HOME'] else: home = os.environ['USERPROFILE'] return os.path.realpath(os.path.join(home, 'Documents/maya')) def", "가져오는 다른 방식 scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if not scriptPaths: return None for", "cmds.internalVar(userAppDir=True) def getPathDocumentMaya(): u''' * Dos에서도 잘 먹힘 C:/Users/rationalcat/Documents/maya/ ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir", "''' return (os.path.dirname(__file__).replace(\"\\\\\",\"/\")) # 정보 ############################################################################# def getMayaVersion(): u''' 마야의 버전을 반환합니다. 
2018", "abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','') def get_module_dir_path(): ''' IMTool 폴더 :return: ''' return (os.path.dirname(__file__).replace(\"\\\\\",\"/\")) # 정보 #############################################################################", "-*- import sys, os import maya.cmds as cmds import maya.mel as mel from", "리스트를 나열해줍니다 ''' for item in _list: print item # 파일 리스트 얻기", "= path +'/' + _filename if os.path.exists(testPath): return testPath return None def get_maya_AppDir():", "''' return cmds.internalVar(userAppDir=True) def getPathDocumentMaya(): u''' * Dos에서도 잘 먹힘 C:/Users/rationalcat/Documents/maya/ ''' if", "path.find('markingMenus') != -1: return path.replace('prefs/markingMenus','scripts') def getPathIMTool(): u''' C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\") ''' #scriptPath =", "#파일이름 포함 풀경로 얻어오기 return cmds.file(q=True, sn=True) def getNameMayaFile(): u''' Maya파일의 이름 '''", "if os.environ.get('HOME'): home = os.environ['HOME'] else: home = os.environ['USERPROFILE'] if path != '':", "getPathFindFile(_filename): u''' MAYA_SCRIPT_PATH안에 파일이 있으면 경로값을, 없으면 None을 반환 Script안 서브폴더까지는 검색하지 않음", "testPath = path +'/' + _filename if os.path.exists(testPath): return testPath return None def", "Maya파일의 절대경로 ''' #파일이름 포함 풀경로 얻어오기 return cmds.file(q=True, sn=True) def getNameMayaFile(): u'''", "os.path.join(os.path.realpath(os.path.join(home, 'Documents/maya')),path) else: return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathMaya2018Script(): u''' maya/2018/Script 경로 반환 '''", "def get_module_dir_path(): ''' IMTool 폴더 :return: ''' return (os.path.dirname(__file__).replace(\"\\\\\",\"/\")) # 정보 ############################################################################# def", "폴더의 경로를 반환 ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] if path != '':", "포함 풀경로 얻어오기 return cmds.file(q=True, sn=True) def getNameMayaFile(): u''' Maya파일의 이름 ''' #파일이름", "r, d, f in os.walk(_path): for file in f: #패스가 같을 경우 만(", "mel from inspect import getsourcefile from os.path import abspath import maya.app.general.resourceBrowser as resourceBrowser", "if r == _path: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list def get_dirfiles_sub(_path): ''' sub 폴더 포함", "r == _path: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list def get_dirfiles_sub(_path): ''' sub 폴더 포함 '''", "''' scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") for path in scriptPaths: if path.find('markingMenus') != -1:", "<reponame>naong2/MayaPython #-*- coding: utf-8 -*- import sys, os import maya.cmds as cmds import", "coding: utf-8 -*- import sys, os import maya.cmds as cmds import maya.mel as", "for r, d, f in os.walk(_path): for file in f: file_list.append(os.path.join(r,file).replace('\\\\','/')) return file_list", "= [] for r, d, f in os.walk(_path): for file in f: #패스가", "C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\") ''' #scriptPath = scriptPath.replace('/IMUtility/common.py','') return abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','') def get_module_dir_path(): ''' IMTool 폴더", "home = os.environ['HOME'] else: home = os.environ['USERPROFILE'] return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathDocumentMaya_Add(path): u'''", "경우 \\를 /로 이쁘게 바꿔준다 아니면 경로 앞에 r을 붙여줘라 ''' return 
_path.replace(\"\\\\\",\"/\")", "= [] for r, d, f in os.walk(_path): for file in f: file_list.append(os.path.join(r,file).replace('\\\\','/'))", "return path.replace('prefs/markingMenus','scripts') def getPathIMTool(): u''' C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\") ''' #scriptPath = scriptPath.replace('/IMUtility/common.py','') return abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','')", "= mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if not scriptPaths: return None for path in scriptPaths: testPath", "def getPathFindFile(_filename): u''' MAYA_SCRIPT_PATH안에 파일이 있으면 경로값을, 없으면 None을 반환 Script안 서브폴더까지는 검색하지", "home = os.environ['USERPROFILE'] if path != '': return os.path.join(os.path.realpath(os.path.join(home, 'Documents/maya')),path) else: return os.path.realpath(os.path.join(home,", "item # 파일 리스트 얻기 def get_dirfiles(_path): file_list = [] for r, d,", "maya_app_dir if os.environ.get('HOME'): home = os.environ['HOME'] else: home = os.environ['USERPROFILE'] return os.path.realpath(os.path.join(home, 'Documents/maya'))", "# 경로 ############################################################################# def getPathMayaFile(): u''' Maya파일의 절대경로 ''' #파일이름 포함 풀경로 얻어오기", "in os.walk(_path): for file in f: #패스가 같을 경우 만( 고로 같은 폴더", "u''' Clear History for Script Editor ''' cmds.scriptEditorInfo(clearHistory=True) def forPrint(_list): u''' 그냥 리스트를", "경로를 반환 ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] if path != '': return", "/로 나눠서 맨끝에서 한칸 읽어오기(unicode) return cmds.file(q=True, sn=True).split('/')[-1] def getPathFindFile(_filename): u''' MAYA_SCRIPT_PATH안에 파일이", "''' IMTool 폴더 :return: ''' return (os.path.dirname(__file__).replace(\"\\\\\",\"/\")) # 정보 ############################################################################# def getMayaVersion(): u'''", "IMTool 폴더 :return: ''' return (os.path.dirname(__file__).replace(\"\\\\\",\"/\")) # 정보 ############################################################################# def getMayaVersion(): u''' 마야의", "있으면 경로값을, 없으면 None을 반환 Script안 서브폴더까지는 검색하지 않음 ''' # scriptPath =", "나열해줍니다 ''' for item in _list: print item # 파일 리스트 얻기 def", "scriptPaths: if path.find('markingMenus') != -1: return path.replace('prefs/markingMenus','scripts') def getPathIMTool(): u''' C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\") '''", "''' file_list = [] for r, d, f in os.walk(_path): for file in", "Dos에서도 잘 먹힘 C:/Users/rationalcat/Documents/maya/ ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] return maya_app_dir if", "다른 방식 scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if not scriptPaths: return None for path", "잘 먹힘 C:/Users/rationalcat/Documents/maya/ ''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] return maya_app_dir if os.environ.get('HOME'):", "import maya.cmds as cmds import maya.mel as mel from inspect import getsourcefile from", "def getPathMayaFile(): u''' Maya파일의 절대경로 ''' #파일이름 포함 풀경로 얻어오기 return cmds.file(q=True, sn=True)", "path != '': return os.path.join(os.path.realpath(os.path.join(home, 'Documents/maya')),path) else: return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathMaya2018Script(): u'''", "C:/Users/rationalcat/Documents/maya/ :return: ''' return cmds.internalVar(userAppDir=True) def getPathDocumentMaya(): u''' * Dos에서도 잘 먹힘 
C:/Users/rationalcat/Documents/maya/", "return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathDocumentMaya_Add(path): u''' * Dos에서도 잘 먹힘 User/Documents/Maya 폴더의 경로를", "home = os.environ['USERPROFILE'] return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathDocumentMaya_Add(path): u''' * Dos에서도 잘 먹힘", "os.environ['USERPROFILE'] if path != '': return os.path.join(os.path.realpath(os.path.join(home, 'Documents/maya')),path) else: return os.path.realpath(os.path.join(home, 'Documents/maya')) def", "if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] return maya_app_dir if os.environ.get('HOME'): home = os.environ['HOME'] else:", "os.environ.get('HOME'): home = os.environ['HOME'] else: home = os.environ['USERPROFILE'] if path != '': return", "반환 ''' scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") for path in scriptPaths: if path.find('markingMenus') !=", "getsourcefile from os.path import abspath import maya.app.general.resourceBrowser as resourceBrowser # 기능 ############################################################################# def", "os.environ['MAYA_SCRIPT_PATH'] 환경변수값 가져오는 다른 방식 scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if not scriptPaths: return", "return (os.path.dirname(__file__).replace(\"\\\\\",\"/\")) # 정보 ############################################################################# def getMayaVersion(): u''' 마야의 버전을 반환합니다. 2018 이런식으로", "def convertSlash(_path): u''' 파일 경로를 윈도우에서 바로 복사할 경우 \\를 /로 이쁘게 바꿔준다", "버전을 반환합니다. 2018 이런식으로 int 값임 ''' return int(cmds.about(v = True).split(\"-\")[0].split(\" \")[0]) def", "경로 앞에 r을 붙여줘라 ''' return _path.replace(\"\\\\\",\"/\") def clearOutput(): u''' Clear History for", "clearOutput(): u''' Clear History for Script Editor ''' cmds.scriptEditorInfo(clearHistory=True) def forPrint(_list): u''' 그냥", "import maya.app.general.resourceBrowser as resourceBrowser # 기능 ############################################################################# def convertSlash(_path): u''' 파일 경로를 윈도우에서", "if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] if path != '': return os.path.join(maya_app_dir, path) else:", "os.environ['USERPROFILE'] return os.path.realpath(os.path.join(home, 'Documents/maya')) def getPathDocumentMaya_Add(path): u''' * Dos에서도 잘 먹힘 User/Documents/Maya 폴더의", "= True).split(\"-\")[0].split(\" \")[0]) def get_resource_path(): resource_browser = resourceBrowser.resourceBrowser() resource_path = resource_browser.run() return resource_path", "맨끝에서 한칸 읽어오기(unicode) return cmds.file(q=True, sn=True).split('/')[-1] def getPathFindFile(_filename): u''' MAYA_SCRIPT_PATH안에 파일이 있으면 경로값을,", "in f: #패스가 같을 경우 만( 고로 같은 폴더 녀석들만 골라쥼) if r", "환경변수값 가져오는 다른 방식 scriptPaths = mel.eval('getenv \"MAYA_SCRIPT_PATH\"').split(\";\") if not scriptPaths: return None", "''' return int(cmds.about(v = True).split(\"-\")[0].split(\" \")[0]) def get_resource_path(): resource_browser = resourceBrowser.resourceBrowser() resource_path =", "d, f in os.walk(_path): for file in f: #패스가 같을 경우 만( 고로", "#scriptPath = scriptPath.replace('/IMUtility/common.py','') return abspath(getsourcefile(lambda:0)).replace(\"\\\\\",\"/\").replace('/IMUtil.py','') def get_module_dir_path(): ''' IMTool 폴더 :return: ''' return", "os.environ['MAYA_APP_DIR'] return maya_app_dir if os.environ.get('HOME'): home = os.environ['HOME'] else: home = os.environ['USERPROFILE'] return", "''' if os.environ.get('MAYA_APP_DIR'): maya_app_dir = os.environ['MAYA_APP_DIR'] return 
#-*- coding: utf-8 -*-
import sys, os
import maya.cmds as cmds
import maya.mel as mel
from inspect import getsourcefile
from os.path import abspath
import maya.app.general.resourceBrowser as resourceBrowser

# Functions
#############################################################################
def convertSlash(_path):
    u'''
    When a file path is copied straight from Windows, converts \ to /.
    Otherwise, prefix the path with r.
    '''
    return _path.replace("\\","/")

def clearOutput():
    u'''
    Clear History for Script Editor
    '''
    cmds.scriptEditorInfo(clearHistory=True)

def forPrint(_list):
    u'''
    Simply prints out the items of a list.
    '''
    for item in _list:
        print(item)

# Get file lists
def get_dirfiles(_path):
    file_list = []
    for r, d, f in os.walk(_path):
        for file in f:
            # Only when the path matches (i.e. pick only files in this exact folder)
            if r == _path:
                file_list.append(os.path.join(r, file).replace('\\','/'))
    return file_list

def get_dirfiles_sub(_path):
    '''
    Includes subfolders.
    '''
    file_list = []
    for r, d, f in os.walk(_path):
        for file in f:
            file_list.append(os.path.join(r, file).replace('\\','/'))
    return file_list

# Paths
#############################################################################
def getPathMayaFile():
    u'''
    Absolute path of the current Maya file.
    '''
    # Get the full path including the file name
    return cmds.file(q=True, sn=True)

def getNameMayaFile():
    u'''
    Name of the current Maya file.
    '''
    # Get the file name: split on / and take the last element (unicode)
    return cmds.file(q=True, sn=True).split('/')[-1]

def getPathFindFile(_filename):
    u'''
    If the file exists under one of the MAYA_SCRIPT_PATH directories,
    returns its full path; otherwise returns None.
    Subfolders of the script directories are not searched.
    '''
    # scriptPath = os.environ['MAYA_SCRIPT_PATH']  # another way to read the env var
    scriptPaths = mel.eval('getenv "MAYA_SCRIPT_PATH"').split(";")
    if not scriptPaths:
        return None
    for path in scriptPaths:
        testPath = path + '/' + _filename
        if os.path.exists(testPath):
            return testPath
    return None

def get_maya_AppDir():
    '''
    C:/Users/rationalcat/Documents/maya/
    :return:
    '''
    return cmds.internalVar(userAppDir=True)

def getPathDocumentMaya():
    u'''
    * Works fine from DOS as well
    C:/Users/rationalcat/Documents/maya/
    '''
    if os.environ.get('MAYA_APP_DIR'):
        maya_app_dir = os.environ['MAYA_APP_DIR']
        return maya_app_dir
    if os.environ.get('HOME'):
        home = os.environ['HOME']
    else:
        home = os.environ['USERPROFILE']
    return os.path.realpath(os.path.join(home, 'Documents/maya'))

def getPathDocumentMaya_Add(path):
    u'''
    * Works fine from DOS as well
    Returns the path of the User/Documents/maya folder.
    '''
    if os.environ.get('MAYA_APP_DIR'):
        maya_app_dir = os.environ['MAYA_APP_DIR']
        if path != '':
            return os.path.join(maya_app_dir, path)
        else:
            return maya_app_dir
    if os.environ.get('HOME'):
        home = os.environ['HOME']
    else:
        home = os.environ['USERPROFILE']
    if path != '':
        return os.path.join(os.path.realpath(os.path.join(home, 'Documents/maya')), path)
    else:
        return os.path.realpath(os.path.join(home, 'Documents/maya'))

def getPathMaya2018Script():
    u'''
    Returns the maya/2018/scripts path.
    '''
    scriptPaths = mel.eval('getenv "MAYA_SCRIPT_PATH"').split(";")
    for path in scriptPaths:
        if path.find('markingMenus') != -1:
            return path.replace('prefs/markingMenus', 'scripts')

def getPathIMTool():
    u'''
    C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py
    abspath(getsourcefile(lambda:0)).replace("\\","/")
    '''
    #scriptPath = scriptPath.replace('/IMUtility/common.py','')
    return abspath(getsourcefile(lambda:0)).replace("\\","/").replace('/IMUtil.py','')

def get_module_dir_path():
    '''
    The IMTool folder.
    :return:
    '''
    return (os.path.dirname(__file__).replace("\\","/"))

# Info
#############################################################################
def getMayaVersion():
    u'''
    Returns Maya's version as an int, e.g. 2018.
    '''
    return int(cmds.about(v=True).split("-")[0].split(" ")[0])

def get_resource_path():
    resource_browser = resourceBrowser.resourceBrowser()
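# ---------------------------------------------------------------------------
# Illustration (not part of the original module): get_dirfiles and
# get_dirfiles_sub above differ only in whether entries from subfolders are
# kept. A minimal standalone sketch of that distinction, runnable without
# Maya -- the temporary directory layout below is invented for the demo:
import os, tempfile

def _demo_dirfiles():
    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, 'sub'))
    for p in ('a.txt', os.path.join('sub', 'b.txt')):
        open(os.path.join(root, p), 'w').close()
    # Keep only files whose directory is the root itself -- like get_dirfiles.
    top_only = [f for r, d, fs in os.walk(root) if r == root for f in fs]
    # Keep every file the walk finds -- like get_dirfiles_sub.
    with_subs = [f for r, d, fs in os.walk(root) for f in fs]
    print(top_only)   # ['a.txt']
    print(with_subs)  # ['a.txt', 'b.txt']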
[ "django.apps import AppConfig class AppConfigTrade(AppConfig): name = \"trades\" def ready(self): from trades.signals.post_save import", "from django.apps import AppConfig class AppConfigTrade(AppConfig): name = \"trades\" def ready(self): from trades.signals.post_save", "import AppConfig class AppConfigTrade(AppConfig): name = \"trades\" def ready(self): from trades.signals.post_save import post_save_hashid" ]
[ "program has taken') print('given the fact that I know it used MT19937!') print('\\nNow", "super-insecure randomness using timestamp!') else: print('Huh? That\\'s all you have? The real timestamp", "def main(): start_time = time.time() print('Pending...') delay(random.randint(40,1000)) timestamp = int(time.time()) seed_mt(timestamp) rand =", "using timestamp!') else: print('Huh? That\\'s all you have? The real timestamp is '", "of the RNG: ' + str(rand)) print('\\nNow I will try to discover the", "you have? The real timestamp is ' + str(timestamp)) print('Poor you.') time_elapsed =", "I will try to discover the seed the program has taken') print('given the", "Ctrl + C được, còn time.sleep() thì không start = time.time() while time.time()", "= time.time() while time.time() - start < seconds: pass def main(): start_time =", "to discover the seed the program has taken') print('given the fact that I", "thì không start = time.time() while time.time() - start < seconds: pass def", "have broken my super-insecure randomness using timestamp!') else: print('Huh? That\\'s all you have?", "the fact that I know it used MT19937!') print('\\nNow cracking...') test = int(time.time())", "try to discover the seed the program has taken') print('given the fact that", "time.time() - start < seconds: pass def main(): start_time = time.time() print('Pending...') delay(random.randint(40,1000))", "is ' + str(test) + ', isn\\'t it?') if test == timestamp: print('Congratulation!", "= extract_number() delay(random.randint(40,1000)) print('\\nFirst output of the RNG: ' + str(rand)) print('\\nNow I", "start = time.time() while time.time() - start < seconds: pass def main(): start_time", "int(time.time()) seed_mt(timestamp) rand = extract_number() delay(random.randint(40,1000)) print('\\nFirst output of the RNG: ' +", "extract_number() print('Haha, the time seed is ' + str(test) + ', isn\\'t it?')", "extract_number() while first != rand: test -= 1 seed_mt(test) first = extract_number() print('Haha,", "+ str(timestamp)) print('Poor you.') time_elapsed = time.time() - start_time print('Time elapsed: ' +", "- start < seconds: pass def main(): start_time = time.time() print('Pending...') delay(random.randint(40,1000)) timestamp", "seed_mt from MT19937 import extract_number def delay( seconds ): #delay thì Ctrl +", "seconds: pass def main(): start_time = time.time() print('Pending...') delay(random.randint(40,1000)) timestamp = int(time.time()) seed_mt(timestamp)", "print('Haha, the time seed is ' + str(test) + ', isn\\'t it?') if", "that I know it used MT19937!') print('\\nNow cracking...') test = int(time.time()) seed_mt(test) first", "first = extract_number() while first != rand: test -= 1 seed_mt(test) first =", "the RNG: ' + str(rand)) print('\\nNow I will try to discover the seed", "it used MT19937!') print('\\nNow cracking...') test = int(time.time()) seed_mt(test) first = extract_number() while", "That\\'s all you have? 
The real timestamp is ' + str(timestamp)) print('Poor you.')", "extract_number() delay(random.randint(40,1000)) print('\\nFirst output of the RNG: ' + str(rand)) print('\\nNow I will", "-= 1 seed_mt(test) first = extract_number() print('Haha, the time seed is ' +", "print('Pending...') delay(random.randint(40,1000)) timestamp = int(time.time()) seed_mt(timestamp) rand = extract_number() delay(random.randint(40,1000)) print('\\nFirst output of", "pass def main(): start_time = time.time() print('Pending...') delay(random.randint(40,1000)) timestamp = int(time.time()) seed_mt(timestamp) rand", "+ str(rand)) print('\\nNow I will try to discover the seed the program has", "start_time = time.time() print('Pending...') delay(random.randint(40,1000)) timestamp = int(time.time()) seed_mt(timestamp) rand = extract_number() delay(random.randint(40,1000))", "main(): start_time = time.time() print('Pending...') delay(random.randint(40,1000)) timestamp = int(time.time()) seed_mt(timestamp) rand = extract_number()", "have? The real timestamp is ' + str(timestamp)) print('Poor you.') time_elapsed = time.time()", "= extract_number() print('Haha, the time seed is ' + str(test) + ', isn\\'t", "import random from MT19937 import seed_mt from MT19937 import extract_number def delay( seconds", "import time import random from MT19937 import seed_mt from MT19937 import extract_number def", "all you have? The real timestamp is ' + str(timestamp)) print('Poor you.') time_elapsed", "broken my super-insecure randomness using timestamp!') else: print('Huh? That\\'s all you have? The", "print('\\nNow cracking...') test = int(time.time()) seed_mt(test) first = extract_number() while first != rand:", "delay(random.randint(40,1000)) timestamp = int(time.time()) seed_mt(timestamp) rand = extract_number() delay(random.randint(40,1000)) print('\\nFirst output of the", "MT19937!') print('\\nNow cracking...') test = int(time.time()) seed_mt(test) first = extract_number() while first !=", "RNG: ' + str(rand)) print('\\nNow I will try to discover the seed the", "print('\\nFirst output of the RNG: ' + str(rand)) print('\\nNow I will try to", "1 seed_mt(test) first = extract_number() print('Haha, the time seed is ' + str(test)", "', isn\\'t it?') if test == timestamp: print('Congratulation! You have broken my super-insecure", "the program has taken') print('given the fact that I know it used MT19937!')", "< seconds: pass def main(): start_time = time.time() print('Pending...') delay(random.randint(40,1000)) timestamp = int(time.time())", "know it used MT19937!') print('\\nNow cracking...') test = int(time.time()) seed_mt(test) first = extract_number()", "import seed_mt from MT19937 import extract_number def delay( seconds ): #delay thì Ctrl", "print('Huh? That\\'s all you have? The real timestamp is ' + str(timestamp)) print('Poor", "MT19937 import extract_number def delay( seconds ): #delay thì Ctrl + C được,", "isn\\'t it?') if test == timestamp: print('Congratulation! You have broken my super-insecure randomness", "The real timestamp is ' + str(timestamp)) print('Poor you.') time_elapsed = time.time() -", "while first != rand: test -= 1 seed_mt(test) first = extract_number() print('Haha, the", "): #delay thì Ctrl + C được, còn time.sleep() thì không start =", "output of the RNG: ' + str(rand)) print('\\nNow I will try to discover", "I know it used MT19937!') print('\\nNow cracking...') test = int(time.time()) seed_mt(test) first =", "str(test) + ', isn\\'t it?') if test == timestamp: print('Congratulation! 
You have broken", "= time.time() - start_time print('Time elapsed: ' + str(time_elapsed)) if __name__ == '__main__':", "timestamp: print('Congratulation! You have broken my super-insecure randomness using timestamp!') else: print('Huh? That\\'s", "if test == timestamp: print('Congratulation! You have broken my super-insecure randomness using timestamp!')", "timestamp is ' + str(timestamp)) print('Poor you.') time_elapsed = time.time() - start_time print('Time", "test == timestamp: print('Congratulation! You have broken my super-insecure randomness using timestamp!') else:", "randomness using timestamp!') else: print('Huh? That\\'s all you have? The real timestamp is", "seed is ' + str(test) + ', isn\\'t it?') if test == timestamp:", "from MT19937 import extract_number def delay( seconds ): #delay thì Ctrl + C", "time import random from MT19937 import seed_mt from MT19937 import extract_number def delay(", "được, còn time.sleep() thì không start = time.time() while time.time() - start <", "= int(time.time()) seed_mt(test) first = extract_number() while first != rand: test -= 1", "timestamp = int(time.time()) seed_mt(timestamp) rand = extract_number() delay(random.randint(40,1000)) print('\\nFirst output of the RNG:", "it?') if test == timestamp: print('Congratulation! You have broken my super-insecure randomness using", "extract_number def delay( seconds ): #delay thì Ctrl + C được, còn time.sleep()", "delay( seconds ): #delay thì Ctrl + C được, còn time.sleep() thì không", "print('\\nNow I will try to discover the seed the program has taken') print('given", "you.') time_elapsed = time.time() - start_time print('Time elapsed: ' + str(time_elapsed)) if __name__", "print('given the fact that I know it used MT19937!') print('\\nNow cracking...') test =", "str(rand)) print('\\nNow I will try to discover the seed the program has taken')", "= extract_number() while first != rand: test -= 1 seed_mt(test) first = extract_number()", "không start = time.time() while time.time() - start < seconds: pass def main():", "str(timestamp)) print('Poor you.') time_elapsed = time.time() - start_time print('Time elapsed: ' + str(time_elapsed))", "time.time() - start_time print('Time elapsed: ' + str(time_elapsed)) if __name__ == '__main__': main()", "You have broken my super-insecure randomness using timestamp!') else: print('Huh? That\\'s all you", "+ ', isn\\'t it?') if test == timestamp: print('Congratulation! You have broken my", "while time.time() - start < seconds: pass def main(): start_time = time.time() print('Pending...')", "time.time() while time.time() - start < seconds: pass def main(): start_time = time.time()", "seconds ): #delay thì Ctrl + C được, còn time.sleep() thì không start", "the time seed is ' + str(test) + ', isn\\'t it?') if test", "seed the program has taken') print('given the fact that I know it used", "is ' + str(timestamp)) print('Poor you.') time_elapsed = time.time() - start_time print('Time elapsed:", "' + str(test) + ', isn\\'t it?') if test == timestamp: print('Congratulation! 
You", "from MT19937 import seed_mt from MT19937 import extract_number def delay( seconds ): #delay", "int(time.time()) seed_mt(test) first = extract_number() while first != rand: test -= 1 seed_mt(test)", "seed_mt(timestamp) rand = extract_number() delay(random.randint(40,1000)) print('\\nFirst output of the RNG: ' + str(rand))", "delay(random.randint(40,1000)) print('\\nFirst output of the RNG: ' + str(rand)) print('\\nNow I will try", "time.time() print('Pending...') delay(random.randint(40,1000)) timestamp = int(time.time()) seed_mt(timestamp) rand = extract_number() delay(random.randint(40,1000)) print('\\nFirst output", "' + str(rand)) print('\\nNow I will try to discover the seed the program", "seed_mt(test) first = extract_number() print('Haha, the time seed is ' + str(test) +", "time_elapsed = time.time() - start_time print('Time elapsed: ' + str(time_elapsed)) if __name__ ==", "my super-insecure randomness using timestamp!') else: print('Huh? That\\'s all you have? The real", "will try to discover the seed the program has taken') print('given the fact", "thì Ctrl + C được, còn time.sleep() thì không start = time.time() while", "rand: test -= 1 seed_mt(test) first = extract_number() print('Haha, the time seed is", "timestamp!') else: print('Huh? That\\'s all you have? The real timestamp is ' +", "start < seconds: pass def main(): start_time = time.time() print('Pending...') delay(random.randint(40,1000)) timestamp =", "test -= 1 seed_mt(test) first = extract_number() print('Haha, the time seed is '", "time.sleep() thì không start = time.time() while time.time() - start < seconds: pass", "MT19937 import seed_mt from MT19937 import extract_number def delay( seconds ): #delay thì", "#delay thì Ctrl + C được, còn time.sleep() thì không start = time.time()", "first != rand: test -= 1 seed_mt(test) first = extract_number() print('Haha, the time", "has taken') print('given the fact that I know it used MT19937!') print('\\nNow cracking...')", "test = int(time.time()) seed_mt(test) first = extract_number() while first != rand: test -=", "taken') print('given the fact that I know it used MT19937!') print('\\nNow cracking...') test", "+ str(test) + ', isn\\'t it?') if test == timestamp: print('Congratulation! You have", "== timestamp: print('Congratulation! You have broken my super-insecure randomness using timestamp!') else: print('Huh?", "print('Congratulation! You have broken my super-insecure randomness using timestamp!') else: print('Huh? 
That\\'s all", "the seed the program has taken') print('given the fact that I know it", "used MT19937!') print('\\nNow cracking...') test = int(time.time()) seed_mt(test) first = extract_number() while first", "rand = extract_number() delay(random.randint(40,1000)) print('\\nFirst output of the RNG: ' + str(rand)) print('\\nNow", "còn time.sleep() thì không start = time.time() while time.time() - start < seconds:", "!= rand: test -= 1 seed_mt(test) first = extract_number() print('Haha, the time seed", "random from MT19937 import seed_mt from MT19937 import extract_number def delay( seconds ):", "+ C được, còn time.sleep() thì không start = time.time() while time.time() -", "first = extract_number() print('Haha, the time seed is ' + str(test) + ',", "= time.time() print('Pending...') delay(random.randint(40,1000)) timestamp = int(time.time()) seed_mt(timestamp) rand = extract_number() delay(random.randint(40,1000)) print('\\nFirst", "def delay( seconds ): #delay thì Ctrl + C được, còn time.sleep() thì", "import extract_number def delay( seconds ): #delay thì Ctrl + C được, còn", "seed_mt(test) first = extract_number() while first != rand: test -= 1 seed_mt(test) first", "else: print('Huh? That\\'s all you have? The real timestamp is ' + str(timestamp))", "C được, còn time.sleep() thì không start = time.time() while time.time() - start", "fact that I know it used MT19937!') print('\\nNow cracking...') test = int(time.time()) seed_mt(test)", "cracking...') test = int(time.time()) seed_mt(test) first = extract_number() while first != rand: test", "' + str(timestamp)) print('Poor you.') time_elapsed = time.time() - start_time print('Time elapsed: '", "= int(time.time()) seed_mt(timestamp) rand = extract_number() delay(random.randint(40,1000)) print('\\nFirst output of the RNG: '", "time seed is ' + str(test) + ', isn\\'t it?') if test ==", "real timestamp is ' + str(timestamp)) print('Poor you.') time_elapsed = time.time() - start_time", "print('Poor you.') time_elapsed = time.time() - start_time print('Time elapsed: ' + str(time_elapsed)) if", "discover the seed the program has taken') print('given the fact that I know" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "var to a port of a socket server. See `guild.cmd_notify` for details. \"\"\"", "from guild import log from guild import util def main(args): _init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args))", "KIND, either express or implied. # See the License for the specific language", "def _cwd(args): return _validated_dir(args.cwd) def _guild_home(args): return _validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True) def _validated_dir(path,", "Unless required by applicable law or agreed to in writing, software # distributed", "of Guild commands. Currently Guild supports one handler type - socket notification of", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "License. # You may obtain a copy of the License at # #", "PBC # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "'%s' does not exist\" % path) if not os.path.isdir(path): cli.error(\"'%s' is not a", "a directory\" % path) if guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\")) return path def _apply_guild_patch(): \"\"\"Look", "to respond to start and stop of Guild commands. Currently Guild supports one", "to a port of a socket server. See `guild.cmd_notify` for details. \"\"\" _maybe_register_cmd_notify()", "os.path.exists(patch_path): from guild import python_util python_util.exec_script(patch_path) def _register_cmd_context_handlers(): \"\"\"Register command context handlers. Command", "from guild import python_util python_util.exec_script(patch_path) def _register_cmd_context_handlers(): \"\"\"Register command context handlers. Command context", "does not exist\" % path) if not os.path.isdir(path): cli.error(\"'%s' is not a directory\"", "def _apply_guild_patch(): \"\"\"Look in config cwd for guild_patch.py and load if exists.\"\"\" patch_path", "or logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level) def _cwd(args): return _validated_dir(args.cwd) def _guild_home(args): return _validated_dir(args.guild_home, abs=True,", "law or agreed to in writing, software # distributed under the License is", "= os.path.abspath(path) if not os.path.exists(path): if create: util.ensure_dir(path) else: cli.error(\"directory '%s' does not", "def _register_cmd_context_handlers(): \"\"\"Register command context handlers. Command context handlers can be used to", "the License for the specific language governing permissions and # limitations under the", "return None try: return int(port) except ValueError: raise SystemExit( \"invalid value for GUILD_CMD_NOTIFY_PORT", "import log from guild import util def main(args): _init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers()", "logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level) def _cwd(args): return _validated_dir(args.cwd) def _guild_home(args): return _validated_dir(args.guild_home, abs=True, create=True,", "compliance with the License. 
# You may obtain a copy of the License", "create=False, guild_nocopy=False): path = os.path.expanduser(path) if abs: path = os.path.abspath(path) if not os.path.exists(path):", "os.path.join(config.cwd(), \"guild_patch.py\") if os.path.exists(patch_path): from guild import python_util python_util.exec_script(patch_path) def _register_cmd_context_handlers(): \"\"\"Register command", "abs=False, create=False, guild_nocopy=False): path = os.path.expanduser(path) if abs: path = os.path.abspath(path) if not", "import logging import os from guild import cli from guild import config from", "from guild import config from guild import log from guild import util def", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not port: return None try: return int(port) except ValueError:", "this file except in compliance with the License. # You may obtain a", "used to respond to start and stop of Guild commands. Currently Guild supports", "and stop of Guild commands. Currently Guild supports one handler type - socket", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "if guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\")) return path def _apply_guild_patch(): \"\"\"Look in config cwd for", "_apply_guild_patch() _register_cmd_context_handlers() def _init_logging(args): log_level = args.log_level or logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level) def _cwd(args):", "\"\"\" _maybe_register_cmd_notify() def _maybe_register_cmd_notify(): port = _try_cmd_notify_port() if port: from guild import cmd_notify", "you may not use this file except in compliance with the License. #", "for the specific language governing permissions and # limitations under the License. import", "of command info. This can be used to monitor Guild commands by setting", "a socket server. See `guild.cmd_notify` for details. \"\"\" _maybe_register_cmd_notify() def _maybe_register_cmd_notify(): port =", "from guild import cmd_notify cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port(): port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not port:", "not a directory\" % path) if guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\")) return path def _apply_guild_patch():", "= os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not port: return None try: return int(port) except ValueError: raise", "\".guild-nocopy\")) return path def _apply_guild_patch(): \"\"\"Look in config cwd for guild_patch.py and load", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "start and stop of Guild commands. Currently Guild supports one handler type -", "from guild import util def main(args): _init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers() def _init_logging(args):", "if abs: path = os.path.abspath(path) if not os.path.exists(path): if create: util.ensure_dir(path) else: cli.error(\"directory", "guild_patch.py and load if exists.\"\"\" patch_path = os.path.join(config.cwd(), \"guild_patch.py\") if os.path.exists(patch_path): from guild", "ANY KIND, either express or implied. # See the License for the specific", "specific language governing permissions and # limitations under the License. import logging import", "Guild supports one handler type - socket notification of command info. 
This can", "\"invalid value for GUILD_CMD_NOTIFY_PORT %r: must \" \"be a valid numeric port\" %", "port: from guild import cmd_notify cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port(): port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "type - socket notification of command info. This can be used to monitor", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "cwd for guild_patch.py and load if exists.\"\"\" patch_path = os.path.join(config.cwd(), \"guild_patch.py\") if os.path.exists(patch_path):", "use this file except in compliance with the License. # You may obtain", "= os.path.join(config.cwd(), \"guild_patch.py\") if os.path.exists(patch_path): from guild import python_util python_util.exec_script(patch_path) def _register_cmd_context_handlers(): \"\"\"Register", "handler type - socket notification of command info. This can be used to", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "_validated_dir(args.cwd) def _guild_home(args): return _validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True) def _validated_dir(path, abs=False, create=False, guild_nocopy=False):", "_register_cmd_context_handlers() def _init_logging(args): log_level = args.log_level or logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level) def _cwd(args): return", "- socket notification of command info. This can be used to monitor Guild", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "else: cli.error(\"directory '%s' does not exist\" % path) if not os.path.isdir(path): cli.error(\"'%s' is", "not exist\" % path) if not os.path.isdir(path): cli.error(\"'%s' is not a directory\" %", "and # limitations under the License. 
import logging import os from guild import", "_init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers() def _init_logging(args): log_level = args.log_level or logging.INFO log.init_logging(log_level)", "See the License for the specific language governing permissions and # limitations under", "os from guild import cli from guild import config from guild import log", "load if exists.\"\"\" patch_path = os.path.join(config.cwd(), \"guild_patch.py\") if os.path.exists(patch_path): from guild import python_util", "monitor Guild commands by setting the `GUILD_CMD_NOTIFY_PORT` env var to a port of", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers() def _init_logging(args): log_level = args.log_level or logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level) def", "create: util.ensure_dir(path) else: cli.error(\"directory '%s' does not exist\" % path) if not os.path.isdir(path):", "License, Version 2.0 (the \"License\"); # you may not use this file except", "util.ensure_dir(path) else: cli.error(\"directory '%s' does not exist\" % path) if not os.path.isdir(path): cli.error(\"'%s'", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "if not port: return None try: return int(port) except ValueError: raise SystemExit( \"invalid", "int(port) except ValueError: raise SystemExit( \"invalid value for GUILD_CMD_NOTIFY_PORT %r: must \" \"be", "except ValueError: raise SystemExit( \"invalid value for GUILD_CMD_NOTIFY_PORT %r: must \" \"be a", "config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers() def _init_logging(args): log_level = args.log_level or logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level)", "if not os.path.exists(path): if create: util.ensure_dir(path) else: cli.error(\"directory '%s' does not exist\" %", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "cmd_notify cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port(): port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not port: return None try:", "cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port(): port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not port: return None try: return", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "import util def main(args): _init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers() def _init_logging(args): log_level =", "context handlers. Command context handlers can be used to respond to start and", "config from guild import log from guild import util def main(args): _init_logging(args) config.set_cwd(_cwd(args))", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "for guild_patch.py and load if exists.\"\"\" patch_path = os.path.join(config.cwd(), \"guild_patch.py\") if os.path.exists(patch_path): from", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "_register_cmd_context_handlers(): \"\"\"Register command context handlers. 
Command context handlers can be used to respond", "\"guild_patch.py\") if os.path.exists(patch_path): from guild import python_util python_util.exec_script(patch_path) def _register_cmd_context_handlers(): \"\"\"Register command context", "guild import cmd_notify cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port(): port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not port: return", "def main(args): _init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers() def _init_logging(args): log_level = args.log_level or", "OF ANY KIND, either express or implied. # See the License for the", "if not os.path.isdir(path): cli.error(\"'%s' is not a directory\" % path) if guild_nocopy: util.ensure_file(os.path.join(path,", "return _validated_dir(args.cwd) def _guild_home(args): return _validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True) def _validated_dir(path, abs=False, create=False,", "This can be used to monitor Guild commands by setting the `GUILD_CMD_NOTIFY_PORT` env", "2.0 (the \"License\"); # you may not use this file except in compliance", "for GUILD_CMD_NOTIFY_PORT %r: must \" \"be a valid numeric port\" % port )", "<reponame>guildai/guild-cli # Copyright 2017-2022 RStudio, PBC # # Licensed under the Apache License,", "path = os.path.expanduser(path) if abs: path = os.path.abspath(path) if not os.path.exists(path): if create:", "= os.path.expanduser(path) if abs: path = os.path.abspath(path) if not os.path.exists(path): if create: util.ensure_dir(path)", "# you may not use this file except in compliance with the License.", "Guild commands by setting the `GUILD_CMD_NOTIFY_PORT` env var to a port of a", "ValueError: raise SystemExit( \"invalid value for GUILD_CMD_NOTIFY_PORT %r: must \" \"be a valid", "raise SystemExit( \"invalid value for GUILD_CMD_NOTIFY_PORT %r: must \" \"be a valid numeric", "guild import python_util python_util.exec_script(patch_path) def _register_cmd_context_handlers(): \"\"\"Register command context handlers. Command context handlers", "context handlers can be used to respond to start and stop of Guild", "RStudio, PBC # # Licensed under the Apache License, Version 2.0 (the \"License\");", "agreed to in writing, software # distributed under the License is distributed on", "guild import log from guild import util def main(args): _init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch()", "util.ensure_file(os.path.join(path, \".guild-nocopy\")) return path def _apply_guild_patch(): \"\"\"Look in config cwd for guild_patch.py and", "_cwd(args): return _validated_dir(args.cwd) def _guild_home(args): return _validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True) def _validated_dir(path, abs=False,", "log.init_logging(log_level) log.disable_noisy_loggers(log_level) def _cwd(args): return _validated_dir(args.cwd) def _guild_home(args): return _validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True)", "path = os.path.abspath(path) if not os.path.exists(path): if create: util.ensure_dir(path) else: cli.error(\"directory '%s' does", "in config cwd for guild_patch.py and load if exists.\"\"\" patch_path = os.path.join(config.cwd(), \"guild_patch.py\")", "`guild.cmd_notify` for details. \"\"\" _maybe_register_cmd_notify() def _maybe_register_cmd_notify(): port = _try_cmd_notify_port() if port: from", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "for details. \"\"\" _maybe_register_cmd_notify() def _maybe_register_cmd_notify(): port = _try_cmd_notify_port() if port: from guild", "return _validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True) def _validated_dir(path, abs=False, create=False, guild_nocopy=False): path = os.path.expanduser(path)", "\"\"\"Look in config cwd for guild_patch.py and load if exists.\"\"\" patch_path = os.path.join(config.cwd(),", "(the \"License\"); # you may not use this file except in compliance with", "import os from guild import cli from guild import config from guild import", "guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\")) return path def _apply_guild_patch(): \"\"\"Look in config cwd for guild_patch.py", "import python_util python_util.exec_script(patch_path) def _register_cmd_context_handlers(): \"\"\"Register command context handlers. Command context handlers can", "abs=True, create=True, guild_nocopy=True) def _validated_dir(path, abs=False, create=False, guild_nocopy=False): path = os.path.expanduser(path) if abs:", "return int(port) except ValueError: raise SystemExit( \"invalid value for GUILD_CMD_NOTIFY_PORT %r: must \"", "logging import os from guild import cli from guild import config from guild", "value for GUILD_CMD_NOTIFY_PORT %r: must \" \"be a valid numeric port\" % port", "python_util python_util.exec_script(patch_path) def _register_cmd_context_handlers(): \"\"\"Register command context handlers. Command context handlers can be", "# # Unless required by applicable law or agreed to in writing, software", "be used to respond to start and stop of Guild commands. Currently Guild", "guild import cli from guild import config from guild import log from guild", "main(args): _init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers() def _init_logging(args): log_level = args.log_level or logging.INFO", "cli.error(\"'%s' is not a directory\" % path) if guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\")) return path", "os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not port: return None try: return int(port) except ValueError: raise SystemExit(", "express or implied. # See the License for the specific language governing permissions", "supports one handler type - socket notification of command info. This can be", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "command info. This can be used to monitor Guild commands by setting the", "except in compliance with the License. # You may obtain a copy of", "port of a socket server. See `guild.cmd_notify` for details. 
\"\"\" _maybe_register_cmd_notify() def _maybe_register_cmd_notify():", "_try_cmd_notify_port(): port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not port: return None try: return int(port) except", "by applicable law or agreed to in writing, software # distributed under the", "Command context handlers can be used to respond to start and stop of", "def _try_cmd_notify_port(): port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not port: return None try: return int(port)", "log from guild import util def main(args): _init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers() def", "def _init_logging(args): log_level = args.log_level or logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level) def _cwd(args): return _validated_dir(args.cwd)", "path) if guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\")) return path def _apply_guild_patch(): \"\"\"Look in config cwd", "2017-2022 RStudio, PBC # # Licensed under the Apache License, Version 2.0 (the", "under the License. import logging import os from guild import cli from guild", "\"\"\"Register command context handlers. Command context handlers can be used to respond to", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "patch_path = os.path.join(config.cwd(), \"guild_patch.py\") if os.path.exists(patch_path): from guild import python_util python_util.exec_script(patch_path) def _register_cmd_context_handlers():", "handlers. Command context handlers can be used to respond to start and stop", "commands. Currently Guild supports one handler type - socket notification of command info.", "can be used to monitor Guild commands by setting the `GUILD_CMD_NOTIFY_PORT` env var", "not os.path.isdir(path): cli.error(\"'%s' is not a directory\" % path) if guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\"))", "either express or implied. # See the License for the specific language governing", "None try: return int(port) except ValueError: raise SystemExit( \"invalid value for GUILD_CMD_NOTIFY_PORT %r:", "_validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True) def _validated_dir(path, abs=False, create=False, guild_nocopy=False): path = os.path.expanduser(path) if", "cli.error(\"directory '%s' does not exist\" % path) if not os.path.isdir(path): cli.error(\"'%s' is not", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "by setting the `GUILD_CMD_NOTIFY_PORT` env var to a port of a socket server.", "Guild commands. Currently Guild supports one handler type - socket notification of command", "= args.log_level or logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level) def _cwd(args): return _validated_dir(args.cwd) def _guild_home(args): return", "notification of command info. This can be used to monitor Guild commands by", "governing permissions and # limitations under the License. import logging import os from", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "exists.\"\"\" patch_path = os.path.join(config.cwd(), \"guild_patch.py\") if os.path.exists(patch_path): from guild import python_util python_util.exec_script(patch_path) def", "Currently Guild supports one handler type - socket notification of command info. 
This", "server. See `guild.cmd_notify` for details. \"\"\" _maybe_register_cmd_notify() def _maybe_register_cmd_notify(): port = _try_cmd_notify_port() if", "permissions and # limitations under the License. import logging import os from guild", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "log.disable_noisy_loggers(log_level) def _cwd(args): return _validated_dir(args.cwd) def _guild_home(args): return _validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True) def", "handlers can be used to respond to start and stop of Guild commands.", "socket notification of command info. This can be used to monitor Guild commands", "file except in compliance with the License. # You may obtain a copy", "def _guild_home(args): return _validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True) def _validated_dir(path, abs=False, create=False, guild_nocopy=False): path", "_guild_home(args): return _validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True) def _validated_dir(path, abs=False, create=False, guild_nocopy=False): path =", "import cli from guild import config from guild import log from guild import", "and load if exists.\"\"\" patch_path = os.path.join(config.cwd(), \"guild_patch.py\") if os.path.exists(patch_path): from guild import", "to start and stop of Guild commands. Currently Guild supports one handler type", "of a socket server. See `guild.cmd_notify` for details. \"\"\" _maybe_register_cmd_notify() def _maybe_register_cmd_notify(): port", "not port: return None try: return int(port) except ValueError: raise SystemExit( \"invalid value", "if os.path.exists(patch_path): from guild import python_util python_util.exec_script(patch_path) def _register_cmd_context_handlers(): \"\"\"Register command context handlers.", "is not a directory\" % path) if guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\")) return path def", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "os.path.abspath(path) if not os.path.exists(path): if create: util.ensure_dir(path) else: cli.error(\"directory '%s' does not exist\"", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "os.path.exists(path): if create: util.ensure_dir(path) else: cli.error(\"directory '%s' does not exist\" % path) if", "language governing permissions and # limitations under the License. import logging import os", "_apply_guild_patch(): \"\"\"Look in config cwd for guild_patch.py and load if exists.\"\"\" patch_path =", "_try_cmd_notify_port() if port: from guild import cmd_notify cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port(): port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\")", "the License. # You may obtain a copy of the License at #", "setting the `GUILD_CMD_NOTIFY_PORT` env var to a port of a socket server. 
See", "if create: util.ensure_dir(path) else: cli.error(\"directory '%s' does not exist\" % path) if not", "to in writing, software # distributed under the License is distributed on an", "create=True, guild_nocopy=True) def _validated_dir(path, abs=False, create=False, guild_nocopy=False): path = os.path.expanduser(path) if abs: path", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "limitations under the License. import logging import os from guild import cli from", "one handler type - socket notification of command info. This can be used", "can be used to respond to start and stop of Guild commands. Currently", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. # See the License for the specific language governing permissions and #", "log_level = args.log_level or logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level) def _cwd(args): return _validated_dir(args.cwd) def _guild_home(args):", "return path def _apply_guild_patch(): \"\"\"Look in config cwd for guild_patch.py and load if", "def _maybe_register_cmd_notify(): port = _try_cmd_notify_port() if port: from guild import cmd_notify cmd_notify.init_cmd_context_handler(port) def", "\"License\"); # you may not use this file except in compliance with the", "_maybe_register_cmd_notify(): port = _try_cmd_notify_port() if port: from guild import cmd_notify cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port():", "# limitations under the License. import logging import os from guild import cli", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "guild_nocopy=True) def _validated_dir(path, abs=False, create=False, guild_nocopy=False): path = os.path.expanduser(path) if abs: path =", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "python_util.exec_script(patch_path) def _register_cmd_context_handlers(): \"\"\"Register command context handlers. Command context handlers can be used", "required by applicable law or agreed to in writing, software # distributed under", "port: return None try: return int(port) except ValueError: raise SystemExit( \"invalid value for", "path def _apply_guild_patch(): \"\"\"Look in config cwd for guild_patch.py and load if exists.\"\"\"", "import cmd_notify cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port(): port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if not port: return None", "commands by setting the `GUILD_CMD_NOTIFY_PORT` env var to a port of a socket", "the License. import logging import os from guild import cli from guild import", "a port of a socket server. See `guild.cmd_notify` for details. \"\"\" _maybe_register_cmd_notify() def", "config cwd for guild_patch.py and load if exists.\"\"\" patch_path = os.path.join(config.cwd(), \"guild_patch.py\") if", "applicable law or agreed to in writing, software # distributed under the License", "the `GUILD_CMD_NOTIFY_PORT` env var to a port of a socket server. See `guild.cmd_notify`", "port = _try_cmd_notify_port() if port: from guild import cmd_notify cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port(): port", "`GUILD_CMD_NOTIFY_PORT` env var to a port of a socket server. 
See `guild.cmd_notify` for", "_validated_dir(path, abs=False, create=False, guild_nocopy=False): path = os.path.expanduser(path) if abs: path = os.path.abspath(path) if", "directory\" % path) if guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\")) return path def _apply_guild_patch(): \"\"\"Look in", "env var to a port of a socket server. See `guild.cmd_notify` for details.", "if port: from guild import cmd_notify cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port(): port = os.getenv(\"GUILD_CMD_NOTIFY_PORT\") if", "exist\" % path) if not os.path.isdir(path): cli.error(\"'%s' is not a directory\" % path)", "% path) if guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\")) return path def _apply_guild_patch(): \"\"\"Look in config", "guild import util def main(args): _init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers() def _init_logging(args): log_level", "guild_nocopy=False): path = os.path.expanduser(path) if abs: path = os.path.abspath(path) if not os.path.exists(path): if", "SystemExit( \"invalid value for GUILD_CMD_NOTIFY_PORT %r: must \" \"be a valid numeric port\"", "License. import logging import os from guild import cli from guild import config", "guild import config from guild import log from guild import util def main(args):", "util def main(args): _init_logging(args) config.set_cwd(_cwd(args)) config.set_guild_home(_guild_home(args)) _apply_guild_patch() _register_cmd_context_handlers() def _init_logging(args): log_level = args.log_level", "used to monitor Guild commands by setting the `GUILD_CMD_NOTIFY_PORT` env var to a", "See `guild.cmd_notify` for details. \"\"\" _maybe_register_cmd_notify() def _maybe_register_cmd_notify(): port = _try_cmd_notify_port() if port:", "_maybe_register_cmd_notify() def _maybe_register_cmd_notify(): port = _try_cmd_notify_port() if port: from guild import cmd_notify cmd_notify.init_cmd_context_handler(port)", "info. This can be used to monitor Guild commands by setting the `GUILD_CMD_NOTIFY_PORT`", "or agreed to in writing, software # distributed under the License is distributed", "def _validated_dir(path, abs=False, create=False, guild_nocopy=False): path = os.path.expanduser(path) if abs: path = os.path.abspath(path)", "abs: path = os.path.abspath(path) if not os.path.exists(path): if create: util.ensure_dir(path) else: cli.error(\"directory '%s'", "respond to start and stop of Guild commands. Currently Guild supports one handler", "cli from guild import config from guild import log from guild import util", "or implied. # See the License for the specific language governing permissions and", "not os.path.exists(path): if create: util.ensure_dir(path) else: cli.error(\"directory '%s' does not exist\" % path)", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "Copyright 2017-2022 RStudio, PBC # # Licensed under the Apache License, Version 2.0", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "if exists.\"\"\" patch_path = os.path.join(config.cwd(), \"guild_patch.py\") if os.path.exists(patch_path): from guild import python_util python_util.exec_script(patch_path)", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "# Copyright 2017-2022 RStudio, PBC # # Licensed under the Apache License, Version", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "socket server. See `guild.cmd_notify` for details. \"\"\" _maybe_register_cmd_notify() def _maybe_register_cmd_notify(): port = _try_cmd_notify_port()", "try: return int(port) except ValueError: raise SystemExit( \"invalid value for GUILD_CMD_NOTIFY_PORT %r: must", "_init_logging(args): log_level = args.log_level or logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level) def _cwd(args): return _validated_dir(args.cwd) def", "% path) if not os.path.isdir(path): cli.error(\"'%s' is not a directory\" % path) if", "with the License. # You may obtain a copy of the License at", "the specific language governing permissions and # limitations under the License. import logging", "os.path.expanduser(path) if abs: path = os.path.abspath(path) if not os.path.exists(path): if create: util.ensure_dir(path) else:", "path) if not os.path.isdir(path): cli.error(\"'%s' is not a directory\" % path) if guild_nocopy:", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "from guild import cli from guild import config from guild import log from", "os.path.isdir(path): cli.error(\"'%s' is not a directory\" % path) if guild_nocopy: util.ensure_file(os.path.join(path, \".guild-nocopy\")) return", "details. \"\"\" _maybe_register_cmd_notify() def _maybe_register_cmd_notify(): port = _try_cmd_notify_port() if port: from guild import", "in writing, software # distributed under the License is distributed on an \"AS", "= _try_cmd_notify_port() if port: from guild import cmd_notify cmd_notify.init_cmd_context_handler(port) def _try_cmd_notify_port(): port =", "stop of Guild commands. Currently Guild supports one handler type - socket notification", "be used to monitor Guild commands by setting the `GUILD_CMD_NOTIFY_PORT` env var to", "import config from guild import log from guild import util def main(args): _init_logging(args)", "command context handlers. Command context handlers can be used to respond to start", "to monitor Guild commands by setting the `GUILD_CMD_NOTIFY_PORT` env var to a port", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "args.log_level or logging.INFO log.init_logging(log_level) log.disable_noisy_loggers(log_level) def _cwd(args): return _validated_dir(args.cwd) def _guild_home(args): return _validated_dir(args.guild_home," ]
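# ---------------------------------------------------------------------------
# Illustration (not part of the original file): a standalone check of the
# GUILD_CMD_NOTIFY_PORT parsing behavior in _try_cmd_notify_port() above. The
# helper mirrors that logic without the guild imports so it runs anywhere;
# the env-dict parameter is an addition for testability, not guild's API.
import os

def try_cmd_notify_port(env=os.environ):
    port = env.get("GUILD_CMD_NOTIFY_PORT")
    if not port:
        return None                      # unset or empty -> no handler registered
    try:
        return int(port)
    except ValueError:
        raise SystemExit(
            "invalid value for GUILD_CMD_NOTIFY_PORT %r: must "
            "be a valid numeric port" % port
        )

assert try_cmd_notify_port({}) is None
assert try_cmd_notify_port({"GUILD_CMD_NOTIFY_PORT": "8080"}) == 8080
# A non-numeric value such as "abc" raises SystemExit, matching the original.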
[ "def wifi(): class Communicator(WifiCommunicator): def __init__(self): self.commands = WifiCommands(self) self.settings = WifiSettings(self) def", "Communicator() def test_get_with_no_params(wifi): url = wifi.commands.set_third_party_client_info() assert url == \"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi): zoom", "import Path import pytest from open_gopro.wifi_commands import WifiCommands, WifiSettings, WifiCommunicator from open_gopro import", "assert url == f\"gopro/camera/digital_zoom?percent={zoom}\" def test_get_binary(wifi): file = wifi.commands.download_file(camera_file=\"test_file\", local_file=Path(\"local_file\")) assert file.name ==", "url, file yield Communicator() def test_get_with_no_params(wifi): url = wifi.commands.set_third_party_client_info() assert url == \"gp/gpControl/command/set_client_info\"", "Path): return url, file yield Communicator() def test_get_with_no_params(wifi): url = wifi.commands.set_third_party_client_info() assert url", "file yield Communicator() def test_get_with_no_params(wifi): url = wifi.commands.set_third_party_client_info() assert url == \"gp/gpControl/command/set_client_info\" def", "from open_gopro import params @pytest.fixture def wifi(): class Communicator(WifiCommunicator): def __init__(self): self.commands =", "<gh_stars>1-10 # test_wifi_commands.py/Open GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro). #", "# test_wifi_commands.py/Open GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro). # This", "22:08:51 UTC 2021 from pathlib import Path import pytest from open_gopro.wifi_commands import WifiCommands,", "def get(self, url: str): return url def stream_to_file(self, url: str, file: Path): return", "2021 from pathlib import Path import pytest from open_gopro.wifi_commands import WifiCommands, WifiSettings, WifiCommunicator", "== \"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi): zoom = 99 url = wifi.commands.set_digital_zoom(zoom) assert url ==", "Path import pytest from open_gopro.wifi_commands import WifiCommands, WifiSettings, WifiCommunicator from open_gopro import params", "url: str): return url def stream_to_file(self, url: str, file: Path): return url, file", "url = wifi.commands.set_digital_zoom(zoom) assert url == f\"gopro/camera/digital_zoom?percent={zoom}\" def test_get_binary(wifi): file = wifi.commands.download_file(camera_file=\"test_file\", local_file=Path(\"local_file\"))", "url def stream_to_file(self, url: str, file: Path): return url, file yield Communicator() def", "return url def stream_to_file(self, url: str, file: Path): return url, file yield Communicator()", "str): return url def stream_to_file(self, url: str, file: Path): return url, file yield", "# This copyright was auto-generated on Tue May 18 22:08:51 UTC 2021 from", "auto-generated on Tue May 18 22:08:51 UTC 2021 from pathlib import Path import", "Tue May 18 22:08:51 UTC 2021 from pathlib import Path import pytest from", "url: str, file: Path): return url, file yield Communicator() def test_get_with_no_params(wifi): url =", "18 22:08:51 UTC 2021 from pathlib import Path import pytest from open_gopro.wifi_commands import", "= wifi.commands.set_digital_zoom(zoom) assert url == f\"gopro/camera/digital_zoom?percent={zoom}\" def test_get_binary(wifi): file = wifi.commands.download_file(camera_file=\"test_file\", local_file=Path(\"local_file\")) assert", "was auto-generated on Tue May 18 22:08:51 UTC 2021 from pathlib import Path", "from pathlib import Path 
import pytest from open_gopro.wifi_commands import WifiCommands, WifiSettings, WifiCommunicator from", "stream_to_file(self, url: str, file: Path): return url, file yield Communicator() def test_get_with_no_params(wifi): url", "def test_get_with_no_params(wifi): url = wifi.commands.set_third_party_client_info() assert url == \"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi): zoom =", "pathlib import Path import pytest from open_gopro.wifi_commands import WifiCommands, WifiSettings, WifiCommunicator from open_gopro", "Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro). # This copyright was auto-generated", "get(self, url: str): return url def stream_to_file(self, url: str, file: Path): return url,", "import params @pytest.fixture def wifi(): class Communicator(WifiCommunicator): def __init__(self): self.commands = WifiCommands(self) self.settings", "zoom = 99 url = wifi.commands.set_digital_zoom(zoom) assert url == f\"gopro/camera/digital_zoom?percent={zoom}\" def test_get_binary(wifi): file", "wifi.commands.set_third_party_client_info() assert url == \"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi): zoom = 99 url = wifi.commands.set_digital_zoom(zoom)", "assert url == \"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi): zoom = 99 url = wifi.commands.set_digital_zoom(zoom) assert", "import pytest from open_gopro.wifi_commands import WifiCommands, WifiSettings, WifiCommunicator from open_gopro import params @pytest.fixture", "pytest from open_gopro.wifi_commands import WifiCommands, WifiSettings, WifiCommunicator from open_gopro import params @pytest.fixture def", "url == f\"gopro/camera/digital_zoom?percent={zoom}\" def test_get_binary(wifi): file = wifi.commands.download_file(camera_file=\"test_file\", local_file=Path(\"local_file\")) assert file.name == \"local_file\"", "WifiSettings, WifiCommunicator from open_gopro import params @pytest.fixture def wifi(): class Communicator(WifiCommunicator): def __init__(self):", "self.settings = WifiSettings(self) def get(self, url: str): return url def stream_to_file(self, url: str,", "class Communicator(WifiCommunicator): def __init__(self): self.commands = WifiCommands(self) self.settings = WifiSettings(self) def get(self, url:", "params @pytest.fixture def wifi(): class Communicator(WifiCommunicator): def __init__(self): self.commands = WifiCommands(self) self.settings =", "def __init__(self): self.commands = WifiCommands(self) self.settings = WifiSettings(self) def get(self, url: str): return", "Inc. (http://gopro.com/OpenGoPro). # This copyright was auto-generated on Tue May 18 22:08:51 UTC", "__init__(self): self.commands = WifiCommands(self) self.settings = WifiSettings(self) def get(self, url: str): return url", "99 url = wifi.commands.set_digital_zoom(zoom) assert url == f\"gopro/camera/digital_zoom?percent={zoom}\" def test_get_binary(wifi): file = wifi.commands.download_file(camera_file=\"test_file\",", "from open_gopro.wifi_commands import WifiCommands, WifiSettings, WifiCommunicator from open_gopro import params @pytest.fixture def wifi():", "= wifi.commands.set_third_party_client_info() assert url == \"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi): zoom = 99 url =", "Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro). 
# This copyright was auto-generated on Tue May", "copyright was auto-generated on Tue May 18 22:08:51 UTC 2021 from pathlib import", "\"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi): zoom = 99 url = wifi.commands.set_digital_zoom(zoom) assert url == f\"gopro/camera/digital_zoom?percent={zoom}\"", "1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro). # This copyright was auto-generated on", "file: Path): return url, file yield Communicator() def test_get_with_no_params(wifi): url = wifi.commands.set_third_party_client_info() assert", "def stream_to_file(self, url: str, file: Path): return url, file yield Communicator() def test_get_with_no_params(wifi):", "2021 GoPro, Inc. (http://gopro.com/OpenGoPro). # This copyright was auto-generated on Tue May 18", "on Tue May 18 22:08:51 UTC 2021 from pathlib import Path import pytest", "This copyright was auto-generated on Tue May 18 22:08:51 UTC 2021 from pathlib", "GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro). # This copyright was", "@pytest.fixture def wifi(): class Communicator(WifiCommunicator): def __init__(self): self.commands = WifiCommands(self) self.settings = WifiSettings(self)", "self.commands = WifiCommands(self) self.settings = WifiSettings(self) def get(self, url: str): return url def", "str, file: Path): return url, file yield Communicator() def test_get_with_no_params(wifi): url = wifi.commands.set_third_party_client_info()", "url == \"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi): zoom = 99 url = wifi.commands.set_digital_zoom(zoom) assert url", "return url, file yield Communicator() def test_get_with_no_params(wifi): url = wifi.commands.set_third_party_client_info() assert url ==", "= WifiCommands(self) self.settings = WifiSettings(self) def get(self, url: str): return url def stream_to_file(self,", "= WifiSettings(self) def get(self, url: str): return url def stream_to_file(self, url: str, file:", "GoPro, Inc. (http://gopro.com/OpenGoPro). # This copyright was auto-generated on Tue May 18 22:08:51", "WifiCommands(self) self.settings = WifiSettings(self) def get(self, url: str): return url def stream_to_file(self, url:", "wifi(): class Communicator(WifiCommunicator): def __init__(self): self.commands = WifiCommands(self) self.settings = WifiSettings(self) def get(self,", "wifi.commands.set_digital_zoom(zoom) assert url == f\"gopro/camera/digital_zoom?percent={zoom}\" def test_get_binary(wifi): file = wifi.commands.download_file(camera_file=\"test_file\", local_file=Path(\"local_file\")) assert file.name", "(http://gopro.com/OpenGoPro). # This copyright was auto-generated on Tue May 18 22:08:51 UTC 2021", "= 99 url = wifi.commands.set_digital_zoom(zoom) assert url == f\"gopro/camera/digital_zoom?percent={zoom}\" def test_get_binary(wifi): file =", "UTC 2021 from pathlib import Path import pytest from open_gopro.wifi_commands import WifiCommands, WifiSettings,", "test_get_with_params(wifi): zoom = 99 url = wifi.commands.set_digital_zoom(zoom) assert url == f\"gopro/camera/digital_zoom?percent={zoom}\" def test_get_binary(wifi):", "test_wifi_commands.py/Open GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro). 
# This copyright", "Communicator(WifiCommunicator): def __init__(self): self.commands = WifiCommands(self) self.settings = WifiSettings(self) def get(self, url: str):", "yield Communicator() def test_get_with_no_params(wifi): url = wifi.commands.set_third_party_client_info() assert url == \"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi):", "url = wifi.commands.set_third_party_client_info() assert url == \"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi): zoom = 99 url", "test_get_with_no_params(wifi): url = wifi.commands.set_third_party_client_info() assert url == \"gp/gpControl/command/set_client_info\" def test_get_with_params(wifi): zoom = 99", "def test_get_with_params(wifi): zoom = 99 url = wifi.commands.set_digital_zoom(zoom) assert url == f\"gopro/camera/digital_zoom?percent={zoom}\" def", "open_gopro import params @pytest.fixture def wifi(): class Communicator(WifiCommunicator): def __init__(self): self.commands = WifiCommands(self)", "WifiCommands, WifiSettings, WifiCommunicator from open_gopro import params @pytest.fixture def wifi(): class Communicator(WifiCommunicator): def", "open_gopro.wifi_commands import WifiCommands, WifiSettings, WifiCommunicator from open_gopro import params @pytest.fixture def wifi(): class", "WifiSettings(self) def get(self, url: str): return url def stream_to_file(self, url: str, file: Path):", "WifiCommunicator from open_gopro import params @pytest.fixture def wifi(): class Communicator(WifiCommunicator): def __init__(self): self.commands", "import WifiCommands, WifiSettings, WifiCommunicator from open_gopro import params @pytest.fixture def wifi(): class Communicator(WifiCommunicator):", "May 18 22:08:51 UTC 2021 from pathlib import Path import pytest from open_gopro.wifi_commands", "(C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro). # This copyright was auto-generated on Tue" ]
[ "from praline.client.project.pipeline.stages.stage import stage from praline.client.repository.remote_proxy import RemoteProxy from praline.common.file_system import FileSystem, join", "pass def predicate(file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any]): return not program_arguments['global']['skip_formatting']", "the praline-client.config file or add it to the path environment variable\") project_directory =", "Any]): return not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate) def load_clang_format(file_system: FileSystem, resources: StageResources,", "praline-client.config file or add it to the path environment variable\") project_directory = resources['project_directory']", "program_arguments: Dict[str, Any], configuration: Dict[str, Any]): return not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate)", "else: clang_format_executable = file_system.which('clang-format') if clang_format_executable is None: raise ClangFormatConfigurationError(\"coudn't find clang-format in", "load_clang_format(file_system: FileSystem, resources: StageResources, cache: Dict[str, Any], program_arguments: Dict[str, Any], configuration: Dict[str, Any],", "praline.common.file_system import FileSystem, join from typing import Any, Dict clang_format_style_file_contents = \"\"\"\\ Language:", "def load_clang_format(file_system: FileSystem, resources: StageResources, cache: Dict[str, Any], program_arguments: Dict[str, Any], configuration: Dict[str,", "praline.client.repository.remote_proxy import RemoteProxy from praline.common.file_system import FileSystem, join from typing import Any, Dict", "RemoteProxy from praline.common.file_system import FileSystem, join from typing import Any, Dict clang_format_style_file_contents =", "path environment variable\") project_directory = resources['project_directory'] resources['clang_format_executable'] = clang_format_executable resources['clang_format_style_file'] = clang_format_style_file =", "configuration: clang_format_executable = configuration['clang-format-executable-path'] if not file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user supplied clang-format '{clang_format_executable}' is", "FileSystem, join from typing import Any, Dict clang_format_style_file_contents = \"\"\"\\ Language: Cpp AccessModifierOffset:", "not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate) def load_clang_format(file_system: FileSystem, resources: StageResources, cache: Dict[str,", "FixNamespaceComments: false IndentWidth: 4 PointerAlignment: Left ReflowComments: true SortIncludes: true SortUsingDeclarations: true SpaceAfterTemplateKeyword:", "clang-format in path -- either supply it in the praline-client.config file or add", "import RemoteProxy from praline.common.file_system import FileSystem, join from typing import Any, Dict clang_format_style_file_contents", "= configuration['clang-format-executable-path'] if not file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user supplied clang-format '{clang_format_executable}' is not a", "Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy): if 
'clang-format-executable-path' in configuration: clang_format_executable = configuration['clang-format-executable-path']", "configuration['clang-format-executable-path'] if not file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user supplied clang-format '{clang_format_executable}' is not a file\")", "to the path environment variable\") project_directory = resources['project_directory'] resources['clang_format_executable'] = clang_format_executable resources['clang_format_style_file'] =", "clang-format '{clang_format_executable}' is not a file\") else: clang_format_executable = file_system.which('clang-format') if clang_format_executable is", "ClangFormatConfigurationError(\"coudn't find clang-format in path -- either supply it in the praline-client.config file", "from praline.client.repository.remote_proxy import RemoteProxy from praline.common.file_system import FileSystem, join from typing import Any,", "true BreakBeforeBraces: Allman ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments: false IndentWidth: 4 PointerAlignment: Left", "either supply it in the praline-client.config file or add it to the path", "-- either supply it in the praline-client.config file or add it to the", "AlignTrailingComments: true AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations: true BreakBeforeBraces: Allman ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine:", "Dict[str, Any]): return not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate) def load_clang_format(file_system: FileSystem, resources:", "true SortUsingDeclarations: true SpaceAfterTemplateKeyword: false SpacesInAngles: false UseTab: Never \"\"\" class ClangFormatConfigurationError(Exception): pass", "FileSystem, resources: StageResources, cache: Dict[str, Any], program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy:", "file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user supplied clang-format '{clang_format_executable}' is not a file\") else: clang_format_executable =", "= file_system.which('clang-format') if clang_format_executable is None: raise ClangFormatConfigurationError(\"coudn't find clang-format in path --", "ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments: false IndentWidth: 4 PointerAlignment: Left ReflowComments: true SortIncludes: true SortUsingDeclarations:", "clang_format_executable = file_system.which('clang-format') if clang_format_executable is None: raise ClangFormatConfigurationError(\"coudn't find clang-format in path", "def predicate(file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any]): return not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']],", "import FileSystem, join from typing import Any, Dict clang_format_style_file_contents = \"\"\"\\ Language: Cpp", "supply it in the praline-client.config file or add it to the path environment", "SpaceAfterTemplateKeyword: false SpacesInAngles: false UseTab: Never \"\"\" class ClangFormatConfigurationError(Exception): pass def predicate(file_system: FileSystem,", "file\") else: clang_format_executable = file_system.which('clang-format') if clang_format_executable is None: raise ClangFormatConfigurationError(\"coudn't find clang-format", 
"clang_format_style_file_contents = \"\"\"\\ Language: Cpp AccessModifierOffset: -4 AlignTrailingComments: true AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine: None", "import stage from praline.client.repository.remote_proxy import RemoteProxy from praline.common.file_system import FileSystem, join from typing", "resources: StageResources, cache: Dict[str, Any], program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy):", "UseTab: Never \"\"\" class ClangFormatConfigurationError(Exception): pass def predicate(file_system: FileSystem, program_arguments: Dict[str, Any], configuration:", "false AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations: true BreakBeforeBraces: Allman ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments: false", "praline.client.project.pipeline.stage_resources import StageResources from praline.client.project.pipeline.stages.stage import stage from praline.client.repository.remote_proxy import RemoteProxy from praline.common.file_system", "raise ClangFormatConfigurationError(\"coudn't find clang-format in path -- either supply it in the praline-client.config", "praline.client.project.pipeline.stages.stage import stage from praline.client.repository.remote_proxy import RemoteProxy from praline.common.file_system import FileSystem, join from", "ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments: false IndentWidth: 4 PointerAlignment: Left ReflowComments: true SortIncludes:", "Any, Dict clang_format_style_file_contents = \"\"\"\\ Language: Cpp AccessModifierOffset: -4 AlignTrailingComments: true AllowShortBlocksOnASingleLine: false", "if 'clang-format-executable-path' in configuration: clang_format_executable = configuration['clang-format-executable-path'] if not file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user supplied", "join from typing import Any, Dict clang_format_style_file_contents = \"\"\"\\ Language: Cpp AccessModifierOffset: -4", "FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any]): return not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'],", "Dict[str, Any], program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy): if 'clang-format-executable-path' in", "cache: Dict[str, Any], program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy): if 'clang-format-executable-path'", "in the praline-client.config file or add it to the path environment variable\") project_directory", "AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations: true BreakBeforeBraces: Allman ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments: false IndentWidth:", "Dict[str, Any], configuration: Dict[str, Any]): return not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate) def", "the path environment variable\") project_directory = resources['project_directory'] resources['clang_format_executable'] = clang_format_executable resources['clang_format_style_file'] = clang_format_style_file", "configuration: Dict[str, Any], remote_proxy: RemoteProxy): if 'clang-format-executable-path' in configuration: clang_format_executable = 
configuration['clang-format-executable-path'] if", "false SpacesInAngles: false UseTab: Never \"\"\" class ClangFormatConfigurationError(Exception): pass def predicate(file_system: FileSystem, program_arguments:", "false UseTab: Never \"\"\" class ClangFormatConfigurationError(Exception): pass def predicate(file_system: FileSystem, program_arguments: Dict[str, Any],", "variable\") project_directory = resources['project_directory'] resources['clang_format_executable'] = clang_format_executable resources['clang_format_style_file'] = clang_format_style_file = join(project_directory, '.clang-format')", "SortIncludes: true SortUsingDeclarations: true SpaceAfterTemplateKeyword: false SpacesInAngles: false UseTab: Never \"\"\" class ClangFormatConfigurationError(Exception):", "= resources['project_directory'] resources['clang_format_executable'] = clang_format_executable resources['clang_format_style_file'] = clang_format_style_file = join(project_directory, '.clang-format') file_system.create_file_if_missing(clang_format_style_file, clang_format_style_file_contents)", "in configuration: clang_format_executable = configuration['clang-format-executable-path'] if not file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user supplied clang-format '{clang_format_executable}'", "or add it to the path environment variable\") project_directory = resources['project_directory'] resources['clang_format_executable'] =", "a file\") else: clang_format_executable = file_system.which('clang-format') if clang_format_executable is None: raise ClangFormatConfigurationError(\"coudn't find", "predicate(file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any]): return not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file',", "predicate=predicate) def load_clang_format(file_system: FileSystem, resources: StageResources, cache: Dict[str, Any], program_arguments: Dict[str, Any], configuration:", "clang_format_executable = configuration['clang-format-executable-path'] if not file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user supplied clang-format '{clang_format_executable}' is not", "None: raise ClangFormatConfigurationError(\"coudn't find clang-format in path -- either supply it in the", "it in the praline-client.config file or add it to the path environment variable\")", "from praline.client.project.pipeline.stage_resources import StageResources from praline.client.project.pipeline.stages.stage import stage from praline.client.repository.remote_proxy import RemoteProxy from", "output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate) def load_clang_format(file_system: FileSystem, resources: StageResources, cache: Dict[str, Any], program_arguments: Dict[str,", "Any], program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy): if 'clang-format-executable-path' in configuration:", "SortUsingDeclarations: true SpaceAfterTemplateKeyword: false SpacesInAngles: false UseTab: Never \"\"\" class ClangFormatConfigurationError(Exception): pass def", "Allman ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments: false IndentWidth: 4 PointerAlignment: Left ReflowComments: true", "stage from praline.client.repository.remote_proxy import RemoteProxy from praline.common.file_system import FileSystem, join from typing import", "None AlwaysBreakTemplateDeclarations: 
true BreakBeforeBraces: Allman ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments: false IndentWidth: 4", "import StageResources from praline.client.project.pipeline.stages.stage import stage from praline.client.repository.remote_proxy import RemoteProxy from praline.common.file_system import", "false IndentWidth: 4 PointerAlignment: Left ReflowComments: true SortIncludes: true SortUsingDeclarations: true SpaceAfterTemplateKeyword: false", "RemoteProxy): if 'clang-format-executable-path' in configuration: clang_format_executable = configuration['clang-format-executable-path'] if not file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user", "remote_proxy: RemoteProxy): if 'clang-format-executable-path' in configuration: clang_format_executable = configuration['clang-format-executable-path'] if not file_system.is_file(clang_format_executable): raise", "IndentWidth: 4 PointerAlignment: Left ReflowComments: true SortIncludes: true SortUsingDeclarations: true SpaceAfterTemplateKeyword: false SpacesInAngles:", "'clang_format_executable'], predicate=predicate) def load_clang_format(file_system: FileSystem, resources: StageResources, cache: Dict[str, Any], program_arguments: Dict[str, Any],", "file_system.which('clang-format') if clang_format_executable is None: raise ClangFormatConfigurationError(\"coudn't find clang-format in path -- either", "Dict clang_format_style_file_contents = \"\"\"\\ Language: Cpp AccessModifierOffset: -4 AlignTrailingComments: true AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine:", "AccessModifierOffset: -4 AlignTrailingComments: true AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations: true BreakBeforeBraces: Allman ColumnLimit:", "ClangFormatConfigurationError(Exception): pass def predicate(file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any]): return not", "not file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user supplied clang-format '{clang_format_executable}' is not a file\") else: clang_format_executable", "ClangFormatConfigurationError(f\"user supplied clang-format '{clang_format_executable}' is not a file\") else: clang_format_executable = file_system.which('clang-format') if", "program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy): if 'clang-format-executable-path' in configuration: clang_format_executable", "path -- either supply it in the praline-client.config file or add it to", "Cpp AccessModifierOffset: -4 AlignTrailingComments: true AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations: true BreakBeforeBraces: Allman", "project_directory = resources['project_directory'] resources['clang_format_executable'] = clang_format_executable resources['clang_format_style_file'] = clang_format_style_file = join(project_directory, '.clang-format') file_system.create_file_if_missing(clang_format_style_file,", "4 PointerAlignment: Left ReflowComments: true SortIncludes: true SortUsingDeclarations: true SpaceAfterTemplateKeyword: false SpacesInAngles: false", "not a file\") else: clang_format_executable = file_system.which('clang-format') if clang_format_executable is None: raise ClangFormatConfigurationError(\"coudn't", "'clang-format-executable-path' in configuration: clang_format_executable = configuration['clang-format-executable-path'] if not 
file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user supplied clang-format", "SpacesInAngles: false UseTab: Never \"\"\" class ClangFormatConfigurationError(Exception): pass def predicate(file_system: FileSystem, program_arguments: Dict[str,", "Dict[str, Any], remote_proxy: RemoteProxy): if 'clang-format-executable-path' in configuration: clang_format_executable = configuration['clang-format-executable-path'] if not", "Any], configuration: Dict[str, Any]): return not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate) def load_clang_format(file_system:", "AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations: true BreakBeforeBraces: Allman ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments:", "\"\"\" class ClangFormatConfigurationError(Exception): pass def predicate(file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any]):", "PointerAlignment: Left ReflowComments: true SortIncludes: true SortUsingDeclarations: true SpaceAfterTemplateKeyword: false SpacesInAngles: false UseTab:", "StageResources from praline.client.project.pipeline.stages.stage import stage from praline.client.repository.remote_proxy import RemoteProxy from praline.common.file_system import FileSystem,", "if clang_format_executable is None: raise ClangFormatConfigurationError(\"coudn't find clang-format in path -- either supply", "clang_format_executable is None: raise ClangFormatConfigurationError(\"coudn't find clang-format in path -- either supply it", "true FixNamespaceComments: false IndentWidth: 4 PointerAlignment: Left ReflowComments: true SortIncludes: true SortUsingDeclarations: true", "'{clang_format_executable}' is not a file\") else: clang_format_executable = file_system.which('clang-format') if clang_format_executable is None:", "raise ClangFormatConfigurationError(f\"user supplied clang-format '{clang_format_executable}' is not a file\") else: clang_format_executable = file_system.which('clang-format')", "Never \"\"\" class ClangFormatConfigurationError(Exception): pass def predicate(file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str,", "environment variable\") project_directory = resources['project_directory'] resources['clang_format_executable'] = clang_format_executable resources['clang_format_style_file'] = clang_format_style_file = join(project_directory,", "file or add it to the path environment variable\") project_directory = resources['project_directory'] resources['clang_format_executable']", "120 ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments: false IndentWidth: 4 PointerAlignment: Left ReflowComments: true SortIncludes: true", "true SpaceAfterTemplateKeyword: false SpacesInAngles: false UseTab: Never \"\"\" class ClangFormatConfigurationError(Exception): pass def predicate(file_system:", "Left ReflowComments: true SortIncludes: true SortUsingDeclarations: true SpaceAfterTemplateKeyword: false SpacesInAngles: false UseTab: Never", "class ClangFormatConfigurationError(Exception): pass def predicate(file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any]): return", "import Any, Dict clang_format_style_file_contents = \"\"\"\\ Language: Cpp AccessModifierOffset: -4 AlignTrailingComments: true AllowShortBlocksOnASingleLine:", 
"typing import Any, Dict clang_format_style_file_contents = \"\"\"\\ Language: Cpp AccessModifierOffset: -4 AlignTrailingComments: true", "\"\"\"\\ Language: Cpp AccessModifierOffset: -4 AlignTrailingComments: true AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations: true", "from praline.common.file_system import FileSystem, join from typing import Any, Dict clang_format_style_file_contents = \"\"\"\\", "Language: Cpp AccessModifierOffset: -4 AlignTrailingComments: true AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations: true BreakBeforeBraces:", "= \"\"\"\\ Language: Cpp AccessModifierOffset: -4 AlignTrailingComments: true AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations:", "program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate) def load_clang_format(file_system: FileSystem, resources: StageResources, cache: Dict[str, Any],", "BreakBeforeBraces: Allman ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments: false IndentWidth: 4 PointerAlignment: Left ReflowComments:", "from typing import Any, Dict clang_format_style_file_contents = \"\"\"\\ Language: Cpp AccessModifierOffset: -4 AlignTrailingComments:", "configuration: Dict[str, Any]): return not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate) def load_clang_format(file_system: FileSystem,", "find clang-format in path -- either supply it in the praline-client.config file or", "supplied clang-format '{clang_format_executable}' is not a file\") else: clang_format_executable = file_system.which('clang-format') if clang_format_executable", "it to the path environment variable\") project_directory = resources['project_directory'] resources['clang_format_executable'] = clang_format_executable resources['clang_format_style_file']", "ReflowComments: true SortIncludes: true SortUsingDeclarations: true SpaceAfterTemplateKeyword: false SpacesInAngles: false UseTab: Never \"\"\"", "true SortIncludes: true SortUsingDeclarations: true SpaceAfterTemplateKeyword: false SpacesInAngles: false UseTab: Never \"\"\" class", "Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy): if 'clang-format-executable-path' in configuration: clang_format_executable =", "if not file_system.is_file(clang_format_executable): raise ClangFormatConfigurationError(f\"user supplied clang-format '{clang_format_executable}' is not a file\") else:", "Any], remote_proxy: RemoteProxy): if 'clang-format-executable-path' in configuration: clang_format_executable = configuration['clang-format-executable-path'] if not file_system.is_file(clang_format_executable):", "is None: raise ClangFormatConfigurationError(\"coudn't find clang-format in path -- either supply it in", "AlwaysBreakTemplateDeclarations: true BreakBeforeBraces: Allman ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine: true FixNamespaceComments: false IndentWidth: 4 PointerAlignment:", "-4 AlignTrailingComments: true AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations: true BreakBeforeBraces: Allman ColumnLimit: 120", "true AllowShortBlocksOnASingleLine: false 
AllowShortFunctionsOnASingleLine: None AlwaysBreakTemplateDeclarations: true BreakBeforeBraces: Allman ColumnLimit: 120 ConstructorInitializerAllOnOneLineOrOnePerLine: true", "return not program_arguments['global']['skip_formatting'] @stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate) def load_clang_format(file_system: FileSystem, resources: StageResources, cache:", "@stage(requirements=[['project_directory']], output=['clang_format_style_file', 'clang_format_executable'], predicate=predicate) def load_clang_format(file_system: FileSystem, resources: StageResources, cache: Dict[str, Any], program_arguments:", "in path -- either supply it in the praline-client.config file or add it", "StageResources, cache: Dict[str, Any], program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy): if", "add it to the path environment variable\") project_directory = resources['project_directory'] resources['clang_format_executable'] = clang_format_executable", "is not a file\") else: clang_format_executable = file_system.which('clang-format') if clang_format_executable is None: raise" ]
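For context, the @stage predicate above gates execution: the pipeline evaluates it before running load_clang_format, so a user who sets the skip_formatting flag bypasses the stage entirely. A hedged sketch of that gating, reusing only the predicate defined above; the argument values are made up, and None stands in for file_system since this particular predicate never touches it.

# Illustrative only: exercising the predicate with fabricated program arguments.
assert predicate(None, {'global': {'skip_formatting': False}}, {}) is True   # stage runs
assert predicate(None, {'global': {'skip_formatting': True}}, {}) is False   # stage skipped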
[ "if cleaned.get('password'): logger.info(\"%s successfully logged in\", self.cleaned_data['username']) else: logger.warn(\"Failed login attempt for %s\",", "( AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm) from django.utils.translation import ugettext_lazy as _ from contenteditor import", "that this is a new password if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not reuse a password\")", "at least 20 characters, at least one uppercase letter, at least one lowercase", "raise an error, else proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password def", "forms.CharField( label=_(\"New password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at least 20", "cleaned except forms.ValidationError: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) raise class StrictUserCreationForm(UserCreationForm): password1", "a new password if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not reuse a password\") # If password_validator", "if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not reuse a password\") # If password_validator returns errors, raise", "error, else proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password", "Requirements include: at least 20 characters, at least one uppercase letter, at least", "if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user = super(StrictAdminPasswordChangeForm, self).save()", "raise forms.ValidationError('\\n'.join(errors)) else: return password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form for editing a user\"\"\"", "= forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at least 20", "= self.cleaned_data['<PASSWORD>'] errors = models.password_errors(password) # Also check that this is a new", "models.password_errors(password) # Also check that this is a new password if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must", "logged in\", self.cleaned_data['username']) else: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) return cleaned except", "not reuse a password\") # If password_validator returns errors, raise an error, else", "def clean(self): logger = logging.getLogger(\"peacecorps.login\") try: cleaned = super(LoggingAuthenticationForm, self).clean() if cleaned.get('password'): logger.info(\"%s", "raise class StrictUserCreationForm(UserCreationForm): password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements", "errors = models.password_errors(password) # Also check that this is a new password if", "user.extra.password_expires = models.expires() user.extra.save() return user class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form residing at /admin/password_change\"\"\"", "character. 
\"\"\")) def clean_password1(self): \"\"\"Adds to the default password validation routine in order", "login attempt for %s\", self.cleaned_data.get('username')) raise class StrictUserCreationForm(UserCreationForm): password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput,", "forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at least 20 characters,", "logger.info(\"%s successfully logged in\", self.cleaned_data['username']) else: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) return", "except forms.ValidationError: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) raise class StrictUserCreationForm(UserCreationForm): password1 =", "else: return password def save(self): user = super(StrictPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save()", "in order to enforce stronger passwords\"\"\" password = self.cleaned_data['<PASSWORD>'] errors = models.password_errors(password) #", "residing at /admin/password_change\"\"\" new_password1 = forms.CharField( label=_(\"New password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password.", "self.cleaned_data['<PASSWORD>'] errors = models.password_errors(password) # Also check that this is a new password", "in order to enforce stronger passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) #", "proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form for", "from contenteditor import models class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login form to log attempts\"\"\" def", "a password. Requirements include: at least 20 characters, at least one uppercase letter,", "check that this is a new password if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not reuse a", "%s\", self.cleaned_data.get('username')) return cleaned except forms.ValidationError: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) raise", "this is a new password if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not reuse a password\") #", "forms from django.contrib.admin.forms import AdminAuthenticationForm from django.contrib.auth.forms import ( AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm) from", "= logging.getLogger(\"peacecorps.login\") try: cleaned = super(LoggingAuthenticationForm, self).clean() if cleaned.get('password'): logger.info(\"%s successfully logged in\",", "else: return password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form for editing a user\"\"\" password1 =", "log attempts\"\"\" def clean(self): logger = logging.getLogger(\"peacecorps.login\") try: cleaned = super(LoggingAuthenticationForm, self).clean() if", "else: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) return cleaned except forms.ValidationError: logger.warn(\"Failed login", "= super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save() return user class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form", "include: at least 20 characters, at least one uppercase letter, at least one", "password. 
Requirements include: at least 20 characters, at least one uppercase letter, at", "and at least one special character. \"\"\")) def clean_password1(self): \"\"\"Adds to the default", "raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user = super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires =", "form residing at /admin/password_change\"\"\" new_password1 = forms.CharField( label=_(\"New password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a", "ugettext_lazy as _ from contenteditor import models class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login form to", "models.password_errors(password) # Also check that this is a new password if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must", "this is a new password if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not reuse a password\") #", "\"\"\"Override login form to log attempts\"\"\" def clean(self): logger = logging.getLogger(\"peacecorps.login\") try: cleaned", "number, and at least one special character. \"\"\")) def clean_password1(self): \"\"\"Adds to the", "import logging from django import forms from django.contrib.admin.forms import AdminAuthenticationForm from django.contrib.auth.forms import", "self).clean() if cleaned.get('password'): logger.info(\"%s successfully logged in\", self.cleaned_data['username']) else: logger.warn(\"Failed login attempt for", "least one lowercase letter, at least one number, and at least one special", "for %s\", self.cleaned_data.get('username')) raise class StrictUserCreationForm(UserCreationForm): password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter", "order to enforce stronger passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) # Also", "cleaned = super(LoggingAuthenticationForm, self).clean() if cleaned.get('password'): logger.info(\"%s successfully logged in\", self.cleaned_data['username']) else: logger.warn(\"Failed", "# Also check that this is a new password if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not", "error, else proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user", "at least one number, and at least one special character. \"\"\")) def clean_password1(self):", "at least one number, and at least one special character. \"\"\")) def clean_new_password1(self):", "passwords\"\"\" password = self.cleaned_data['<PASSWORD>'] errors = models.password_errors(password) # Also check that this is", "the default password validation routine in order to enforce stronger passwords\"\"\" password =", "successfully logged in\", self.cleaned_data['username']) else: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) return cleaned", "a password\") # If password_validator returns errors, raise an error, else proceed. if", "one number, and at least one special character. \"\"\")) def clean_new_password1(self): \"\"\"Adds to", "new password if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not reuse a password\") # If password_validator returns", "logger = logging.getLogger(\"peacecorps.login\") try: cleaned = super(LoggingAuthenticationForm, self).clean() if cleaned.get('password'): logger.info(\"%s successfully logged", "number, and at least one special character. 
\"\"\")) def clean_new_password1(self): \"\"\"Adds to the", "password\") # If password_validator returns errors, raise an error, else proceed. if errors:", "# Also check that this is a new password if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not", "return password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form for editing a user\"\"\" password1 = forms.CharField(", "super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save() return user class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form residing", "user class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form residing at /admin/password_change\"\"\" new_password1 = forms.CharField( label=_(\"New password\"),", "proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user = super(StrictPasswordChangeForm,", "passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) # Also check that this is", "label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at least 20 characters, at", "at least one special character. \"\"\")) def clean_password1(self): \"\"\"Adds to the default password", "is a new password if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not reuse a password\") # If", "= forms.CharField( label=_(\"New password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at least", "new_password1 = forms.CharField( label=_(\"New password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at", "clean_password1(self): \"\"\"Adds to the default password validation routine in order to enforce stronger", "\"\"\"Password form residing at /admin/password_change\"\"\" new_password1 = forms.CharField( label=_(\"New password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter", "super(LoggingAuthenticationForm, self).clean() if cleaned.get('password'): logger.info(\"%s successfully logged in\", self.cleaned_data['username']) else: logger.warn(\"Failed login attempt", "to log attempts\"\"\" def clean(self): logger = logging.getLogger(\"peacecorps.login\") try: cleaned = super(LoggingAuthenticationForm, self).clean()", "an error, else proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm):", "import ugettext_lazy as _ from contenteditor import models class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login form", "logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) raise class StrictUserCreationForm(UserCreationForm): password1 = forms.CharField( label=_(\"Password\"),", "logging.getLogger(\"peacecorps.login\") try: cleaned = super(LoggingAuthenticationForm, self).clean() if cleaned.get('password'): logger.info(\"%s successfully logged in\", self.cleaned_data['username'])", "one special character. \"\"\")) def clean_password1(self): \"\"\"Adds to the default password validation routine", "password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at least", "20 characters, at least one uppercase letter, at least one lowercase letter, at", "errors, raise an error, else proceed. 
if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password", "import models class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login form to log attempts\"\"\" def clean(self): logger", "form for editing a user\"\"\" password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a", "password_validator returns errors, raise an error, else proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else:", "return cleaned except forms.ValidationError: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) raise class StrictUserCreationForm(UserCreationForm):", "validation routine in order to enforce stronger passwords\"\"\" password = self.cleaned_data['<PASSWORD>'] errors =", "If password_validator returns errors, raise an error, else proceed. if errors: raise forms.ValidationError('\\n'.join(errors))", "password def save(self): user = super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save() return user", "one number, and at least one special character. \"\"\")) def clean_password1(self): \"\"\"Adds to", "one lowercase letter, at least one number, and at least one special character.", "AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm) from django.utils.translation import ugettext_lazy as _ from contenteditor import models", "password = self.cleaned_data['password1'] errors = models.password_errors(password) # If password_validator returns errors, raise an", "forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user = super(StrictPasswordChangeForm, self).save() user.extra.password_expires = models.expires()", "user = super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save() return user class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password", "return user class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form residing at /admin/password_change\"\"\" new_password1 = forms.CharField( label=_(\"New", "\"\"\"Password form for editing a user\"\"\" password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter", "StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form residing at /admin/password_change\"\"\" new_password1 = forms.CharField( label=_(\"New password\"), widget=forms.PasswordInput, help_text=_(\"\"\"", "password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at least 20 characters, at", "clean_new_password1(self): \"\"\"Adds to the default password validation routine in order to enforce stronger", "<reponame>cmc333333/peacecorps-site import logging from django import forms from django.contrib.admin.forms import AdminAuthenticationForm from django.contrib.auth.forms", "class StrictUserCreationForm(UserCreationForm): password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include:", "at /admin/password_change\"\"\" new_password1 = forms.CharField( label=_(\"New password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. 
Requirements", "forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user = super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires = models.expires()", "errors.append(\"Must not reuse a password\") # If password_validator returns errors, raise an error,", "self).save() user.extra.password_expires = models.expires() user.extra.save() return user class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form residing at", "raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user = super(StrictPasswordChangeForm, self).save() user.extra.password_expires =", "least one special character. \"\"\")) def clean_new_password1(self): \"\"\"Adds to the default password validation", "widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at least 20 characters, at least", "enforce stronger passwords\"\"\" password = self.cleaned_data['<PASSWORD>'] errors = models.password_errors(password) # Also check that", "validation routine in order to enforce stronger passwords\"\"\" password = self.cleaned_data['password1'] errors =", "django import forms from django.contrib.admin.forms import AdminAuthenticationForm from django.contrib.auth.forms import ( AdminPasswordChangeForm, PasswordChangeForm,", "if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form for editing", "to the default password validation routine in order to enforce stronger passwords\"\"\" password", "errors = models.password_errors(password) # If password_validator returns errors, raise an error, else proceed.", "UserCreationForm) from django.utils.translation import ugettext_lazy as _ from contenteditor import models class LoggingAuthenticationForm(AdminAuthenticationForm):", "in\", self.cleaned_data['username']) else: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) return cleaned except forms.ValidationError:", "models.password_errors(password) # If password_validator returns errors, raise an error, else proceed. if errors:", "Enter a password. Requirements include: at least 20 characters, at least one uppercase", "from django import forms from django.contrib.admin.forms import AdminAuthenticationForm from django.contrib.auth.forms import ( AdminPasswordChangeForm,", "proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user = super(StrictAdminPasswordChangeForm,", "self.cleaned_data['password1'] errors = models.password_errors(password) # Also check that this is a new password", "least one uppercase letter, at least one lowercase letter, at least one number,", "enforce stronger passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) # Also check that", "if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user = super(StrictPasswordChangeForm, self).save()", "character. \"\"\")) def clean_new_password1(self): \"\"\"Adds to the default password validation routine in order", "stronger passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) # Also check that this", "special character. 
\"\"\")) def clean_password1(self): \"\"\"Adds to the default password validation routine in", "password = self.cleaned_data['password1'] errors = models.password_errors(password) # Also check that this is a", "\"\"\")) def clean_password1(self): \"\"\"Adds to the default password validation routine in order to", "from django.utils.translation import ugettext_lazy as _ from contenteditor import models class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override", "= self.cleaned_data['password1'] errors = models.password_errors(password) # If password_validator returns errors, raise an error,", "def save(self): user = super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save() return user class", "at least one uppercase letter, at least one lowercase letter, at least one", "self.cleaned_data['password1'] errors = models.password_errors(password) # If password_validator returns errors, raise an error, else", "logging from django import forms from django.contrib.admin.forms import AdminAuthenticationForm from django.contrib.auth.forms import (", "password validation routine in order to enforce stronger passwords\"\"\" password = self.cleaned_data['<PASSWORD>'] errors", "order to enforce stronger passwords\"\"\" password = self.cleaned_data['<PASSWORD>'] errors = models.password_errors(password) # Also", "save(self): user = super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save() return user class StrictPasswordChangeForm(PasswordChangeForm):", "password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form for editing a user\"\"\" password1 = forms.CharField( label=_(\"Password\"),", "from django.contrib.auth.forms import ( AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm) from django.utils.translation import ugettext_lazy as _", "routine in order to enforce stronger passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password)", "from django.contrib.admin.forms import AdminAuthenticationForm from django.contrib.auth.forms import ( AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm) from django.utils.translation", "class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form for editing a user\"\"\" password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput,", "uppercase letter, at least one lowercase letter, at least one number, and at", "= models.password_errors(password) # If password_validator returns errors, raise an error, else proceed. if", "editing a user\"\"\" password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements", "reuse a password\") # If password_validator returns errors, raise an error, else proceed.", "label=_(\"New password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at least 20 characters,", "for %s\", self.cleaned_data.get('username')) return cleaned except forms.ValidationError: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username'))", "help_text=_(\"\"\" Enter a password. 
Requirements include: at least 20 characters, at least one", "= models.password_errors(password) # Also check that this is a new password if self.user.check_password(self.cleaned_data['<PASSWORD>1']):", "is a new password if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not reuse a password\") # If", "password = self.cleaned_data['<PASSWORD>'] errors = models.password_errors(password) # Also check that this is a", "stronger passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) # If password_validator returns errors,", "import forms from django.contrib.admin.forms import AdminAuthenticationForm from django.contrib.auth.forms import ( AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm)", "to enforce stronger passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) # Also check", "= self.cleaned_data['password1'] errors = models.password_errors(password) # Also check that this is a new", "return password def save(self): user = super(StrictPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save() return", "clean(self): logger = logging.getLogger(\"peacecorps.login\") try: cleaned = super(LoggingAuthenticationForm, self).clean() if cleaned.get('password'): logger.info(\"%s successfully", "special character. \"\"\")) def clean_new_password1(self): \"\"\"Adds to the default password validation routine in", "self.cleaned_data.get('username')) return cleaned except forms.ValidationError: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) raise class", "passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) # If password_validator returns errors, raise", "import ( AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm) from django.utils.translation import ugettext_lazy as _ from contenteditor", "errors: raise forms.ValidationError('\\n'.join(errors)) else: return password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form for editing a", "Also check that this is a new password if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not reuse", "PasswordChangeForm, UserCreationForm) from django.utils.translation import ugettext_lazy as _ from contenteditor import models class", "self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not reuse a password\") # If password_validator returns errors, raise an", "login form to log attempts\"\"\" def clean(self): logger = logging.getLogger(\"peacecorps.login\") try: cleaned =", "\"\"\"Adds to the default password validation routine in order to enforce stronger passwords\"\"\"", "a user\"\"\" password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. 
Requirements include:", "default password validation routine in order to enforce stronger passwords\"\"\" password = self.cleaned_data['<PASSWORD>']", "def clean_new_password1(self): \"\"\"Adds to the default password validation routine in order to enforce", "return password def save(self): user = super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save() return", "def clean_password1(self): \"\"\"Adds to the default password validation routine in order to enforce", "self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not reuse a password\") # If password_validator returns errors, raise an", "characters, at least one uppercase letter, at least one lowercase letter, at least", "to enforce stronger passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) # If password_validator", "class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form residing at /admin/password_change\"\"\" new_password1 = forms.CharField( label=_(\"New password\"), widget=forms.PasswordInput,", "StrictUserCreationForm(UserCreationForm): password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at", "/admin/password_change\"\"\" new_password1 = forms.CharField( label=_(\"New password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include:", "contenteditor import models class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login form to log attempts\"\"\" def clean(self):", "django.contrib.admin.forms import AdminAuthenticationForm from django.contrib.auth.forms import ( AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm) from django.utils.translation import", "attempt for %s\", self.cleaned_data.get('username')) return cleaned except forms.ValidationError: logger.warn(\"Failed login attempt for %s\",", "self.cleaned_data.get('username')) raise class StrictUserCreationForm(UserCreationForm): password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password.", "\"\"\")) def clean_new_password1(self): \"\"\"Adds to the default password validation routine in order to", "Also check that this is a new password if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not reuse", "class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login form to log attempts\"\"\" def clean(self): logger = logging.getLogger(\"peacecorps.login\")", "LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login form to log attempts\"\"\" def clean(self): logger = logging.getLogger(\"peacecorps.login\") try:", "stronger passwords\"\"\" password = self.cleaned_data['<PASSWORD>'] errors = models.password_errors(password) # Also check that this", "_ from contenteditor import models class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login form to log attempts\"\"\"", "an error, else proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self):", "models.expires() user.extra.save() return user class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form residing at /admin/password_change\"\"\" new_password1 =", "lowercase letter, at least one number, and at least one special character. \"\"\"))", "one special character. 
\"\"\")) def clean_new_password1(self): \"\"\"Adds to the default password validation routine", "errors: raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user = super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires", "forms.ValidationError: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) raise class StrictUserCreationForm(UserCreationForm): password1 = forms.CharField(", "as _ from contenteditor import models class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login form to log", "password if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not reuse a password\") # If password_validator returns errors,", "= models.expires() user.extra.save() return user class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form residing at /admin/password_change\"\"\" new_password1", "django.utils.translation import ugettext_lazy as _ from contenteditor import models class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login", "%s\", self.cleaned_data.get('username')) raise class StrictUserCreationForm(UserCreationForm): password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a", "# If password_validator returns errors, raise an error, else proceed. if errors: raise", "new password if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not reuse a password\") # If password_validator returns", "least 20 characters, at least one uppercase letter, at least one lowercase letter,", "StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form for editing a user\"\"\" password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\"", "raise an error, else proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password class", "letter, at least one number, and at least one special character. \"\"\")) def", "password if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not reuse a password\") # If password_validator returns errors,", "returns errors, raise an error, else proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return", "AdminAuthenticationForm from django.contrib.auth.forms import ( AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm) from django.utils.translation import ugettext_lazy as", "check that this is a new password if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not reuse a", "= models.password_errors(password) # Also check that this is a new password if self.user.check_password(self.cleaned_data['password1']):", "for editing a user\"\"\" password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password.", "least one number, and at least one special character. 
\"\"\")) def clean_new_password1(self): \"\"\"Adds", "enforce stronger passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) # If password_validator returns", "= super(LoggingAuthenticationForm, self).clean() if cleaned.get('password'): logger.info(\"%s successfully logged in\", self.cleaned_data['username']) else: logger.warn(\"Failed login", "routine in order to enforce stronger passwords\"\"\" password = self.cleaned_data['<PASSWORD>'] errors = models.password_errors(password)", "if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not reuse a password\") # If password_validator returns errors, raise", "django.contrib.auth.forms import ( AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm) from django.utils.translation import ugettext_lazy as _ from", "form to log attempts\"\"\" def clean(self): logger = logging.getLogger(\"peacecorps.login\") try: cleaned = super(LoggingAuthenticationForm,", "order to enforce stronger passwords\"\"\" password = self.cleaned_data['password1'] errors = models.password_errors(password) # If", "least one number, and at least one special character. \"\"\")) def clean_password1(self): \"\"\"Adds", "self.cleaned_data['username']) else: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) return cleaned except forms.ValidationError: logger.warn(\"Failed", "that this is a new password if self.user.check_password(self.cleaned_data['<PASSWORD>1']): errors.append(\"Must not reuse a password\")", "one uppercase letter, at least one lowercase letter, at least one number, and", "attempt for %s\", self.cleaned_data.get('username')) raise class StrictUserCreationForm(UserCreationForm): password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\"", "cleaned.get('password'): logger.info(\"%s successfully logged in\", self.cleaned_data['username']) else: logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username'))", "import AdminAuthenticationForm from django.contrib.auth.forms import ( AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm) from django.utils.translation import ugettext_lazy", "attempts\"\"\" def clean(self): logger = logging.getLogger(\"peacecorps.login\") try: cleaned = super(LoggingAuthenticationForm, self).clean() if cleaned.get('password'):", "letter, at least one lowercase letter, at least one number, and at least", "password validation routine in order to enforce stronger passwords\"\"\" password = self.cleaned_data['password1'] errors", "else proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form", "forms.ValidationError('\\n'.join(errors)) else: return password class StrictAdminPasswordChangeForm(AdminPasswordChangeForm): \"\"\"Password form for editing a user\"\"\" password1", "else: return password def save(self): user = super(StrictAdminPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save()", "login attempt for %s\", self.cleaned_data.get('username')) return cleaned except forms.ValidationError: logger.warn(\"Failed login attempt for", "least one special character. 
\"\"\")) def clean_password1(self): \"\"\"Adds to the default password validation", "models class LoggingAuthenticationForm(AdminAuthenticationForm): \"\"\"Override login form to log attempts\"\"\" def clean(self): logger =", "to enforce stronger passwords\"\"\" password = self.cleaned_data['<PASSWORD>'] errors = models.password_errors(password) # Also check", "user.extra.save() return user class StrictPasswordChangeForm(PasswordChangeForm): \"\"\"Password form residing at /admin/password_change\"\"\" new_password1 = forms.CharField(", "a new password if self.user.check_password(self.cleaned_data['password1']): errors.append(\"Must not reuse a password\") # If password_validator", "and at least one special character. \"\"\")) def clean_new_password1(self): \"\"\"Adds to the default", "errors: raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user = super(StrictPasswordChangeForm, self).save() user.extra.password_expires", "at least one lowercase letter, at least one number, and at least one", "logger.warn(\"Failed login attempt for %s\", self.cleaned_data.get('username')) return cleaned except forms.ValidationError: logger.warn(\"Failed login attempt", "password def save(self): user = super(StrictPasswordChangeForm, self).save() user.extra.password_expires = models.expires() user.extra.save() return user", "default password validation routine in order to enforce stronger passwords\"\"\" password = self.cleaned_data['password1']", "at least one special character. \"\"\")) def clean_new_password1(self): \"\"\"Adds to the default password", "try: cleaned = super(LoggingAuthenticationForm, self).clean() if cleaned.get('password'): logger.info(\"%s successfully logged in\", self.cleaned_data['username']) else:", "else proceed. if errors: raise forms.ValidationError('\\n'.join(errors)) else: return password def save(self): user =", "user\"\"\" password1 = forms.CharField( label=_(\"Password\"), widget=forms.PasswordInput, help_text=_(\"\"\" Enter a password. Requirements include: at" ]
[ "= \"kalliochannel\" self.message = \"kalliomessage\" def testParameters(self): def run_test(parameters_to_test): with self.assertRaises(MissingParameterException): Slack(**parameters_to_test) #", "\"kalliochannel\" self.message = \"kalliomessage\" def testParameters(self): def run_test(parameters_to_test): with self.assertRaises(MissingParameterException): Slack(**parameters_to_test) # empty", "unittest from kalliope.core.NeuronModule import MissingParameterException from kalliope.neurons.slack.slack import Slack class TestSlack(unittest.TestCase): def setUp(self):", "run_test(parameters_to_test): with self.assertRaises(MissingParameterException): Slack(**parameters_to_test) # empty parameters = dict() run_test(parameters) # missing message", "self.channel, } run_test(parameters) # missing slack_token parameters = { \"channel\": self.channel, \"message\": self.message", "parameters = { \"channel\": self.channel, \"message\": self.message } run_test(parameters) # missing channel parameters", "empty parameters = dict() run_test(parameters) # missing message parameters = { \"slack_token\": self.slack_token,", "= \"kalliomessage\" def testParameters(self): def run_test(parameters_to_test): with self.assertRaises(MissingParameterException): Slack(**parameters_to_test) # empty parameters =", "message parameters = { \"slack_token\": self.slack_token, \"channel\": self.channel, } run_test(parameters) # missing slack_token", "dict() run_test(parameters) # missing message parameters = { \"slack_token\": self.slack_token, \"channel\": self.channel, }", "\"message\": self.message } run_test(parameters) # missing channel parameters = { \"slack_token\": self.slack_token, \"message\":", "missing message parameters = { \"slack_token\": self.slack_token, \"channel\": self.channel, } run_test(parameters) # missing", "{ \"slack_token\": self.slack_token, \"channel\": self.channel, } run_test(parameters) # missing slack_token parameters = {", "run_test(parameters) # missing channel parameters = { \"slack_token\": self.slack_token, \"message\": self.message } run_test(parameters)", "import MissingParameterException from kalliope.neurons.slack.slack import Slack class TestSlack(unittest.TestCase): def setUp(self): self.slack_token=\"<PASSWORD>\" self.channel =", "# missing slack_token parameters = { \"channel\": self.channel, \"message\": self.message } run_test(parameters) #", "\"channel\": self.channel, \"message\": self.message } run_test(parameters) # missing channel parameters = { \"slack_token\":", "self.channel, \"message\": self.message } run_test(parameters) # missing channel parameters = { \"slack_token\": self.slack_token,", "# missing message parameters = { \"slack_token\": self.slack_token, \"channel\": self.channel, } run_test(parameters) #", "def setUp(self): self.slack_token=\"<PASSWORD>\" self.channel = \"kalliochannel\" self.message = \"kalliomessage\" def testParameters(self): def run_test(parameters_to_test):", "kalliope.neurons.slack.slack import Slack class TestSlack(unittest.TestCase): def setUp(self): self.slack_token=\"<PASSWORD>\" self.channel = \"kalliochannel\" self.message =", "\"slack_token\": self.slack_token, \"channel\": self.channel, } run_test(parameters) # missing slack_token parameters = { \"channel\":", "def run_test(parameters_to_test): with self.assertRaises(MissingParameterException): Slack(**parameters_to_test) # empty parameters = dict() run_test(parameters) # missing", "Slack(**parameters_to_test) # empty parameters = dict() run_test(parameters) # missing message 
parameters = {", "kalliope.core.NeuronModule import MissingParameterException from kalliope.neurons.slack.slack import Slack class TestSlack(unittest.TestCase): def setUp(self): self.slack_token=\"<PASSWORD>\" self.channel", "# empty parameters = dict() run_test(parameters) # missing message parameters = { \"slack_token\":", "= { \"channel\": self.channel, \"message\": self.message } run_test(parameters) # missing channel parameters =", "setUp(self): self.slack_token=\"<PASSWORD>\" self.channel = \"kalliochannel\" self.message = \"kalliomessage\" def testParameters(self): def run_test(parameters_to_test): with", "MissingParameterException from kalliope.neurons.slack.slack import Slack class TestSlack(unittest.TestCase): def setUp(self): self.slack_token=\"<PASSWORD>\" self.channel = \"kalliochannel\"", "from kalliope.core.NeuronModule import MissingParameterException from kalliope.neurons.slack.slack import Slack class TestSlack(unittest.TestCase): def setUp(self): self.slack_token=\"<PASSWORD>\"", "self.slack_token, \"channel\": self.channel, } run_test(parameters) # missing slack_token parameters = { \"channel\": self.channel,", "testParameters(self): def run_test(parameters_to_test): with self.assertRaises(MissingParameterException): Slack(**parameters_to_test) # empty parameters = dict() run_test(parameters) #", "} run_test(parameters) # missing channel parameters = { \"slack_token\": self.slack_token, \"message\": self.message }", "slack_token parameters = { \"channel\": self.channel, \"message\": self.message } run_test(parameters) # missing channel", "} run_test(parameters) # missing slack_token parameters = { \"channel\": self.channel, \"message\": self.message }", "parameters = { \"slack_token\": self.slack_token, \"channel\": self.channel, } run_test(parameters) # missing slack_token parameters", "from kalliope.neurons.slack.slack import Slack class TestSlack(unittest.TestCase): def setUp(self): self.slack_token=\"<PASSWORD>\" self.channel = \"kalliochannel\" self.message", "self.slack_token=\"<PASSWORD>\" self.channel = \"kalliochannel\" self.message = \"kalliomessage\" def testParameters(self): def run_test(parameters_to_test): with self.assertRaises(MissingParameterException):", "run_test(parameters) # missing slack_token parameters = { \"channel\": self.channel, \"message\": self.message } run_test(parameters)", "class TestSlack(unittest.TestCase): def setUp(self): self.slack_token=\"<PASSWORD>\" self.channel = \"kalliochannel\" self.message = \"kalliomessage\" def testParameters(self):", "= dict() run_test(parameters) # missing message parameters = { \"slack_token\": self.slack_token, \"channel\": self.channel,", "TestSlack(unittest.TestCase): def setUp(self): self.slack_token=\"<PASSWORD>\" self.channel = \"kalliochannel\" self.message = \"kalliomessage\" def testParameters(self): def", "self.message = \"kalliomessage\" def testParameters(self): def run_test(parameters_to_test): with self.assertRaises(MissingParameterException): Slack(**parameters_to_test) # empty parameters", "self.assertRaises(MissingParameterException): Slack(**parameters_to_test) # empty parameters = dict() run_test(parameters) # missing message parameters =", "def testParameters(self): def run_test(parameters_to_test): with self.assertRaises(MissingParameterException): Slack(**parameters_to_test) # empty parameters = dict() run_test(parameters)", "run_test(parameters) # missing message parameters = { \"slack_token\": self.slack_token, \"channel\": self.channel, } run_test(parameters)", "with 
self.assertRaises(MissingParameterException): Slack(**parameters_to_test) # empty parameters = dict() run_test(parameters) # missing message parameters", "parameters = dict() run_test(parameters) # missing message parameters = { \"slack_token\": self.slack_token, \"channel\":", "Slack class TestSlack(unittest.TestCase): def setUp(self): self.slack_token=\"<PASSWORD>\" self.channel = \"kalliochannel\" self.message = \"kalliomessage\" def", "\"kalliomessage\" def testParameters(self): def run_test(parameters_to_test): with self.assertRaises(MissingParameterException): Slack(**parameters_to_test) # empty parameters = dict()", "= { \"slack_token\": self.slack_token, \"channel\": self.channel, } run_test(parameters) # missing slack_token parameters =", "import unittest from kalliope.core.NeuronModule import MissingParameterException from kalliope.neurons.slack.slack import Slack class TestSlack(unittest.TestCase): def", "missing slack_token parameters = { \"channel\": self.channel, \"message\": self.message } run_test(parameters) # missing", "self.channel = \"kalliochannel\" self.message = \"kalliomessage\" def testParameters(self): def run_test(parameters_to_test): with self.assertRaises(MissingParameterException): Slack(**parameters_to_test)", "{ \"channel\": self.channel, \"message\": self.message } run_test(parameters) # missing channel parameters = {", "self.message } run_test(parameters) # missing channel parameters = { \"slack_token\": self.slack_token, \"message\": self.message", "\"channel\": self.channel, } run_test(parameters) # missing slack_token parameters = { \"channel\": self.channel, \"message\":", "import Slack class TestSlack(unittest.TestCase): def setUp(self): self.slack_token=\"<PASSWORD>\" self.channel = \"kalliochannel\" self.message = \"kalliomessage\"" ]
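A quick way to exercise just this test case from the command line; this is a standard unittest runner invocation, nothing Kalliope-specific.

# Sketch: run TestSlack with a verbose text runner.
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestSlack)
    unittest.TextTestRunner(verbosity=2).run(suite)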
[ "req = msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req) def List(): \"\"\"List instance configs in the", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "= msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type)) # TODO(b/399093071): Implement --replicas-file option. labels_message = {}", "= resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req) def", "replica['type'] == 'WITNESS': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type)) # TODO(b/399093071): Implement --replicas-file", "msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value) for key, value in six.iteritems(labels) ]) instance_config = msgs.InstanceConfig( name=config_ref.RelativeName(),", "'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return", "= apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsGetRequest(", "replicas=replica_info) if etag: instance_config.etag = etag req = msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only)", "this file except in compliance with the License. # You may obtain a", "import six def Get(config): \"\"\"Get the specified instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1')", "LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0", "collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req) def List(): \"\"\"List instance configs in", "import apis from googlecloudsdk.command_lib.ai import errors from googlecloudsdk.command_lib.util.args import labels_util from googlecloudsdk.core import", "validate_only, labels=None, etag=None): \"\"\"Create instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1')", "not update_mask: raise errors.NoFieldsSpecifiedError('No updates requested.') req = msgs.SpannerProjectsInstanceConfigsPatchRequest( name=ref.RelativeName(), instanceConfig=instance_config, updateMask=','.join(update_mask), validateOnly=args.validate_only)", "specific language governing permissions and # limitations under the License. \"\"\"Spanner instanceConfigs API", "ANY KIND, either express or implied. 
# See the License for the specific", "project_ref = resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info", "= apis.GetMessagesModule('spanner', 'v1') project_ref = resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref = resources.REGISTRY.Parse( config, params={'projectsId':", "from googlecloudsdk.core import resources import six def Get(config): \"\"\"Get the specified instance config.\"\"\"", "validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req) def Create(config, display_name, base_config, replicas, validate_only, labels=None, etag=None): \"\"\"Create instance", "# TODO(b/399093071): Implement --replicas-file option. labels_message = {} if labels is not None:", "coding: utf-8 -*- # # Copyright 2016 Google LLC. All Rights Reserved. #", "to ReplicaInfo.TypeValueValuesEnum instead # of str. replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type'] == 'READ_ONLY':", "= msgs.InstanceConfig(name=ref.RelativeName()) update_mask = [] if args.display_name is not None: instance_config.displayName = args.display_name", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "instanceConfigs API helper.\"\"\" from __future__ import absolute_import from __future__ import division from __future__", "msgs.InstanceConfig.LabelsValue, GetLabels) if labels_update.needs_update: instance_config.labels = labels_update.labels update_mask.append('labels') if not update_mask: raise errors.NoFieldsSpecifiedError('No", "from __future__ import division from __future__ import unicode_literals from apitools.base.py import list_pager from", "params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag, validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req) def Create(config,", "language governing permissions and # limitations under the License. \"\"\"Spanner instanceConfigs API helper.\"\"\"", "= msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels labels_update = labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue, GetLabels) if labels_update.needs_update: instance_config.labels", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "resources.REGISTRY.Parse( args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config = msgs.InstanceConfig(name=ref.RelativeName()) update_mask = [] if args.display_name", "OF ANY KIND, either express or implied. 
# See the License for the", "return list_pager.YieldFromList( client.projects_instanceConfigs, req, field='instanceConfigs', batch_size_attribute='pageSize') def Delete(config, etag=None, validate_only=False): \"\"\"Delete an instance", "validate_only=False): \"\"\"Delete an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1')", "client.projects_instanceConfigs.Create(req) def Patch(args): \"\"\"Update an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs =", "not None: labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value) for key, value in six.iteritems(labels)", "type=replica_type)) # TODO(b/399093071): Implement --replicas-file option. labels_message = {} if labels is not", "args.display_name update_mask.append('display_name') if args.etag is not None: instance_config.etag = args.etag def GetLabels(): req", "project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') project_ref = resources.REGISTRY.Create( 'spanner.projects',", "from googlecloudsdk.api_lib.util import apis from googlecloudsdk.command_lib.ai import errors from googlecloudsdk.command_lib.util.args import labels_util from", "parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList( client.projects_instanceConfigs, req, field='instanceConfigs', batch_size_attribute='pageSize') def Delete(config, etag=None, validate_only=False): \"\"\"Delete an", "= labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue, GetLabels) if labels_update.needs_update: instance_config.labels = labels_update.labels update_mask.append('labels') if not", "\"\"\"Get the specified instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1')", "None: labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value) for key, value in six.iteritems(labels) ])", "ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag, validateOnly=validate_only)", "client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId':", "properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config = msgs.InstanceConfig(name=ref.RelativeName()) update_mask = [] if args.display_name is not None:", "update_mask: raise errors.NoFieldsSpecifiedError('No updates requested.') req = msgs.SpannerProjectsInstanceConfigsPatchRequest( name=ref.RelativeName(), instanceConfig=instance_config, updateMask=','.join(update_mask), validateOnly=args.validate_only) return", "'v1') project_ref = resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs')", "All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the", "= resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag, validateOnly=validate_only) return", "Create(config, display_name, base_config, replicas, validate_only, labels=None, etag=None): \"\"\"Create instance configs in the project.\"\"\"", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "ref = resources.REGISTRY.Parse( args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config = msgs.InstanceConfig(name=ref.RelativeName()) update_mask = []", "apis.GetMessagesModule('spanner', 'v1') project_ref = resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail},", "name=ref.RelativeName(), etag=etag, validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req) def Create(config, display_name, base_config, replicas, validate_only, labels=None, etag=None):", "# of str. replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type'] == 'READ_ONLY': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY", "update_mask.append('labels') if not update_mask: raise errors.NoFieldsSpecifiedError('No updates requested.') req = msgs.SpannerProjectsInstanceConfigsPatchRequest( name=ref.RelativeName(), instanceConfig=instance_config,", "properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req) def List(): \"\"\"List instance configs", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "= msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList( client.projects_instanceConfigs, req, field='instanceConfigs', batch_size_attribute='pageSize') def Delete(config, etag=None, validate_only=False):", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref =", "= msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag, validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req) def Create(config, display_name, base_config, replicas, validate_only,", "instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only) return client.projects_instanceConfigs.Create(req) def Patch(args): \"\"\"Update an instance config.\"\"\" client =", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config = 
msgs.InstanceConfig(name=ref.RelativeName()) update_mask = [] if args.display_name is not", "labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value) for key, value in six.iteritems(labels) ]) instance_config", "args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config = msgs.InstanceConfig(name=ref.RelativeName()) update_mask = [] if args.display_name is", "required by applicable law or agreed to in writing, software # distributed under", "# -*- coding: utf-8 -*- # # Copyright 2016 Google LLC. All Rights", "# TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum instead # of str. replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED", "args.etag is not None: instance_config.etag = args.etag def GetLabels(): req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return", "applicable law or agreed to in writing, software # distributed under the License", "args.etag def GetLabels(): req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels labels_update = labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue,", "\"\"\"List instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner',", "client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') req = msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return", "or agreed to in writing, software # distributed under the License is distributed", "Copyright 2016 Google LLC. All Rights Reserved. # # Licensed under the Apache", "__future__ import absolute_import from __future__ import division from __future__ import unicode_literals from apitools.base.py", "[] for replica in replicas: # TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum instead #", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "= labels_update.labels update_mask.append('labels') if not update_mask: raise errors.NoFieldsSpecifiedError('No updates requested.') req = msgs.SpannerProjectsInstanceConfigsPatchRequest(", "in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') req =", "configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') req", "config_ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info = [] for replica in", "in replicas: # TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum instead # of str. replica_type", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "== 'READ_WRITE': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type'] == 'WITNESS': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append(", "TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum instead # of str. 
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if", "writing, software # distributed under the License is distributed on an \"AS IS\"", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "'WITNESS': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type)) # TODO(b/399093071): Implement --replicas-file option. labels_message", "License. # You may obtain a copy of the License at # #", "[] if args.display_name is not None: instance_config.displayName = args.display_name update_mask.append('display_name') if args.etag is", "list_pager.YieldFromList( client.projects_instanceConfigs, req, field='instanceConfigs', batch_size_attribute='pageSize') def Delete(config, etag=None, validate_only=False): \"\"\"Delete an instance config.\"\"\"", "apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs')", "labels_message = {} if labels is not None: labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key,", "client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') project_ref = resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail)", "return client.projects_instanceConfigs.Create(req) def Patch(args): \"\"\"Update an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs", "if replica['type'] == 'READ_ONLY': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type'] == 'READ_WRITE': replica_type =", "compliance with the License. # You may obtain a copy of the License", "limitations under the License. \"\"\"Spanner instanceConfigs API helper.\"\"\" from __future__ import absolute_import from", "__future__ import unicode_literals from apitools.base.py import list_pager from googlecloudsdk.api_lib.util import apis from googlecloudsdk.command_lib.ai", "field='instanceConfigs', batch_size_attribute='pageSize') def Delete(config, etag=None, validate_only=False): \"\"\"Delete an instance config.\"\"\" client = apis.GetClientInstance('spanner',", "under the License. \"\"\"Spanner instanceConfigs API helper.\"\"\" from __future__ import absolute_import from __future__", "req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels labels_update = labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue, GetLabels) if labels_update.needs_update:", "type to ReplicaInfo.TypeValueValuesEnum instead # of str. replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type'] ==", "baseConfig=base_config, labels=labels_message, replicas=replica_info) if etag: instance_config.etag = etag req = msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "utf-8 -*- # # Copyright 2016 Google LLC. All Rights Reserved. 
# #", "= [] for replica in replicas: # TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum instead", "= etag req = msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only) return client.projects_instanceConfigs.Create(req) def Patch(args):", "is not None: instance_config.etag = args.etag def GetLabels(): req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels", "msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config =", "import list_pager from googlecloudsdk.api_lib.util import apis from googlecloudsdk.command_lib.ai import errors from googlecloudsdk.command_lib.util.args import", "config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info = [] for replica in replicas: # TODO(b/399093071):", "msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only) return client.projects_instanceConfigs.Create(req) def Patch(args): \"\"\"Update an instance config.\"\"\"", "def Create(config, display_name, base_config, replicas, validate_only, labels=None, etag=None): \"\"\"Create instance configs in the", "import errors from googlecloudsdk.command_lib.util.args import labels_util from googlecloudsdk.core import properties from googlecloudsdk.core import", "not use this file except in compliance with the License. # You may", "-*- # # Copyright 2016 Google LLC. All Rights Reserved. 
# # Licensed", "License, Version 2.0 (the \"License\"); # you may not use this file except", "= msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type'] == 'READ_WRITE': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type'] == 'WITNESS':", "for replica in replicas: # TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum instead # of", "= msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value) for key, value in six.iteritems(labels) ]) instance_config =", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs')", "client.projects_instanceConfigs.Delete(req) def Create(config, display_name, base_config, replicas, validate_only, labels=None, etag=None): \"\"\"Create instance configs in", "list_pager from googlecloudsdk.api_lib.util import apis from googlecloudsdk.command_lib.ai import errors from googlecloudsdk.command_lib.util.args import labels_util", "'v1') msgs = apis.GetMessagesModule('spanner', 'v1') req = msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList( client.projects_instanceConfigs, req,", "# you may not use this file except in compliance with the License.", "def Delete(config, etag=None, validate_only=False): \"\"\"Delete an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs", "agreed to in writing, software # distributed under the License is distributed on", "return client.projects_instanceConfigs.Get(req).labels labels_update = labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue, GetLabels) if labels_update.needs_update: instance_config.labels = labels_update.labels", "update_mask.append('display_name') if args.etag is not None: instance_config.etag = args.etag def GetLabels(): req =", "'v1') req = msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList( client.projects_instanceConfigs, req, field='instanceConfigs', batch_size_attribute='pageSize') def Delete(config,", "the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') project_ref = resources.REGISTRY.Create(", "(the \"License\"); # you may not use this file except in compliance with", "apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') project_ref = resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref =", "display_name, base_config, replicas, validate_only, labels=None, etag=None): \"\"\"Create instance configs in the project.\"\"\" client", "labels_update = labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue, GetLabels) if labels_update.needs_update: instance_config.labels = labels_update.labels update_mask.append('labels') if", "replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type'] == 'READ_ONLY': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type'] ==", "import labels_util from googlecloudsdk.core import properties from googlecloudsdk.core import resources import six def", "# 
Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "apis from googlecloudsdk.command_lib.ai import errors from googlecloudsdk.command_lib.util.args import labels_util from googlecloudsdk.core import properties", "\"\"\"Delete an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref", "= resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info = [] for replica in replicas:", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type)) # TODO(b/399093071): Implement --replicas-file option. labels_message =", "= msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type'] == 'WITNESS': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type)) #", "apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') req = msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList( client.projects_instanceConfigs,", "from __future__ import unicode_literals from apitools.base.py import list_pager from googlecloudsdk.api_lib.util import apis from", "if args.display_name is not None: instance_config.displayName = args.display_name update_mask.append('display_name') if args.etag is not", "GetLabels(): req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels labels_update = labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue, GetLabels) if", "file except in compliance with the License. # You may obtain a copy", "= [] if args.display_name is not None: instance_config.displayName = args.display_name update_mask.append('display_name') if args.etag", "batch_size_attribute='pageSize') def Delete(config, etag=None, validate_only=False): \"\"\"Delete an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1')", "labels_update.needs_update: instance_config.labels = labels_update.labels update_mask.append('labels') if not update_mask: raise errors.NoFieldsSpecifiedError('No updates requested.') req", "etag req = msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only) return client.projects_instanceConfigs.Create(req) def Patch(args): \"\"\"Update", "Implement --replicas-file option. 
labels_message = {} if labels is not None: labels_message =", "License for the specific language governing permissions and # limitations under the License.", "to in writing, software # distributed under the License is distributed on an", "in six.iteritems(labels) ]) instance_config = msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config, labels=labels_message, replicas=replica_info) if etag:", "= resources.REGISTRY.Parse( args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config = msgs.InstanceConfig(name=ref.RelativeName()) update_mask = [] if", "import absolute_import from __future__ import division from __future__ import unicode_literals from apitools.base.py import", "properties from googlecloudsdk.core import resources import six def Get(config): \"\"\"Get the specified instance", "Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "elif replica['type'] == 'READ_WRITE': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type'] == 'WITNESS': replica_type =", "config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( config,", "# limitations under the License. \"\"\"Spanner instanceConfigs API helper.\"\"\" from __future__ import absolute_import", "projectsId=properties.VALUES.core.project.GetOrFail) config_ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info = [] for replica", "{} if labels is not None: labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value) for", "etag=etag, validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req) def Create(config, display_name, base_config, replicas, validate_only, labels=None, etag=None): \"\"\"Create", "replica['type'] == 'READ_ONLY': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type'] == 'READ_WRITE': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE", "permissions and # limitations under the License. \"\"\"Spanner instanceConfigs API helper.\"\"\" from __future__", "or implied. # See the License for the specific language governing permissions and", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "helper.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "specified instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref =", "parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only) return client.projects_instanceConfigs.Create(req) def Patch(args): \"\"\"Update an instance config.\"\"\" client", "= msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req) def List(): \"\"\"List instance configs in the project.\"\"\"", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag, validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req) def Create(config, display_name, base_config, replicas, validate_only, labels=None,", "configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') project_ref", "instance_config.labels = labels_update.labels update_mask.append('labels') if not update_mask: raise errors.NoFieldsSpecifiedError('No updates requested.') req =", "collection='spanner.projects.instanceConfigs') instance_config = msgs.InstanceConfig(name=ref.RelativeName()) update_mask = [] if args.display_name is not None: instance_config.displayName", "def List(): \"\"\"List instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs", "from googlecloudsdk.core import properties from googlecloudsdk.core import resources import six def Get(config): \"\"\"Get", "resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag, validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req)", "instance_config = msgs.InstanceConfig(name=ref.RelativeName()) update_mask = [] if args.display_name is not None: instance_config.displayName =", "req = msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only) return client.projects_instanceConfigs.Create(req) def Patch(args): \"\"\"Update an", "instance_config.displayName = args.display_name update_mask.append('display_name') if args.etag is not None: instance_config.etag = args.etag def", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "Delete(config, etag=None, validate_only=False): \"\"\"Delete an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs =", "not None: instance_config.etag = args.etag def GetLabels(): req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels labels_update", "you may not use this file except in compliance with the License. 
#", "\"\"\"Create instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner',", "= apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config = msgs.InstanceConfig(name=ref.RelativeName())", "API helper.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import", "msgs.ReplicaInfo(location=replica['location'], type=replica_type)) # TODO(b/399093071): Implement --replicas-file option. labels_message = {} if labels is", "displayName=display_name, baseConfig=base_config, labels=labels_message, replicas=replica_info) if etag: instance_config.etag = etag req = msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(),", "= args.display_name update_mask.append('display_name') if args.etag is not None: instance_config.etag = args.etag def GetLabels():", "use this file except in compliance with the License. # You may obtain", "msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req) def List(): \"\"\"List instance configs in the project.\"\"\" client", "GetLabels) if labels_update.needs_update: instance_config.labels = labels_update.labels update_mask.append('labels') if not update_mask: raise errors.NoFieldsSpecifiedError('No updates", "None: instance_config.displayName = args.display_name update_mask.append('display_name') if args.etag is not None: instance_config.etag = args.etag", "replicas, validate_only, labels=None, etag=None): \"\"\"Create instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner',", "= apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail},", "Patch(args): \"\"\"Update an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1')", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "msgs.InstanceConfig(name=ref.RelativeName()) update_mask = [] if args.display_name is not None: instance_config.displayName = args.display_name update_mask.append('display_name')", "the specific language governing permissions and # limitations under the License. 
\"\"\"Spanner instanceConfigs", "== 'WITNESS': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type)) # TODO(b/399093071): Implement --replicas-file option.", "msgs = apis.GetMessagesModule('spanner', 'v1') req = msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList( client.projects_instanceConfigs, req, field='instanceConfigs',", "the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') req = msgs.SpannerProjectsInstanceConfigsListRequest(", "2.0 (the \"License\"); # you may not use this file except in compliance", "collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag, validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req) def Create(config, display_name, base_config,", "in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') project_ref =", "None: instance_config.etag = args.etag def GetLabels(): req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels labels_update =", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "if labels_update.needs_update: instance_config.labels = labels_update.labels update_mask.append('labels') if not update_mask: raise errors.NoFieldsSpecifiedError('No updates requested.')", "apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(),", "apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config = msgs.InstanceConfig(name=ref.RelativeName()) update_mask", "msgs = apis.GetMessagesModule('spanner', 'v1') project_ref = resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref = resources.REGISTRY.Parse( config,", "governing permissions and # limitations under the License. \"\"\"Spanner instanceConfigs API helper.\"\"\" from", "# # Unless required by applicable law or agreed to in writing, software", "'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req", "\"\"\"Spanner instanceConfigs API helper.\"\"\" from __future__ import absolute_import from __future__ import division from", "express or implied. 
# See the License for the specific language governing permissions", "project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') req = msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail())", "def Get(config): \"\"\"Get the specified instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs =", "labels_update.labels update_mask.append('labels') if not update_mask: raise errors.NoFieldsSpecifiedError('No updates requested.') req = msgs.SpannerProjectsInstanceConfigsPatchRequest( name=ref.RelativeName(),", "msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req =", "config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req) def List(): \"\"\"List", "is not None: instance_config.displayName = args.display_name update_mask.append('display_name') if args.etag is not None: instance_config.etag", "config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag, validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req) def", "msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type'] == 'READ_ONLY': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type'] == 'READ_WRITE': replica_type", "msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value) for key, value in six.iteritems(labels) ]) instance_config = msgs.InstanceConfig(", "etag: instance_config.etag = etag req = msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only) return client.projects_instanceConfigs.Create(req)", "ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req)", "replica_info = [] for replica in replicas: # TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum", "= args.etag def GetLabels(): req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels labels_update = labels_util.ProcessUpdateArgsLazy( args,", "googlecloudsdk.core import properties from googlecloudsdk.core import resources import six def Get(config): \"\"\"Get the", "replica in replicas: # TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum instead # of str.", "either express or implied. # See the License for the specific language governing", "googlecloudsdk.core import resources import six def Get(config): \"\"\"Get the specified instance config.\"\"\" client", "replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type)) # TODO(b/399093071): Implement --replicas-file option. 
labels_message = {} if labels", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse(", "apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName())", "TODO(b/399093071): Implement --replicas-file option. labels_message = {} if labels is not None: labels_message", "import resources import six def Get(config): \"\"\"Get the specified instance config.\"\"\" client =", "replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type'] == 'WITNESS': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type))", "for the specific language governing permissions and # limitations under the License. \"\"\"Spanner", "# Copyright 2016 Google LLC. All Rights Reserved. # # Licensed under the", "'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info = [] for", "name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req) def List(): \"\"\"List instance configs in the project.\"\"\" client =", "the License. # You may obtain a copy of the License at #", "googlecloudsdk.api_lib.util import apis from googlecloudsdk.command_lib.ai import errors from googlecloudsdk.command_lib.util.args import labels_util from googlecloudsdk.core", "key=key, value=value) for key, value in six.iteritems(labels) ]) instance_config = msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name,", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "= apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') req = msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList(", "msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels labels_update = labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue, GetLabels) if labels_update.needs_update: instance_config.labels =", "instanceConfig=instance_config, validateOnly=validate_only) return client.projects_instanceConfigs.Create(req) def Patch(args): \"\"\"Update an instance config.\"\"\" client = apis.GetClientInstance('spanner',", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "instance_config.etag = args.etag def GetLabels(): req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels labels_update = labels_util.ProcessUpdateArgsLazy(", "six.iteritems(labels) ]) instance_config = msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config, labels=labels_message, replicas=replica_info) if etag: instance_config.etag", "of str. 
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type'] == 'READ_ONLY': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif", "msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config, labels=labels_message, replicas=replica_info) if etag: instance_config.etag = etag req =", "resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info = []", "properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag, validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req) def Create(config, display_name,", "resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req) def List():", "= apis.GetMessagesModule('spanner', 'v1') req = msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList( client.projects_instanceConfigs, req, field='instanceConfigs', batch_size_attribute='pageSize')", "from apitools.base.py import list_pager from googlecloudsdk.api_lib.util import apis from googlecloudsdk.command_lib.ai import errors from", "= resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info =", "= apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail},", "with the License. # You may obtain a copy of the License at", "resources import six def Get(config): \"\"\"Get the specified instance config.\"\"\" client = apis.GetClientInstance('spanner',", "req = msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag, validateOnly=validate_only) return client.projects_instanceConfigs.Delete(req) def Create(config, display_name, base_config, replicas,", "replica['type'] == 'READ_WRITE': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type'] == 'WITNESS': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS", "client.projects_instanceConfigs.Get(req).labels labels_update = labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue, GetLabels) if labels_update.needs_update: instance_config.labels = labels_update.labels update_mask.append('labels')", "elif replica['type'] == 'WITNESS': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type)) # TODO(b/399093071): Implement", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "the License. 
\"\"\"Spanner instanceConfigs API helper.\"\"\" from __future__ import absolute_import from __future__ import", "return client.projects_instanceConfigs.Delete(req) def Create(config, display_name, base_config, replicas, validate_only, labels=None, etag=None): \"\"\"Create instance configs", "req = msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList( client.projects_instanceConfigs, req, field='instanceConfigs', batch_size_attribute='pageSize') def Delete(config, etag=None,", "client.projects_instanceConfigs, req, field='instanceConfigs', batch_size_attribute='pageSize') def Delete(config, etag=None, validate_only=False): \"\"\"Delete an instance config.\"\"\" client", "'READ_ONLY': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type'] == 'READ_WRITE': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type']", "= msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type'] == 'READ_ONLY': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type'] == 'READ_WRITE':", "import unicode_literals from apitools.base.py import list_pager from googlecloudsdk.api_lib.util import apis from googlecloudsdk.command_lib.ai import", "law or agreed to in writing, software # distributed under the License is", "Change type to ReplicaInfo.TypeValueValuesEnum instead # of str. replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type']", "the License for the specific language governing permissions and # limitations under the", "__future__ import division from __future__ import unicode_literals from apitools.base.py import list_pager from googlecloudsdk.api_lib.util", "client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( args.config, params={'projectsId':", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type'] == 'WITNESS': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type)) # TODO(b/399093071):", "License. \"\"\"Spanner instanceConfigs API helper.\"\"\" from __future__ import absolute_import from __future__ import division", "ReplicaInfo.TypeValueValuesEnum instead # of str. 
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type'] == 'READ_ONLY': replica_type", "'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsDeleteRequest( name=ref.RelativeName(), etag=etag,", "the specified instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref", "'v1') msgs = apis.GetMessagesModule('spanner', 'v1') project_ref = resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref = resources.REGISTRY.Parse(", "if etag: instance_config.etag = etag req = msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only) return", "is not None: labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value) for key, value in", "errors from googlecloudsdk.command_lib.util.args import labels_util from googlecloudsdk.core import properties from googlecloudsdk.core import resources", "labels=None, etag=None): \"\"\"Create instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs", "replicas: # TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum instead # of str. replica_type =", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "apis.GetMessagesModule('spanner', 'v1') req = msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList( client.projects_instanceConfigs, req, field='instanceConfigs', batch_size_attribute='pageSize') def", "def GetLabels(): req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req).labels labels_update = labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue, GetLabels)", "from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info = [] for replica in replicas: # TODO(b/399093071): Change type", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "if labels is not None: labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value) for key,", "def Patch(args): \"\"\"Update an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner',", "instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1')", "if args.etag is not None: instance_config.etag = args.etag def GetLabels(): req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName())", "See the License for the specific language governing permissions and # limitations under", "= msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config, labels=labels_message, replicas=replica_info) if etag: instance_config.etag = etag req", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "six def Get(config): \"\"\"Get the specified instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "msgs.SpannerProjectsInstanceConfigsListRequest( parent='projects/'+properties.VALUES.core.project.GetOrFail()) return list_pager.YieldFromList( client.projects_instanceConfigs, req, field='instanceConfigs', batch_size_attribute='pageSize') def Delete(config, etag=None, validate_only=False): \"\"\"Delete", "= apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') project_ref = resources.REGISTRY.Create( 'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail) config_ref", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "return client.projects_instanceConfigs.Get(req) def List(): \"\"\"List instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner',", "googlecloudsdk.command_lib.util.args import labels_util from googlecloudsdk.core import properties from googlecloudsdk.core import resources import six", "args.display_name is not None: instance_config.displayName = args.display_name update_mask.append('display_name') if args.etag is not None:", "for key, value in six.iteritems(labels) ]) instance_config = msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config, labels=labels_message,", "= msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only) return client.projects_instanceConfigs.Create(req) def Patch(args): \"\"\"Update an instance", "msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'], type=replica_type)) # TODO(b/399093071): Implement --replicas-file option. 
labels_message = {} if", "== 'READ_ONLY': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type'] == 'READ_WRITE': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif", "validateOnly=validate_only) return client.projects_instanceConfigs.Create(req) def Patch(args): \"\"\"Update an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1')", "req, field='instanceConfigs', batch_size_attribute='pageSize') def Delete(config, etag=None, validate_only=False): \"\"\"Delete an instance config.\"\"\" client =", "base_config, replicas, validate_only, labels=None, etag=None): \"\"\"Create instance configs in the project.\"\"\" client =", "resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info = [] for replica in replicas: #", "instance_config.etag = etag req = msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config, validateOnly=validate_only) return client.projects_instanceConfigs.Create(req) def", "= {} if labels is not None: labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value)", "googlecloudsdk.command_lib.ai import errors from googlecloudsdk.command_lib.util.args import labels_util from googlecloudsdk.core import properties from googlecloudsdk.core", "labels=labels_message, replicas=replica_info) if etag: instance_config.etag = etag req = msgs.SpannerProjectsInstanceConfigsCreateRequest( parent=project_ref.RelativeName(), instanceConfigId=config, instanceConfig=instance_config,", "value=value) for key, value in six.iteritems(labels) ]) instance_config = msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config,", "key, value in six.iteritems(labels) ]) instance_config = msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config, labels=labels_message, replicas=replica_info)", "args, msgs.InstanceConfig.LabelsValue, GetLabels) if labels_update.needs_update: instance_config.labels = labels_update.labels update_mask.append('labels') if not update_mask: raise", "collection='spanner.projects.instanceConfigs') replica_info = [] for replica in replicas: # TODO(b/399093071): Change type to", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "-*- coding: utf-8 -*- # # Copyright 2016 Google LLC. 
All Rights Reserved.", "if not update_mask: raise errors.NoFieldsSpecifiedError('No updates requested.') req = msgs.SpannerProjectsInstanceConfigsPatchRequest( name=ref.RelativeName(), instanceConfig=instance_config, updateMask=','.join(update_mask),", "config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( args.config,", "client.projects_instanceConfigs.Get(req) def List(): \"\"\"List instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1')", "'v1') ref = resources.REGISTRY.Parse( args.config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') instance_config = msgs.InstanceConfig(name=ref.RelativeName()) update_mask =", "value in six.iteritems(labels) ]) instance_config = msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config, labels=labels_message, replicas=replica_info) if", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "= apis.GetMessagesModule('spanner', 'v1') ref = resources.REGISTRY.Parse( config, params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsDeleteRequest(", "labels_util.ProcessUpdateArgsLazy( args, msgs.InstanceConfig.LabelsValue, GetLabels) if labels_update.needs_update: instance_config.labels = labels_update.labels update_mask.append('labels') if not update_mask:", "and # limitations under the License. \"\"\"Spanner instanceConfigs API helper.\"\"\" from __future__ import", "params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') replica_info = [] for replica in replicas: # TODO(b/399093071): Change", "import properties from googlecloudsdk.core import resources import six def Get(config): \"\"\"Get the specified", "name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config, labels=labels_message, replicas=replica_info) if etag: instance_config.etag = etag req = msgs.SpannerProjectsInstanceConfigsCreateRequest(", "msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type'] == 'READ_WRITE': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type'] == 'WITNESS': replica_type", "2016 Google LLC. All Rights Reserved. # # Licensed under the Apache License,", "import division from __future__ import unicode_literals from apitools.base.py import list_pager from googlecloudsdk.api_lib.util import", "update_mask = [] if args.display_name is not None: instance_config.displayName = args.display_name update_mask.append('display_name') if", "--replicas-file option. 
labels_message = {} if labels is not None: labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[", "Get(config): \"\"\"Get the specified instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner',", "params={'projectsId': properties.VALUES.core.project.GetOrFail}, collection='spanner.projects.instanceConfigs') req = msgs.SpannerProjectsInstanceConfigsGetRequest( name=ref.RelativeName()) return client.projects_instanceConfigs.Get(req) def List(): \"\"\"List instance", "List(): \"\"\"List instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs =", "instead # of str. replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type'] == 'READ_ONLY': replica_type =", "instance_config = msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config, labels=labels_message, replicas=replica_info) if etag: instance_config.etag = etag", "from googlecloudsdk.command_lib.ai import errors from googlecloudsdk.command_lib.util.args import labels_util from googlecloudsdk.core import properties from", "unicode_literals from apitools.base.py import list_pager from googlecloudsdk.api_lib.util import apis from googlecloudsdk.command_lib.ai import errors", "absolute_import from __future__ import division from __future__ import unicode_literals from apitools.base.py import list_pager", "labels is not None: labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty( key=key, value=value) for key, value", "etag=None, validate_only=False): \"\"\"Delete an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner',", "not None: instance_config.displayName = args.display_name update_mask.append('display_name') if args.etag is not None: instance_config.etag =", "# # Copyright 2016 Google LLC. All Rights Reserved. # # Licensed under", "from googlecloudsdk.command_lib.util.args import labels_util from googlecloudsdk.core import properties from googlecloudsdk.core import resources import", "division from __future__ import unicode_literals from apitools.base.py import list_pager from googlecloudsdk.api_lib.util import apis", "option. labels_message = {} if labels is not None: labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[ msgs.InstanceConfig.LabelsValue.AdditionalProperty(", "'READ_WRITE': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type'] == 'WITNESS': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS replica_info.append( msgs.ReplicaInfo(location=replica['location'],", "]) instance_config = msgs.InstanceConfig( name=config_ref.RelativeName(), displayName=display_name, baseConfig=base_config, labels=labels_message, replicas=replica_info) if etag: instance_config.etag =", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "str. 
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED if replica['type'] == 'READ_ONLY': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type']", "labels_util from googlecloudsdk.core import properties from googlecloudsdk.core import resources import six def Get(config):", "raise errors.NoFieldsSpecifiedError('No updates requested.') req = msgs.SpannerProjectsInstanceConfigsPatchRequest( name=ref.RelativeName(), instanceConfig=instance_config, updateMask=','.join(update_mask), validateOnly=args.validate_only) return client.projects_instanceConfigs.Patch(req)", "\"\"\"Update an instance config.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs = apis.GetMessagesModule('spanner', 'v1') ref", "replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY elif replica['type'] == 'READ_WRITE': replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE elif replica['type'] ==", "etag=None): \"\"\"Create instance configs in the project.\"\"\" client = apis.GetClientInstance('spanner', 'v1') msgs =", "apitools.base.py import list_pager from googlecloudsdk.api_lib.util import apis from googlecloudsdk.command_lib.ai import errors from googlecloudsdk.command_lib.util.args" ]
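
For orientation, a hypothetical driver for the helpers above. The import path, config ID, and replica values are illustrative assumptions (not taken from the module), and the calls presuppose an initialized gcloud SDK runtime with credentials and a core project set:

# Sketch only: the module path below is assumed, and a configured gcloud
# SDK environment (credentials + core/project property) is required.
from googlecloudsdk.api_lib.spanner import instance_configs

# Enumerate instance configs visible in the active project.
for cfg in instance_configs.List():
    print(cfg.name, cfg.displayName)

# Dry-run creation of a custom config. Per the loop in Create(), each
# replica is a dict with 'location' and 'type' keys.
instance_configs.Create(
    config='custom-nam3',                                # hypothetical ID
    display_name='My custom config',
    base_config='projects/my-proj/instanceConfigs/nam3',
    replicas=[{'location': 'us-east1', 'type': 'READ_ONLY'}],
    validate_only=True,
    labels={'env': 'test'},
)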
<reponame>falkben/fastapi_experiments<gh_stars>1-10
from enum import Enum
from typing import Optional

import uvicorn
from fastapi import FastAPI, Query

app = FastAPI()


class NameEnum(str, Enum):
    empty = ""
    bob = "bob"
    doug = "doug"


@app.get("/hello")
async def hello(name: NameEnum = Query(NameEnum.empty, alias="first_name")):
    return "hello " + name.value


animals_dict = {"ANT": "walk", "BEE": "flies", "CAT": "meows", "DOG": "barks"}
Animal = Enum("Animal", {k: k for k in animals_dict})


@app.get("/animal")
async def animal(anim: Optional[Animal] = Query(None)):
    if anim is not None:
        return anim.name
    else:
        return "not found"


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
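
A quick way to exercise the two routes without starting uvicorn is FastAPI's bundled TestClient (it needs httpx installed). A minimal sketch, written as if appended to the file above; the expected responses follow directly from the handlers:

from fastapi.testclient import TestClient

client = TestClient(app)

# /hello reads the enum from the aliased query parameter "first_name".
assert client.get("/hello", params={"first_name": "bob"}).json() == "hello bob"
assert client.get("/hello").json() == "hello "  # defaults to NameEnum.empty

# /animal takes an optional member of the dynamically built Animal enum;
# its values are the dict keys themselves ("ANT", "BEE", ...).
assert client.get("/animal", params={"anim": "CAT"}).json() == "CAT"
assert client.get("/animal").json() == "not found"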
from setuptools import setup
import pkg_resources

import autocomplete


def get_metadata_version():
    """
    Tries to get the version from the django_autocomplete.egg-info directory.
    """
    try:
        pkg = list(pkg_resources.find_distributions('.', True))[0]
    except IndexError:
        return autocomplete.__version__
    return pkg.version


version = autocomplete.get_mercurial_version() or get_metadata_version()

setup(
    name = 'django-autocomplete',
    version = version,
    description = 'autocomplete utilities for django',
    author = '<NAME>',
    author_email = '<EMAIL>',
    url = 'http://bitbucket.org/tyrion/django-autocomplete',
    download_url = 'http://bitbucket.org/tyrion/django-autocomplete/downloads',
    packages = ['autocomplete'],
    include_package_data = True,
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Utilities'
    ],
)
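
The version lookup above falls back in three steps: Mercurial metadata via autocomplete.get_mercurial_version(), the egg-info directory via pkg_resources, and finally the hard-coded autocomplete.__version__. A self-contained sketch of the middle step, using the same pkg_resources call (run it from a checkout that contains a *.egg-info directory; the True flag restricts the scan to that exact path):

import pkg_resources

# Mirrors get_metadata_version() above, minus the setup.py context.
dists = list(pkg_resources.find_distributions('.', True))
if dists:
    print(dists[0].project_name, dists[0].version)
else:
    print('no egg-info here; setup.py would fall back to autocomplete.__version__')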
[ "\"\"\" style_paths = [ plots.styles(s) for s in plots.get_available_styles()] assert all(os.path.isfile(s) for s", "sizes.\"\"\" ret = plots.figsize(10) assert ret == (10, 6.1803398874989481) def test_get_subplot_arrangement(self): \"\"\"Test the", "determination of subplot arrangements.\"\"\" shape = plots.get_subplot_arrangement(8) assert shape == (3, 3) def", "ratio for figures sizes.\"\"\" ret = plots.figsize(10) assert ret == (10, 6.1803398874989481) def", "\"\"\"Testing the plot functions.\"\"\" def test_figsize(self): \"\"\"Test golden ratio for figures sizes.\"\"\" ret", "shape = plots.get_subplot_arrangement(8) assert shape == (3, 3) def test_get_available_styles(self): \"\"\"Check matplotlib stylesheet", "consinstency of the in and outputs of styles() and get_available_styles(). \"\"\" style_paths =", "TestPlots: \"\"\"Testing the plot functions.\"\"\" def test_figsize(self): \"\"\"Test golden ratio for figures sizes.\"\"\"", "functions in typhon.plots. \"\"\" import os from typhon import plots class TestPlots: \"\"\"Testing", "the functions in typhon.plots. \"\"\" import os from typhon import plots class TestPlots:", "utf-8 -*- \"\"\"Testing the functions in typhon.plots. \"\"\" import os from typhon import", "coding: utf-8 -*- \"\"\"Testing the functions in typhon.plots. \"\"\" import os from typhon", "the consinstency of the in and outputs of styles() and get_available_styles(). \"\"\" style_paths", "os from typhon import plots class TestPlots: \"\"\"Testing the plot functions.\"\"\" def test_figsize(self):", "= plots.figsize(10) assert ret == (10, 6.1803398874989481) def test_get_subplot_arrangement(self): \"\"\"Test the determination of", "style_paths = [ plots.styles(s) for s in plots.get_available_styles()] assert all(os.path.isfile(s) for s in", "typhon.plots. \"\"\" import os from typhon import plots class TestPlots: \"\"\"Testing the plot", "class TestPlots: \"\"\"Testing the plot functions.\"\"\" def test_figsize(self): \"\"\"Test golden ratio for figures", "for figures sizes.\"\"\" ret = plots.figsize(10) assert ret == (10, 6.1803398874989481) def test_get_subplot_arrangement(self):", "def test_get_subplot_arrangement(self): \"\"\"Test the determination of subplot arrangements.\"\"\" shape = plots.get_subplot_arrangement(8) assert shape", "arrangements.\"\"\" shape = plots.get_subplot_arrangement(8) assert shape == (3, 3) def test_get_available_styles(self): \"\"\"Check matplotlib", "# -*- coding: utf-8 -*- \"\"\"Testing the functions in typhon.plots. \"\"\" import os", "(10, 6.1803398874989481) def test_get_subplot_arrangement(self): \"\"\"Test the determination of subplot arrangements.\"\"\" shape = plots.get_subplot_arrangement(8)", "import plots class TestPlots: \"\"\"Testing the plot functions.\"\"\" def test_figsize(self): \"\"\"Test golden ratio", "subplot arrangements.\"\"\" shape = plots.get_subplot_arrangement(8) assert shape == (3, 3) def test_get_available_styles(self): \"\"\"Check", "(3, 3) def test_get_available_styles(self): \"\"\"Check matplotlib stylesheet paths. This test checks the consinstency", "the in and outputs of styles() and get_available_styles(). \"\"\" style_paths = [ plots.styles(s)", "matplotlib stylesheet paths. 
This test checks the consinstency of the in and outputs", "6.1803398874989481) def test_get_subplot_arrangement(self): \"\"\"Test the determination of subplot arrangements.\"\"\" shape = plots.get_subplot_arrangement(8) assert", "= [ plots.styles(s) for s in plots.get_available_styles()] assert all(os.path.isfile(s) for s in style_paths)", "checks the consinstency of the in and outputs of styles() and get_available_styles(). \"\"\"", "= plots.get_subplot_arrangement(8) assert shape == (3, 3) def test_get_available_styles(self): \"\"\"Check matplotlib stylesheet paths.", "golden ratio for figures sizes.\"\"\" ret = plots.figsize(10) assert ret == (10, 6.1803398874989481)", "assert ret == (10, 6.1803398874989481) def test_get_subplot_arrangement(self): \"\"\"Test the determination of subplot arrangements.\"\"\"", "from typhon import plots class TestPlots: \"\"\"Testing the plot functions.\"\"\" def test_figsize(self): \"\"\"Test", "test checks the consinstency of the in and outputs of styles() and get_available_styles().", "outputs of styles() and get_available_styles(). \"\"\" style_paths = [ plots.styles(s) for s in", "test_figsize(self): \"\"\"Test golden ratio for figures sizes.\"\"\" ret = plots.figsize(10) assert ret ==", "\"\"\"Test golden ratio for figures sizes.\"\"\" ret = plots.figsize(10) assert ret == (10,", "-*- \"\"\"Testing the functions in typhon.plots. \"\"\" import os from typhon import plots", "of the in and outputs of styles() and get_available_styles(). \"\"\" style_paths = [", "styles() and get_available_styles(). \"\"\" style_paths = [ plots.styles(s) for s in plots.get_available_styles()] assert", "-*- coding: utf-8 -*- \"\"\"Testing the functions in typhon.plots. \"\"\" import os from", "test_get_available_styles(self): \"\"\"Check matplotlib stylesheet paths. This test checks the consinstency of the in", "of subplot arrangements.\"\"\" shape = plots.get_subplot_arrangement(8) assert shape == (3, 3) def test_get_available_styles(self):", "This test checks the consinstency of the in and outputs of styles() and", "in and outputs of styles() and get_available_styles(). \"\"\" style_paths = [ plots.styles(s) for", "== (10, 6.1803398874989481) def test_get_subplot_arrangement(self): \"\"\"Test the determination of subplot arrangements.\"\"\" shape =", "typhon import plots class TestPlots: \"\"\"Testing the plot functions.\"\"\" def test_figsize(self): \"\"\"Test golden", "\"\"\"Testing the functions in typhon.plots. \"\"\" import os from typhon import plots class", "and outputs of styles() and get_available_styles(). \"\"\" style_paths = [ plots.styles(s) for s", "def test_get_available_styles(self): \"\"\"Check matplotlib stylesheet paths. This test checks the consinstency of the", "in typhon.plots. \"\"\" import os from typhon import plots class TestPlots: \"\"\"Testing the", "functions.\"\"\" def test_figsize(self): \"\"\"Test golden ratio for figures sizes.\"\"\" ret = plots.figsize(10) assert", "the determination of subplot arrangements.\"\"\" shape = plots.get_subplot_arrangement(8) assert shape == (3, 3)", "stylesheet paths. 
This test checks the consinstency of the in and outputs of", "the plot functions.\"\"\" def test_figsize(self): \"\"\"Test golden ratio for figures sizes.\"\"\" ret =", "ret == (10, 6.1803398874989481) def test_get_subplot_arrangement(self): \"\"\"Test the determination of subplot arrangements.\"\"\" shape", "def test_figsize(self): \"\"\"Test golden ratio for figures sizes.\"\"\" ret = plots.figsize(10) assert ret", "plot functions.\"\"\" def test_figsize(self): \"\"\"Test golden ratio for figures sizes.\"\"\" ret = plots.figsize(10)", "figures sizes.\"\"\" ret = plots.figsize(10) assert ret == (10, 6.1803398874989481) def test_get_subplot_arrangement(self): \"\"\"Test", "test_get_subplot_arrangement(self): \"\"\"Test the determination of subplot arrangements.\"\"\" shape = plots.get_subplot_arrangement(8) assert shape ==", "== (3, 3) def test_get_available_styles(self): \"\"\"Check matplotlib stylesheet paths. This test checks the", "get_available_styles(). \"\"\" style_paths = [ plots.styles(s) for s in plots.get_available_styles()] assert all(os.path.isfile(s) for", "import os from typhon import plots class TestPlots: \"\"\"Testing the plot functions.\"\"\" def", "\"\"\"Test the determination of subplot arrangements.\"\"\" shape = plots.get_subplot_arrangement(8) assert shape == (3,", "ret = plots.figsize(10) assert ret == (10, 6.1803398874989481) def test_get_subplot_arrangement(self): \"\"\"Test the determination", "and get_available_styles(). \"\"\" style_paths = [ plots.styles(s) for s in plots.get_available_styles()] assert all(os.path.isfile(s)", "shape == (3, 3) def test_get_available_styles(self): \"\"\"Check matplotlib stylesheet paths. This test checks", "assert shape == (3, 3) def test_get_available_styles(self): \"\"\"Check matplotlib stylesheet paths. This test", "plots.figsize(10) assert ret == (10, 6.1803398874989481) def test_get_subplot_arrangement(self): \"\"\"Test the determination of subplot", "3) def test_get_available_styles(self): \"\"\"Check matplotlib stylesheet paths. This test checks the consinstency of", "plots.get_subplot_arrangement(8) assert shape == (3, 3) def test_get_available_styles(self): \"\"\"Check matplotlib stylesheet paths. This", "of styles() and get_available_styles(). \"\"\" style_paths = [ plots.styles(s) for s in plots.get_available_styles()]", "\"\"\"Check matplotlib stylesheet paths. This test checks the consinstency of the in and", "paths. This test checks the consinstency of the in and outputs of styles()", "\"\"\" import os from typhon import plots class TestPlots: \"\"\"Testing the plot functions.\"\"\"", "plots class TestPlots: \"\"\"Testing the plot functions.\"\"\" def test_figsize(self): \"\"\"Test golden ratio for" ]
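# The figsize test above pins down the golden-ratio relationship: for a given
# width, the height is width / phi with phi = (1 + sqrt(5)) / 2. A minimal
# sketch of that relationship follows; golden_figsize is a local illustration,
# not typhon's actual implementation, which may differ in details.
import math

def golden_figsize(width):
    """Return (width, height) with the height scaled down by the golden ratio."""
    phi = (1 + math.sqrt(5)) / 2
    return (width, width / phi)

assert math.isclose(golden_figsize(10)[1], 6.1803398874989481)
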
[ "toolkit.get_action('package_search')({}, {'rows': 1}) if result.get('count'): count = result.get('count') return count def get_hero_images(): resources", "plugins.implements(plugins.ITemplateHelpers) # IConfigurer def update_config(self, config_): toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic', 'heroslider') #", "log = logging.getLogger(__name__) def dataset_count(): \"\"\"Return a count of all datasets\"\"\" count =", "2.7 and later log = logging.getLogger(__name__) def dataset_count(): \"\"\"Return a count of all", "'public') toolkit.add_resource('fanstatic', 'heroslider') # ITemplateHelpers def get_helpers(self): return { 'hero_dataset_count': dataset_count, 'get_hero_images': get_hero_images,", "config_): toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic', 'heroslider') # ITemplateHelpers def get_helpers(self): return {", "return resources class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) # IConfigurer def update_config(self, config_): toolkit.add_template_directory(config_, 'templates')", "resources = [] try: package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images') result = toolkit.get_action('package_show')({}, {'id': package_id})", "['JPEG','PNG']: if item.get('url'): resources.append(item.get('url')) except: log.debug('Getting Hero images failed') return resources class HerosliderPlugin(plugins.SingletonPlugin):", "IConfigurer def update_config(self, config_): toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic', 'heroslider') # ITemplateHelpers def", "count = result.get('count') return count def get_hero_images(): resources = [] try: package_id =", "logging.getLogger(__name__) def dataset_count(): \"\"\"Return a count of all datasets\"\"\" count = 0 result", "if item.get('url'): resources.append(item.get('url')) except: log.debug('Getting Hero images failed') return resources class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer)", "get_hero_images(): resources = [] try: package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images') result = toolkit.get_action('package_show')({}, {'id':", "logging try: from ckan.common import config # CKAN 2.7 and later except ImportError:", "# CKAN 2.7 and later except ImportError: from pylons import config # CKAN", "package_id}) resource_list = result.get('resources') for item in resource_list: if item.get('format') in ['JPEG','PNG']: if", "import ckan.plugins as plugins import ckan.plugins.toolkit as toolkit import logging try: from ckan.common", "in ['JPEG','PNG']: if item.get('url'): resources.append(item.get('url')) except: log.debug('Getting Hero images failed') return resources class", "import logging try: from ckan.common import config # CKAN 2.7 and later except", "import config # CKAN 2.7 and later except ImportError: from pylons import config", "from pylons import config # CKAN 2.7 and later log = logging.getLogger(__name__) def", "resources class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) # IConfigurer def update_config(self, config_): toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_,", "update_config(self, 
config_): toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic', 'heroslider') # ITemplateHelpers def get_helpers(self): return", "plugins import ckan.plugins.toolkit as toolkit import logging try: from ckan.common import config #", "return count def get_hero_images(): resources = [] try: package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images') result", "'hero-slider-images') result = toolkit.get_action('package_show')({}, {'id': package_id}) resource_list = result.get('resources') for item in resource_list:", "class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) # IConfigurer def update_config(self, config_): toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_, 'public')", "log.debug('Getting Hero images failed') return resources class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) # IConfigurer def", "# CKAN 2.7 and later log = logging.getLogger(__name__) def dataset_count(): \"\"\"Return a count", "and later log = logging.getLogger(__name__) def dataset_count(): \"\"\"Return a count of all datasets\"\"\"", "{'id': package_id}) resource_list = result.get('resources') for item in resource_list: if item.get('format') in ['JPEG','PNG']:", "= 0 result = toolkit.get_action('package_search')({}, {'rows': 1}) if result.get('count'): count = result.get('count') return", "resource_list: if item.get('format') in ['JPEG','PNG']: if item.get('url'): resources.append(item.get('url')) except: log.debug('Getting Hero images failed')", "item.get('url'): resources.append(item.get('url')) except: log.debug('Getting Hero images failed') return resources class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers)", "\"\"\"Return a count of all datasets\"\"\" count = 0 result = toolkit.get_action('package_search')({}, {'rows':", "2.7 and later except ImportError: from pylons import config # CKAN 2.7 and", "1}) if result.get('count'): count = result.get('count') return count def get_hero_images(): resources = []", "config # CKAN 2.7 and later except ImportError: from pylons import config #", "and later except ImportError: from pylons import config # CKAN 2.7 and later", "images failed') return resources class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) # IConfigurer def update_config(self, config_):", "resources.append(item.get('url')) except: log.debug('Getting Hero images failed') return resources class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) #", "def get_hero_images(): resources = [] try: package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images') result = toolkit.get_action('package_show')({},", "# encoding: utf-8 import ckan.plugins as plugins import ckan.plugins.toolkit as toolkit import logging", "try: package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images') result = toolkit.get_action('package_show')({}, {'id': package_id}) resource_list = result.get('resources')", "HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) # IConfigurer def 
update_config(self, config_): toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic',", "result.get('count'): count = result.get('count') return count def get_hero_images(): resources = [] try: package_id", "package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images') result = toolkit.get_action('package_show')({}, {'id': package_id}) resource_list = result.get('resources') for", "config.get('ckanext.heroslider.package_id', 'hero-slider-images') result = toolkit.get_action('package_show')({}, {'id': package_id}) resource_list = result.get('resources') for item in", "CKAN 2.7 and later except ImportError: from pylons import config # CKAN 2.7", "toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic', 'heroslider') # ITemplateHelpers def get_helpers(self): return { 'hero_dataset_count': dataset_count, 'get_hero_images':", "from ckan.common import config # CKAN 2.7 and later except ImportError: from pylons", "of all datasets\"\"\" count = 0 result = toolkit.get_action('package_search')({}, {'rows': 1}) if result.get('count'):", "config # CKAN 2.7 and later log = logging.getLogger(__name__) def dataset_count(): \"\"\"Return a", "item in resource_list: if item.get('format') in ['JPEG','PNG']: if item.get('url'): resources.append(item.get('url')) except: log.debug('Getting Hero", "result.get('count') return count def get_hero_images(): resources = [] try: package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images')", "= result.get('count') return count def get_hero_images(): resources = [] try: package_id = config.get('ckanext.heroslider.package_id',", "datasets\"\"\" count = 0 result = toolkit.get_action('package_search')({}, {'rows': 1}) if result.get('count'): count =", "for item in resource_list: if item.get('format') in ['JPEG','PNG']: if item.get('url'): resources.append(item.get('url')) except: log.debug('Getting", "except: log.debug('Getting Hero images failed') return resources class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) # IConfigurer", "# IConfigurer def update_config(self, config_): toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic', 'heroslider') # ITemplateHelpers", "ckan.plugins.toolkit as toolkit import logging try: from ckan.common import config # CKAN 2.7", "dataset_count(): \"\"\"Return a count of all datasets\"\"\" count = 0 result = toolkit.get_action('package_search')({},", "except ImportError: from pylons import config # CKAN 2.7 and later log =", "count = 0 result = toolkit.get_action('package_search')({}, {'rows': 1}) if result.get('count'): count = result.get('count')", "toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic', 'heroslider') # ITemplateHelpers def get_helpers(self): return { 'hero_dataset_count':", "failed') return resources class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) # IConfigurer def update_config(self, config_): toolkit.add_template_directory(config_,", "result = toolkit.get_action('package_search')({}, {'rows': 1}) if result.get('count'): count = result.get('count') return count def", "import ckan.plugins.toolkit as toolkit import logging try: from ckan.common import config # 
CKAN", "import config # CKAN 2.7 and later log = logging.getLogger(__name__) def dataset_count(): \"\"\"Return", "[] try: package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images') result = toolkit.get_action('package_show')({}, {'id': package_id}) resource_list =", "in resource_list: if item.get('format') in ['JPEG','PNG']: if item.get('url'): resources.append(item.get('url')) except: log.debug('Getting Hero images", "count def get_hero_images(): resources = [] try: package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images') result =", "all datasets\"\"\" count = 0 result = toolkit.get_action('package_search')({}, {'rows': 1}) if result.get('count'): count", "try: from ckan.common import config # CKAN 2.7 and later except ImportError: from", "CKAN 2.7 and later log = logging.getLogger(__name__) def dataset_count(): \"\"\"Return a count of", "result = toolkit.get_action('package_show')({}, {'id': package_id}) resource_list = result.get('resources') for item in resource_list: if", "later except ImportError: from pylons import config # CKAN 2.7 and later log", "toolkit.get_action('package_show')({}, {'id': package_id}) resource_list = result.get('resources') for item in resource_list: if item.get('format') in", "utf-8 import ckan.plugins as plugins import ckan.plugins.toolkit as toolkit import logging try: from", "= logging.getLogger(__name__) def dataset_count(): \"\"\"Return a count of all datasets\"\"\" count = 0", "0 result = toolkit.get_action('package_search')({}, {'rows': 1}) if result.get('count'): count = result.get('count') return count", "= toolkit.get_action('package_show')({}, {'id': package_id}) resource_list = result.get('resources') for item in resource_list: if item.get('format')", "pylons import config # CKAN 2.7 and later log = logging.getLogger(__name__) def dataset_count():", "plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) # IConfigurer def update_config(self, config_): toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic', 'heroslider')", "if result.get('count'): count = result.get('count') return count def get_hero_images(): resources = [] try:", "{'rows': 1}) if result.get('count'): count = result.get('count') return count def get_hero_images(): resources =", "resource_list = result.get('resources') for item in resource_list: if item.get('format') in ['JPEG','PNG']: if item.get('url'):", "item.get('format') in ['JPEG','PNG']: if item.get('url'): resources.append(item.get('url')) except: log.debug('Getting Hero images failed') return resources", "= [] try: package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images') result = toolkit.get_action('package_show')({}, {'id': package_id}) resource_list", "as toolkit import logging try: from ckan.common import config # CKAN 2.7 and", "ImportError: from pylons import config # CKAN 2.7 and later log = logging.getLogger(__name__)", "= config.get('ckanext.heroslider.package_id', 'hero-slider-images') result = toolkit.get_action('package_show')({}, {'id': package_id}) resource_list = result.get('resources') for item", "Hero images failed') return resources class HerosliderPlugin(plugins.SingletonPlugin): plugins.implements(plugins.IConfigurer) plugins.implements(plugins.ITemplateHelpers) # IConfigurer def update_config(self,", "later log = logging.getLogger(__name__) def dataset_count(): \"\"\"Return a count of all datasets\"\"\" count", "count of all 
datasets\"\"\" count = 0 result = toolkit.get_action('package_search')({}, {'rows': 1}) if", "toolkit.add_resource('fanstatic', 'heroslider') # ITemplateHelpers def get_helpers(self): return { 'hero_dataset_count': dataset_count, 'get_hero_images': get_hero_images, }", "def update_config(self, config_): toolkit.add_template_directory(config_, 'templates') toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic', 'heroslider') # ITemplateHelpers def get_helpers(self):", "result.get('resources') for item in resource_list: if item.get('format') in ['JPEG','PNG']: if item.get('url'): resources.append(item.get('url')) except:", "if item.get('format') in ['JPEG','PNG']: if item.get('url'): resources.append(item.get('url')) except: log.debug('Getting Hero images failed') return", "ckan.common import config # CKAN 2.7 and later except ImportError: from pylons import", "toolkit import logging try: from ckan.common import config # CKAN 2.7 and later", "a count of all datasets\"\"\" count = 0 result = toolkit.get_action('package_search')({}, {'rows': 1})", "as plugins import ckan.plugins.toolkit as toolkit import logging try: from ckan.common import config", "def dataset_count(): \"\"\"Return a count of all datasets\"\"\" count = 0 result =", "= result.get('resources') for item in resource_list: if item.get('format') in ['JPEG','PNG']: if item.get('url'): resources.append(item.get('url'))", "'templates') toolkit.add_public_directory(config_, 'public') toolkit.add_resource('fanstatic', 'heroslider') # ITemplateHelpers def get_helpers(self): return { 'hero_dataset_count': dataset_count,", "encoding: utf-8 import ckan.plugins as plugins import ckan.plugins.toolkit as toolkit import logging try:", "ckan.plugins as plugins import ckan.plugins.toolkit as toolkit import logging try: from ckan.common import", "= toolkit.get_action('package_search')({}, {'rows': 1}) if result.get('count'): count = result.get('count') return count def get_hero_images():" ]
[ "= logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase): def setUp(self): if len(logger.handlers) == 0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1', debug_mode=True)", "class GraphsignalTest(unittest.TestCase): def setUp(self): if len(logger.handlers) == 0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1', debug_mode=True) def tearDown(self):", "GraphsignalTest(unittest.TestCase): def setUp(self): if len(logger.handlers) == 0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1', debug_mode=True) def tearDown(self): graphsignal.shutdown()", "logger = logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase): def setUp(self): if len(logger.handlers) == 0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1',", "import patch, Mock import graphsignal logger = logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase): def setUp(self): if", "if len(logger.handlers) == 0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1', debug_mode=True) def tearDown(self): graphsignal.shutdown() def test_configure(self): self.assertEqual(graphsignal._get_config().api_key,", "logging import sys from unittest.mock import patch, Mock import graphsignal logger = logging.getLogger('graphsignal')", "== 0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1', debug_mode=True) def tearDown(self): graphsignal.shutdown() def test_configure(self): self.assertEqual(graphsignal._get_config().api_key, 'k1') self.assertEqual(graphsignal._get_config().debug_mode,", "import logging import sys from unittest.mock import patch, Mock import graphsignal logger =", "graphsignal logger = logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase): def setUp(self): if len(logger.handlers) == 0: logger.addHandler(logging.StreamHandler(sys.stdout))", "setUp(self): if len(logger.handlers) == 0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1', debug_mode=True) def tearDown(self): graphsignal.shutdown() def test_configure(self):", "unittest.mock import patch, Mock import graphsignal logger = logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase): def setUp(self):", "Mock import graphsignal logger = logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase): def setUp(self): if len(logger.handlers) ==", "def setUp(self): if len(logger.handlers) == 0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1', debug_mode=True) def tearDown(self): graphsignal.shutdown() def", "import unittest import logging import sys from unittest.mock import patch, Mock import graphsignal", "logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase): def setUp(self): if len(logger.handlers) == 0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1', debug_mode=True) def", "0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1', debug_mode=True) def tearDown(self): graphsignal.shutdown() def test_configure(self): self.assertEqual(graphsignal._get_config().api_key, 'k1') self.assertEqual(graphsignal._get_config().debug_mode, True)", "<filename>graphsignal/graphsignal_test.py import unittest import logging import sys from unittest.mock import patch, Mock import", "import graphsignal 
logger = logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase): def setUp(self): if len(logger.handlers) == 0:", "sys from unittest.mock import patch, Mock import graphsignal logger = logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase):", "from unittest.mock import patch, Mock import graphsignal logger = logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase): def", "import sys from unittest.mock import patch, Mock import graphsignal logger = logging.getLogger('graphsignal') class", "patch, Mock import graphsignal logger = logging.getLogger('graphsignal') class GraphsignalTest(unittest.TestCase): def setUp(self): if len(logger.handlers)", "len(logger.handlers) == 0: logger.addHandler(logging.StreamHandler(sys.stdout)) graphsignal.configure(api_key='k1', debug_mode=True) def tearDown(self): graphsignal.shutdown() def test_configure(self): self.assertEqual(graphsignal._get_config().api_key, 'k1')", "unittest import logging import sys from unittest.mock import patch, Mock import graphsignal logger" ]
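# `patch` and `Mock` are imported above but unused in test_configure. A hedged
# sketch of how `patch` could be put to work in a further test; the patched
# target is illustrative only, not a claim about graphsignal's internals:
#
#   def test_shutdown_called(self):
#       with patch('graphsignal.shutdown') as mock_shutdown:
#           graphsignal.shutdown()
#           mock_shutdown.assert_called_once()
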
[ "Exception(g) class PathBuilder: def __init__(self, objects): self.objects = objects self.objects_by_id = {o['id']:o for", "= [] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids = parent['ids'] parent_names = parent['names'] o['ids'] = parent_ids", "g in groups: if g['groupMemberCount'] != len(g['groupMembers']): raise Exception(g) class PathBuilder: def __init__(self,", "parent_names = parent['names'] o['ids'] = parent_ids + [o['id']] o['names'] = parent_names + [o['name']]", "g['groupMembers'] = [] for m in group_members: group = groups_by_id[m['workspaceGroupId']] user = users_by_id[m['workspaceUserId']]", "= objects self.objects_by_id = {o['id']:o for o in objects} def get_object(self, object_id): return", "in o: parent_ids = [] parent_names = [] if self.has_parent(o): parent = self.get_parent(o)", "objects): self.objects = objects self.objects_by_id = {o['id']:o for o in objects} def get_object(self,", "'children_ids' not in parent: parent['children_ids'] = [] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids = parent['ids'] parent_names", "in users} groups_by_id = {g['id']:g for g in groups} for u in users:", "parent: parent['children_ids'] = [] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids = parent['ids'] parent_names = parent['names'] o['ids']", "For educational purpose only \"\"\" def associate_users_and_groups(users, groups, group_members): users_by_id = {u['id']:u for", "= [] for g in groups: g['groupMembers'] = [] for m in group_members:", "def __init__(self, objects): self.objects = objects self.objects_by_id = {o['id']:o for o in objects}", "self.has_parent(o): parent = self.get_parent(o) if 'children_ids' not in parent: parent['children_ids'] = [] parent['children_ids'].append(o['id'])", "parent['children_ids'] = [] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids = parent['ids'] parent_names = parent['names'] o['ids'] =", "in group_members: group = groups_by_id[m['workspaceGroupId']] user = users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for g in", "u in users: u['groups'] = [] for g in groups: g['groupMembers'] = []", "\"\"\" def associate_users_and_groups(users, groups, group_members): users_by_id = {u['id']:u for u in users} groups_by_id", "[] for g in groups: g['groupMembers'] = [] for m in group_members: group", "{o['id']:o for o in objects} def get_object(self, object_id): return self.objects_by_id[object_id] def get_parent(self, o):", "in objects} def get_object(self, object_id): return self.objects_by_id[object_id] def get_parent(self, o): return self.get_object(o['parentId']) def", "o): return 'parentId' in o and o['parentId'] != -1 def build_paths(self): for o", "[] parent_names = [] if self.has_parent(o): parent = self.get_parent(o) if 'children_ids' not in", "self.get_object(o['parentId']) def has_parent(self, o): return 'parentId' in o and o['parentId'] != -1 def", "[] if self.has_parent(o): parent = self.get_parent(o) if 'children_ids' not in parent: parent['children_ids'] =", "o in self.objects: self.__build_path_helper__(o) def __build_path_helper__(self, o): if 'ids' not in o: parent_ids", "= {o['id']:o for o in objects} def get_object(self, object_id): return self.objects_by_id[object_id] def get_parent(self,", "[o['id']] o['names'] = parent_names + [o['name']] o['fullPath'] = '/'.join(o['names']) def build_paths(*arg): objects =", "= 
'/'.join(o['names']) def build_paths(*arg): objects = [] for a in arg: objects.extend(a) PathBuilder(objects).build_paths()", "self.objects: self.__build_path_helper__(o) def __build_path_helper__(self, o): if 'ids' not in o: parent_ids = []", "for g in groups} for u in users: u['groups'] = [] for g", "group = groups_by_id[m['workspaceGroupId']] user = users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for g in groups: if", "in self.objects: self.__build_path_helper__(o) def __build_path_helper__(self, o): if 'ids' not in o: parent_ids =", "+ [o['id']] o['names'] = parent_names + [o['name']] o['fullPath'] = '/'.join(o['names']) def build_paths(*arg): objects", "parent_ids = parent['ids'] parent_names = parent['names'] o['ids'] = parent_ids + [o['id']] o['names'] =", "parent = self.get_parent(o) if 'children_ids' not in parent: parent['children_ids'] = [] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent)", "group_members: group = groups_by_id[m['workspaceGroupId']] user = users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for g in groups:", "in groups: if g['groupMemberCount'] != len(g['groupMembers']): raise Exception(g) class PathBuilder: def __init__(self, objects):", "return 'parentId' in o and o['parentId'] != -1 def build_paths(self): for o in", "def has_parent(self, o): return 'parentId' in o and o['parentId'] != -1 def build_paths(self):", "not in parent: parent['children_ids'] = [] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids = parent['ids'] parent_names =", "if g['groupMemberCount'] != len(g['groupMembers']): raise Exception(g) class PathBuilder: def __init__(self, objects): self.objects =", "return self.objects_by_id[object_id] def get_parent(self, o): return self.get_object(o['parentId']) def has_parent(self, o): return 'parentId' in", "for u in users: u['groups'] = [] for g in groups: g['groupMembers'] =", "g in groups: g['groupMembers'] = [] for m in group_members: group = groups_by_id[m['workspaceGroupId']]", "groups_by_id[m['workspaceGroupId']] user = users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for g in groups: if g['groupMemberCount'] !=", "parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids = parent['ids'] parent_names = parent['names'] o['ids'] = parent_ids + [o['id']]", "u['groups'] = [] for g in groups: g['groupMembers'] = [] for m in", "\"\"\" For educational purpose only \"\"\" def associate_users_and_groups(users, groups, group_members): users_by_id = {u['id']:u", "parent_ids = [] parent_names = [] if self.has_parent(o): parent = self.get_parent(o) if 'children_ids'", "groups_by_id = {g['id']:g for g in groups} for u in users: u['groups'] =", "self.objects = objects self.objects_by_id = {o['id']:o for o in objects} def get_object(self, object_id):", "object_id): return self.objects_by_id[object_id] def get_parent(self, o): return self.get_object(o['parentId']) def has_parent(self, o): return 'parentId'", "m in group_members: group = groups_by_id[m['workspaceGroupId']] user = users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for g", "= parent['names'] o['ids'] = parent_ids + [o['id']] o['names'] = parent_names + [o['name']] o['fullPath']", "len(g['groupMembers']): raise Exception(g) class PathBuilder: def __init__(self, objects): 
self.objects = objects self.objects_by_id =", "!= -1 def build_paths(self): for o in self.objects: self.__build_path_helper__(o) def __build_path_helper__(self, o): if", "o in objects} def get_object(self, object_id): return self.objects_by_id[object_id] def get_parent(self, o): return self.get_object(o['parentId'])", "= {u['id']:u for u in users} groups_by_id = {g['id']:g for g in groups}", "def associate_users_and_groups(users, groups, group_members): users_by_id = {u['id']:u for u in users} groups_by_id =", "groups, group_members): users_by_id = {u['id']:u for u in users} groups_by_id = {g['id']:g for", "= users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for g in groups: if g['groupMemberCount'] != len(g['groupMembers']): raise", "groups} for u in users: u['groups'] = [] for g in groups: g['groupMembers']", "for m in group_members: group = groups_by_id[m['workspaceGroupId']] user = users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for", "o['parentId'] != -1 def build_paths(self): for o in self.objects: self.__build_path_helper__(o) def __build_path_helper__(self, o):", "def get_parent(self, o): return self.get_object(o['parentId']) def has_parent(self, o): return 'parentId' in o and", "educational purpose only \"\"\" def associate_users_and_groups(users, groups, group_members): users_by_id = {u['id']:u for u", "= groups_by_id[m['workspaceGroupId']] user = users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for g in groups: if g['groupMemberCount']", "= parent_names + [o['name']] o['fullPath'] = '/'.join(o['names']) def build_paths(*arg): objects = [] for", "in groups} for u in users: u['groups'] = [] for g in groups:", "and o['parentId'] != -1 def build_paths(self): for o in self.objects: self.__build_path_helper__(o) def __build_path_helper__(self,", "group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for g in groups: if g['groupMemberCount'] != len(g['groupMembers']): raise Exception(g) class", "PathBuilder: def __init__(self, objects): self.objects = objects self.objects_by_id = {o['id']:o for o in", "in o and o['parentId'] != -1 def build_paths(self): for o in self.objects: self.__build_path_helper__(o)", "o and o['parentId'] != -1 def build_paths(self): for o in self.objects: self.__build_path_helper__(o) def", "return self.get_object(o['parentId']) def has_parent(self, o): return 'parentId' in o and o['parentId'] != -1", "in groups: g['groupMembers'] = [] for m in group_members: group = groups_by_id[m['workspaceGroupId']] user", "= [] for m in group_members: group = groups_by_id[m['workspaceGroupId']] user = users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id'])", "__init__(self, objects): self.objects = objects self.objects_by_id = {o['id']:o for o in objects} def", "g['groupMemberCount'] != len(g['groupMembers']): raise Exception(g) class PathBuilder: def __init__(self, objects): self.objects = objects", "def __build_path_helper__(self, o): if 'ids' not in o: parent_ids = [] parent_names =", "for o in self.objects: self.__build_path_helper__(o) def __build_path_helper__(self, o): if 'ids' not in o:", "o['ids'] = parent_ids + [o['id']] o['names'] = parent_names + [o['name']] o['fullPath'] = '/'.join(o['names'])", "= {g['id']:g for g in groups} for u in users: u['groups'] = []", "o): return self.get_object(o['parentId']) def has_parent(self, o): return 
'parentId' in o and o['parentId'] !=", "o['fullPath'] = '/'.join(o['names']) def build_paths(*arg): objects = [] for a in arg: objects.extend(a)", "!= len(g['groupMembers']): raise Exception(g) class PathBuilder: def __init__(self, objects): self.objects = objects self.objects_by_id", "parent['names'] o['ids'] = parent_ids + [o['id']] o['names'] = parent_names + [o['name']] o['fullPath'] =", "for u in users} groups_by_id = {g['id']:g for g in groups} for u", "def get_object(self, object_id): return self.objects_by_id[object_id] def get_parent(self, o): return self.get_object(o['parentId']) def has_parent(self, o):", "= parent_ids + [o['id']] o['names'] = parent_names + [o['name']] o['fullPath'] = '/'.join(o['names']) def", "<filename>intralinks/utils/associations.py \"\"\" For educational purpose only \"\"\" def associate_users_and_groups(users, groups, group_members): users_by_id =", "associate_users_and_groups(users, groups, group_members): users_by_id = {u['id']:u for u in users} groups_by_id = {g['id']:g", "= self.get_parent(o) if 'children_ids' not in parent: parent['children_ids'] = [] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids", "users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for g in groups: if g['groupMemberCount'] != len(g['groupMembers']): raise Exception(g)", "self.__build_path_helper__(o) def __build_path_helper__(self, o): if 'ids' not in o: parent_ids = [] parent_names", "if 'children_ids' not in parent: parent['children_ids'] = [] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids = parent['ids']", "group_members): users_by_id = {u['id']:u for u in users} groups_by_id = {g['id']:g for g", "get_parent(self, o): return self.get_object(o['parentId']) def has_parent(self, o): return 'parentId' in o and o['parentId']", "parent_ids + [o['id']] o['names'] = parent_names + [o['name']] o['fullPath'] = '/'.join(o['names']) def build_paths(*arg):", "o): if 'ids' not in o: parent_ids = [] parent_names = [] if", "{g['id']:g for g in groups} for u in users: u['groups'] = [] for", "get_object(self, object_id): return self.objects_by_id[object_id] def get_parent(self, o): return self.get_object(o['parentId']) def has_parent(self, o): return", "user = users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id']) for g in groups: if g['groupMemberCount'] != len(g['groupMembers']):", "self.objects_by_id = {o['id']:o for o in objects} def get_object(self, object_id): return self.objects_by_id[object_id] def", "[o['name']] o['fullPath'] = '/'.join(o['names']) def build_paths(*arg): objects = [] for a in arg:", "for o in objects} def get_object(self, object_id): return self.objects_by_id[object_id] def get_parent(self, o): return", "users: u['groups'] = [] for g in groups: g['groupMembers'] = [] for m", "in parent: parent['children_ids'] = [] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids = parent['ids'] parent_names = parent['names']", "only \"\"\" def associate_users_and_groups(users, groups, group_members): users_by_id = {u['id']:u for u in users}", "self.get_parent(o) if 'children_ids' not in parent: parent['children_ids'] = [] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids =", "o: parent_ids = [] parent_names = [] if self.has_parent(o): parent = self.get_parent(o) if", "def build_paths(self): for o in self.objects: 
self.__build_path_helper__(o) def __build_path_helper__(self, o): if 'ids' not", "-1 def build_paths(self): for o in self.objects: self.__build_path_helper__(o) def __build_path_helper__(self, o): if 'ids'", "groups: g['groupMembers'] = [] for m in group_members: group = groups_by_id[m['workspaceGroupId']] user =", "users} groups_by_id = {g['id']:g for g in groups} for u in users: u['groups']", "self.__build_path_helper__(parent) parent_ids = parent['ids'] parent_names = parent['names'] o['ids'] = parent_ids + [o['id']] o['names']", "parent_names = [] if self.has_parent(o): parent = self.get_parent(o) if 'children_ids' not in parent:", "raise Exception(g) class PathBuilder: def __init__(self, objects): self.objects = objects self.objects_by_id = {o['id']:o", "'parentId' in o and o['parentId'] != -1 def build_paths(self): for o in self.objects:", "objects self.objects_by_id = {o['id']:o for o in objects} def get_object(self, object_id): return self.objects_by_id[object_id]", "class PathBuilder: def __init__(self, objects): self.objects = objects self.objects_by_id = {o['id']:o for o", "has_parent(self, o): return 'parentId' in o and o['parentId'] != -1 def build_paths(self): for", "parent['ids'] parent_names = parent['names'] o['ids'] = parent_ids + [o['id']] o['names'] = parent_names +", "user['groups'].append(group['id']) for g in groups: if g['groupMemberCount'] != len(g['groupMembers']): raise Exception(g) class PathBuilder:", "{u['id']:u for u in users} groups_by_id = {g['id']:g for g in groups} for", "= [] if self.has_parent(o): parent = self.get_parent(o) if 'children_ids' not in parent: parent['children_ids']", "o['names'] = parent_names + [o['name']] o['fullPath'] = '/'.join(o['names']) def build_paths(*arg): objects = []", "if self.has_parent(o): parent = self.get_parent(o) if 'children_ids' not in parent: parent['children_ids'] = []", "'ids' not in o: parent_ids = [] parent_names = [] if self.has_parent(o): parent", "if 'ids' not in o: parent_ids = [] parent_names = [] if self.has_parent(o):", "= parent['ids'] parent_names = parent['names'] o['ids'] = parent_ids + [o['id']] o['names'] = parent_names", "= [] parent_names = [] if self.has_parent(o): parent = self.get_parent(o) if 'children_ids' not", "[] parent['children_ids'].append(o['id']) self.__build_path_helper__(parent) parent_ids = parent['ids'] parent_names = parent['names'] o['ids'] = parent_ids +", "parent_names + [o['name']] o['fullPath'] = '/'.join(o['names']) def build_paths(*arg): objects = [] for a", "[] for m in group_members: group = groups_by_id[m['workspaceGroupId']] user = users_by_id[m['workspaceUserId']] group['groupMembers'].append(user['id']) user['groups'].append(group['id'])", "build_paths(self): for o in self.objects: self.__build_path_helper__(o) def __build_path_helper__(self, o): if 'ids' not in", "groups: if g['groupMemberCount'] != len(g['groupMembers']): raise Exception(g) class PathBuilder: def __init__(self, objects): self.objects", "objects} def get_object(self, object_id): return self.objects_by_id[object_id] def get_parent(self, o): return self.get_object(o['parentId']) def has_parent(self,", "in users: u['groups'] = [] for g in groups: g['groupMembers'] = [] for", "u in users} groups_by_id = {g['id']:g for g in groups} for u in", "self.objects_by_id[object_id] def get_parent(self, o): return self.get_object(o['parentId']) def has_parent(self, o): return 'parentId' in o", "g in groups} for u in users: u['groups'] = [] for g in", "__build_path_helper__(self, o): if 'ids' not in o: parent_ids = 
[] parent_names = []", "users_by_id = {u['id']:u for u in users} groups_by_id = {g['id']:g for g in", "for g in groups: g['groupMembers'] = [] for m in group_members: group =", "for g in groups: if g['groupMemberCount'] != len(g['groupMembers']): raise Exception(g) class PathBuilder: def", "not in o: parent_ids = [] parent_names = [] if self.has_parent(o): parent =", "+ [o['name']] o['fullPath'] = '/'.join(o['names']) def build_paths(*arg): objects = [] for a in", "purpose only \"\"\" def associate_users_and_groups(users, groups, group_members): users_by_id = {u['id']:u for u in" ]
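# A small usage sketch with made-up objects; the field names mirror what the
# code above expects, and parentId == -1 marks a root (see has_parent()):
if __name__ == '__main__':
    folders = [
        {'id': 1, 'parentId': -1, 'name': 'root'},
        {'id': 2, 'parentId': 1, 'name': 'reports'},
    ]
    documents = [{'id': 3, 'parentId': 2, 'name': 'q1.pdf'}]
    build_paths(folders, documents)
    print(documents[0]['fullPath'])  # -> root/reports/q1.pdf
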
[ "# Gets folder where icons are located for file in os.listdir(directory): # Gets", "folder that name ends with .png \"\"\" directory = os.fsencode( \"icons/\") # Gets", "folder where icons are located for file in os.listdir(directory): # Gets every file", "PIL import Image # https://pillow.readthedocs.io/en/stable/ def find_files(): \"\"\" Finds all files from icons", "rgb if r == 0 and g == 0 and b == 0", "b == 0 and a > 0: # If pixel is black and", "if filename.endswith(\".png\"): change_color(\"icons/\" + filename) continue else: continue def change_color(image): \"\"\" Changes every", "from PIL import Image # https://pillow.readthedocs.io/en/stable/ def find_files(): \"\"\" Finds all files from", "newimage.putpixel((x,y), (255,255,255,a)) # Change color to white. Keep transperency. newimage.save(image) # Saves a", "trought every pixel of image in X axis for y in range(newimage.size[1]): #", "# If pixel is black and not transparent. newimage.putpixel((x,y), (255,255,255,a)) # Change color", "transperent pixels \"\"\" newimage = Image.open(image) for x in range(newimage.size[0]):# Goes trought every", "icon color could be changed in code. Icon images are from https://github.com/iconic/open-iconic/tree/master/png '''", "and a > 0: # If pixel is black and not transparent. newimage.putpixel((x,y),", "find_files(): \"\"\" Finds all files from icons folder that name ends with .png", "name ends with .png \"\"\" directory = os.fsencode( \"icons/\") # Gets folder where", "ends with .png \"\"\" directory = os.fsencode( \"icons/\") # Gets folder where icons", "where icons are located for file in os.listdir(directory): # Gets every file from", "= os.fsdecode(file) if filename.endswith(\".png\"): change_color(\"icons/\" + filename) continue else: continue def change_color(image): \"\"\"", "of image in X axis for y in range(newimage.size[1]): # In Y axis", "0: # If pixel is black and not transparent. newimage.putpixel((x,y), (255,255,255,a)) # Change", "is black and not transparent. newimage.putpixel((x,y), (255,255,255,a)) # Change color to white. Keep", "def change_color(image): \"\"\" Changes every black pixel to white from image that was", "Get pixels color in rgb if r == 0 and g == 0", "is used for changing icons color from black to white, so icon color", "# In Y axis r,g,b,a = newimage.getpixel((x,y)) # Get pixels color in rgb", "transparent. newimage.putpixel((x,y), (255,255,255,a)) # Change color to white. Keep transperency. newimage.save(image) # Saves", "range(newimage.size[0]):# Goes trought every pixel of image in X axis for y in", "X axis for y in range(newimage.size[1]): # In Y axis r,g,b,a = newimage.getpixel((x,y))", "file is used for changing icons color from black to white, so icon", "pixels \"\"\" newimage = Image.open(image) for x in range(newimage.size[0]):# Goes trought every pixel", "Finds all files from icons folder that name ends with .png \"\"\" directory", "and not transparent. newimage.putpixel((x,y), (255,255,255,a)) # Change color to white. Keep transperency. newimage.save(image)", "in range(newimage.size[0]):# Goes trought every pixel of image in X axis for y", "== 0 and a > 0: # If pixel is black and not", "# Gets every file from folder filename = os.fsdecode(file) if filename.endswith(\".png\"): change_color(\"icons/\" +", "not transparent. newimage.putpixel((x,y), (255,255,255,a)) # Change color to white. Keep transperency. 
newimage.save(image) #", "os.listdir(directory): # Gets every file from folder filename = os.fsdecode(file) if filename.endswith(\".png\"): change_color(\"icons/\"", "color in rgb if r == 0 and g == 0 and b", "for changing icons color from black to white, so icon color could be", "os from PIL import Image # https://pillow.readthedocs.io/en/stable/ def find_files(): \"\"\" Finds all files", "change_color(\"icons/\" + filename) continue else: continue def change_color(image): \"\"\" Changes every black pixel", "Changes every black pixel to white from image that was send to it.", "Y axis r,g,b,a = newimage.getpixel((x,y)) # Get pixels color in rgb if r", "black pixel to white from image that was send to it. Skips transperent", "to white. Keep transperency. newimage.save(image) # Saves a file over the old one.", "g == 0 and b == 0 and a > 0: # If", "else: continue def change_color(image): \"\"\" Changes every black pixel to white from image", "Goes trought every pixel of image in X axis for y in range(newimage.size[1]):", "Icon images are from https://github.com/iconic/open-iconic/tree/master/png ''' import os from PIL import Image #", "every pixel of image in X axis for y in range(newimage.size[1]): # In", "axis for y in range(newimage.size[1]): # In Y axis r,g,b,a = newimage.getpixel((x,y)) #", "black and not transparent. newimage.putpixel((x,y), (255,255,255,a)) # Change color to white. Keep transperency.", "that name ends with .png \"\"\" directory = os.fsencode( \"icons/\") # Gets folder", "0 and g == 0 and b == 0 and a > 0:", "changing icons color from black to white, so icon color could be changed", "color from black to white, so icon color could be changed in code.", "import Image # https://pillow.readthedocs.io/en/stable/ def find_files(): \"\"\" Finds all files from icons folder", "== 0 and b == 0 and a > 0: # If pixel", "be changed in code. Icon images are from https://github.com/iconic/open-iconic/tree/master/png ''' import os from", "\"\"\" newimage = Image.open(image) for x in range(newimage.size[0]):# Goes trought every pixel of", "= Image.open(image) for x in range(newimage.size[0]):# Goes trought every pixel of image in", "black to white, so icon color could be changed in code. Icon images", "changed in code. Icon images are from https://github.com/iconic/open-iconic/tree/master/png ''' import os from PIL", "continue else: continue def change_color(image): \"\"\" Changes every black pixel to white from", "pixels color in rgb if r == 0 and g == 0 and", "send to it. Skips transperent pixels \"\"\" newimage = Image.open(image) for x in", "to white, so icon color could be changed in code. Icon images are", "\"\"\" directory = os.fsencode( \"icons/\") # Gets folder where icons are located for", "images are from https://github.com/iconic/open-iconic/tree/master/png ''' import os from PIL import Image # https://pillow.readthedocs.io/en/stable/", ".png \"\"\" directory = os.fsencode( \"icons/\") # Gets folder where icons are located", "icons color from black to white, so icon color could be changed in", "image that was send to it. Skips transperent pixels \"\"\" newimage = Image.open(image)", "x in range(newimage.size[0]):# Goes trought every pixel of image in X axis for", "could be changed in code. Icon images are from https://github.com/iconic/open-iconic/tree/master/png ''' import os", "filename) continue else: continue def change_color(image): \"\"\" Changes every black pixel to white", "pixel to white from image that was send to it. 
Skips transperent pixels", "filename.endswith(\".png\"): change_color(\"icons/\" + filename) continue else: continue def change_color(image): \"\"\" Changes every black", "os.fsdecode(file) if filename.endswith(\".png\"): change_color(\"icons/\" + filename) continue else: continue def change_color(image): \"\"\" Changes", "# Change color to white. Keep transperency. newimage.save(image) # Saves a file over", "Change color to white. Keep transperency. newimage.save(image) # Saves a file over the", "color to white. Keep transperency. newimage.save(image) # Saves a file over the old", "that was send to it. Skips transperent pixels \"\"\" newimage = Image.open(image) for", "\"icons/\") # Gets folder where icons are located for file in os.listdir(directory): #", "\"\"\" Changes every black pixel to white from image that was send to", "filename = os.fsdecode(file) if filename.endswith(\".png\"): change_color(\"icons/\" + filename) continue else: continue def change_color(image):", "''' import os from PIL import Image # https://pillow.readthedocs.io/en/stable/ def find_files(): \"\"\" Finds", "a > 0: # If pixel is black and not transparent. newimage.putpixel((x,y), (255,255,255,a))", "pixel of image in X axis for y in range(newimage.size[1]): # In Y", "folder filename = os.fsdecode(file) if filename.endswith(\".png\"): change_color(\"icons/\" + filename) continue else: continue def", "color could be changed in code. Icon images are from https://github.com/iconic/open-iconic/tree/master/png ''' import", "to it. Skips transperent pixels \"\"\" newimage = Image.open(image) for x in range(newimage.size[0]):#", "0 and b == 0 and a > 0: # If pixel is", "it. Skips transperent pixels \"\"\" newimage = Image.open(image) for x in range(newimage.size[0]):# Goes", "axis r,g,b,a = newimage.getpixel((x,y)) # Get pixels color in rgb if r ==", "if r == 0 and g == 0 and b == 0 and", "from image that was send to it. Skips transperent pixels \"\"\" newimage =", "If pixel is black and not transparent. newimage.putpixel((x,y), (255,255,255,a)) # Change color to", "from https://github.com/iconic/open-iconic/tree/master/png ''' import os from PIL import Image # https://pillow.readthedocs.io/en/stable/ def find_files():", "so icon color could be changed in code. Icon images are from https://github.com/iconic/open-iconic/tree/master/png", "file in os.listdir(directory): # Gets every file from folder filename = os.fsdecode(file) if", "= newimage.getpixel((x,y)) # Get pixels color in rgb if r == 0 and", "Gets folder where icons are located for file in os.listdir(directory): # Gets every", "pixel is black and not transparent. newimage.putpixel((x,y), (255,255,255,a)) # Change color to white.", "newimage.getpixel((x,y)) # Get pixels color in rgb if r == 0 and g", "https://github.com/iconic/open-iconic/tree/master/png ''' import os from PIL import Image # https://pillow.readthedocs.io/en/stable/ def find_files(): \"\"\"", "located for file in os.listdir(directory): # Gets every file from folder filename =", "for file in os.listdir(directory): # Gets every file from folder filename = os.fsdecode(file)", "Image.open(image) for x in range(newimage.size[0]):# Goes trought every pixel of image in X", "range(newimage.size[1]): # In Y axis r,g,b,a = newimage.getpixel((x,y)) # Get pixels color in", "code. 
Icon images are from https://github.com/iconic/open-iconic/tree/master/png ''' import os from PIL import Image", "directory = os.fsencode( \"icons/\") # Gets folder where icons are located for file", "white from image that was send to it. Skips transperent pixels \"\"\" newimage", "icons folder that name ends with .png \"\"\" directory = os.fsencode( \"icons/\") #", "are located for file in os.listdir(directory): # Gets every file from folder filename", "in rgb if r == 0 and g == 0 and b ==", "r == 0 and g == 0 and b == 0 and a", "change_color(image): \"\"\" Changes every black pixel to white from image that was send", "+ filename) continue else: continue def change_color(image): \"\"\" Changes every black pixel to", "In Y axis r,g,b,a = newimage.getpixel((x,y)) # Get pixels color in rgb if", "icons are located for file in os.listdir(directory): # Gets every file from folder", "''' This file is used for changing icons color from black to white,", "in code. Icon images are from https://github.com/iconic/open-iconic/tree/master/png ''' import os from PIL import", "in X axis for y in range(newimage.size[1]): # In Y axis r,g,b,a =", "in range(newimage.size[1]): # In Y axis r,g,b,a = newimage.getpixel((x,y)) # Get pixels color", "https://pillow.readthedocs.io/en/stable/ def find_files(): \"\"\" Finds all files from icons folder that name ends", "continue def change_color(image): \"\"\" Changes every black pixel to white from image that", "r,g,b,a = newimage.getpixel((x,y)) # Get pixels color in rgb if r == 0", "newimage = Image.open(image) for x in range(newimage.size[0]):# Goes trought every pixel of image", "from black to white, so icon color could be changed in code. Icon", "# Get pixels color in rgb if r == 0 and g ==", "white, so icon color could be changed in code. Icon images are from", "Gets every file from folder filename = os.fsdecode(file) if filename.endswith(\".png\"): change_color(\"icons/\" + filename)", "all files from icons folder that name ends with .png \"\"\" directory =", "def find_files(): \"\"\" Finds all files from icons folder that name ends with", "from icons folder that name ends with .png \"\"\" directory = os.fsencode( \"icons/\")", "in os.listdir(directory): # Gets every file from folder filename = os.fsdecode(file) if filename.endswith(\".png\"):", "= os.fsencode( \"icons/\") # Gets folder where icons are located for file in", "files from icons folder that name ends with .png \"\"\" directory = os.fsencode(", "every black pixel to white from image that was send to it. Skips", "== 0 and g == 0 and b == 0 and a >", "and g == 0 and b == 0 and a > 0: #", "with .png \"\"\" directory = os.fsencode( \"icons/\") # Gets folder where icons are", "import os from PIL import Image # https://pillow.readthedocs.io/en/stable/ def find_files(): \"\"\" Finds all", "and b == 0 and a > 0: # If pixel is black", "Skips transperent pixels \"\"\" newimage = Image.open(image) for x in range(newimage.size[0]):# Goes trought", "os.fsencode( \"icons/\") # Gets folder where icons are located for file in os.listdir(directory):", "(255,255,255,a)) # Change color to white. Keep transperency. newimage.save(image) # Saves a file", "from folder filename = os.fsdecode(file) if filename.endswith(\".png\"): change_color(\"icons/\" + filename) continue else: continue", "> 0: # If pixel is black and not transparent. 
newimage.putpixel((x,y), (255,255,255,a)) #", "used for changing icons color from black to white, so icon color could", "file from folder filename = os.fsdecode(file) if filename.endswith(\".png\"): change_color(\"icons/\" + filename) continue else:", "for x in range(newimage.size[0]):# Goes trought every pixel of image in X axis", "to white from image that was send to it. Skips transperent pixels \"\"\"", "Image # https://pillow.readthedocs.io/en/stable/ def find_files(): \"\"\" Finds all files from icons folder that", "y in range(newimage.size[1]): # In Y axis r,g,b,a = newimage.getpixel((x,y)) # Get pixels", "0 and a > 0: # If pixel is black and not transparent.", "every file from folder filename = os.fsdecode(file) if filename.endswith(\".png\"): change_color(\"icons/\" + filename) continue", "# https://pillow.readthedocs.io/en/stable/ def find_files(): \"\"\" Finds all files from icons folder that name", "image in X axis for y in range(newimage.size[1]): # In Y axis r,g,b,a", "was send to it. Skips transperent pixels \"\"\" newimage = Image.open(image) for x", "This file is used for changing icons color from black to white, so", "for y in range(newimage.size[1]): # In Y axis r,g,b,a = newimage.getpixel((x,y)) # Get", "are from https://github.com/iconic/open-iconic/tree/master/png ''' import os from PIL import Image # https://pillow.readthedocs.io/en/stable/ def", "\"\"\" Finds all files from icons folder that name ends with .png \"\"\"" ]
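# Hedged entry point, assuming the script is run from the directory that
# contains the icons/ folder (the original invocation is not shown):
if __name__ == "__main__":
    find_files()
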
[ "os.path.join( os.path.split(sys.argv[0])[0], \"reader\", \"importer\", \"perseus_import_policy.json\") selection_policy = JSONImportPolicy() selection_policy.load_policy( import_policy_file ) perseus_batch_importer =", "perseus_batch_importer = PerseusBatchImporter( perseus_directory= directory, book_selection_policy = selection_policy.should_be_processed, overwrite_existing = overwrite, test =", "policy accounting for the fact that the command may be run outside of", "Perseus XML documents from a directory that match the import policy\" def add_arguments(self,", "= args[0] # Validate the arguments if directory is None: print(\"No directory was", "from\", directory) perseus_batch_importer.do_import() if test: print(\"Files from the\", directory, \"evaluated\") else: print(\"Files from", "parameters for any works that would be imported\") def handle(self, *args, **options): directory", "selection_policy.should_be_processed, overwrite_existing = overwrite, test = test) if test: print(\"Testing import for files", "Already a boolean elif overwrite.lower() in [\"true\", \"1\"]: overwrite = True else: overwrite", "reader.importer.batch_import import JSONImportPolicy import os import sys class Command(BaseCommand): help = \"Imports all", "if test: print(\"Testing import for files from\", directory) else: print(\"Importing files from\", directory)", "> 0: directory = args[0] # Validate the arguments if directory is None:", "the import policy\" def add_arguments(self, parser): parser.add_argument('-d', '--directory', dest='directory', help='The directory containing the", "run outside of the path where manage.py resides import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], \"reader\",", "dest='directory', help='The directory containing the files to import') parser.add_argument('-o', '--overwrite', action=\"store_true\", dest=\"overwrite\", default=False,", "pass # Already a boolean elif test.lower() in [\"true\", \"1\"]: test = True", "selection_policy.load_policy( import_policy_file ) perseus_batch_importer = PerseusBatchImporter( perseus_directory= directory, book_selection_policy = selection_policy.should_be_processed, overwrite_existing =", "the files to import') parser.add_argument('-o', '--overwrite', action=\"store_true\", dest=\"overwrite\", default=False, help=\"Overwrite and replace existing", "directory) else: print(\"Importing files from\", directory) perseus_batch_importer.do_import() if test: print(\"Files from the\", directory,", "directory = args[0] # Validate the arguments if directory is None: print(\"No directory", "= options['overwrite'] if overwrite is None: overwrite = False elif overwrite in [True,", "os.path.split(sys.argv[0])[0], \"reader\", \"importer\", \"perseus_import_policy.json\") selection_policy = JSONImportPolicy() selection_policy.load_policy( import_policy_file ) perseus_batch_importer = PerseusBatchImporter(", "to import') parser.add_argument('-o', '--overwrite', action=\"store_true\", dest=\"overwrite\", default=False, help=\"Overwrite and replace existing items\") parser.add_argument(\"-t\",", "any works that would be imported\") def handle(self, *args, **options): directory = options['directory']", "= selection_policy.should_be_processed, overwrite_existing = overwrite, test = test) if test: print(\"Testing import for", "test = True else: test = False # Get the path to the", "default=False, help=\"Overwrite and replace existing items\") parser.add_argument(\"-t\", \"--test\", action=\"store_true\", dest=\"test\", help=\"Output the import", "all 
Perseus XML documents from a directory that match the import policy\" def", "test in [True, False]: pass # Already a boolean elif test.lower() in [\"true\",", "\"perseus_import_policy.json\") selection_policy = JSONImportPolicy() selection_policy.load_policy( import_policy_file ) perseus_batch_importer = PerseusBatchImporter( perseus_directory= directory, book_selection_policy", "options['test'] if test is None: test = False elif test in [True, False]:", "perseus_directory= directory, book_selection_policy = selection_policy.should_be_processed, overwrite_existing = overwrite, test = test) if test:", "help=\"Overwrite and replace existing items\") parser.add_argument(\"-t\", \"--test\", action=\"store_true\", dest=\"test\", help=\"Output the import parameters", "boolean elif test.lower() in [\"true\", \"1\"]: test = True else: test = False", "from reader.importer.PerseusBatchImporter import PerseusBatchImporter from reader.importer.batch_import import JSONImportPolicy import os import sys class", "reader.importer.PerseusBatchImporter import PerseusBatchImporter from reader.importer.batch_import import JSONImportPolicy import os import sys class Command(BaseCommand):", "\"reader\", \"importer\", \"perseus_import_policy.json\") selection_policy = JSONImportPolicy() selection_policy.load_policy( import_policy_file ) perseus_batch_importer = PerseusBatchImporter( perseus_directory=", "def add_arguments(self, parser): parser.add_argument('-d', '--directory', dest='directory', help='The directory containing the files to import')", "test = options['test'] if test is None: test = False elif test in", "test = False elif test in [True, False]: pass # Already a boolean", "from a directory that match the import policy\" def add_arguments(self, parser): parser.add_argument('-d', '--directory',", "directory is None: print(\"No directory was provided to import\") return overwrite = options['overwrite']", "is None and len(args) > 0: directory = args[0] # Validate the arguments", "print(\"Files from the\", directory, \"evaluated\") else: print(\"Files from the\", directory, \"directory successfully imported\")", "\"1\"]: test = True else: test = False # Get the path to", "accounting for the fact that the command may be run outside of the", "match the import policy\" def add_arguments(self, parser): parser.add_argument('-d', '--directory', dest='directory', help='The directory containing", "class Command(BaseCommand): help = \"Imports all Perseus XML documents from a directory that", "if directory is None and len(args) > 0: directory = args[0] # Validate", "\"Imports all Perseus XML documents from a directory that match the import policy\"", "import policy\" def add_arguments(self, parser): parser.add_argument('-d', '--directory', dest='directory', help='The directory containing the files", "and len(args) > 0: directory = args[0] # Validate the arguments if directory", "# Already a boolean elif overwrite.lower() in [\"true\", \"1\"]: overwrite = True else:", "is None: overwrite = False elif overwrite in [True, False]: pass # Already", "print(\"No directory was provided to import\") return overwrite = options['overwrite'] if overwrite is", "= False # Get the path to the import policy accounting for the", "in [True, False]: pass # Already a boolean elif test.lower() in [\"true\", \"1\"]:", "test: print(\"Files from the\", directory, \"evaluated\") else: print(\"Files from the\", directory, \"directory successfully", "def handle(self, *args, **options): directory = options['directory'] if directory is None and 
len(args)", "is None: print(\"No directory was provided to import\") return overwrite = options['overwrite'] if", "parser.add_argument('-o', '--overwrite', action=\"store_true\", dest=\"overwrite\", default=False, help=\"Overwrite and replace existing items\") parser.add_argument(\"-t\", \"--test\", action=\"store_true\",", "elif test.lower() in [\"true\", \"1\"]: test = True else: test = False #", "test is None: test = False elif test in [True, False]: pass #", "import sys class Command(BaseCommand): help = \"Imports all Perseus XML documents from a", "overwrite = False elif overwrite in [True, False]: pass # Already a boolean", "if test is None: test = False elif test in [True, False]: pass", "a boolean elif test.lower() in [\"true\", \"1\"]: test = True else: test =", "None: print(\"No directory was provided to import\") return overwrite = options['overwrite'] if overwrite", "was provided to import\") return overwrite = options['overwrite'] if overwrite is None: overwrite", "True else: overwrite = False test = options['test'] if test is None: test", "the import policy accounting for the fact that the command may be run", "the command may be run outside of the path where manage.py resides import_policy_file", "BaseCommand from reader.importer.PerseusBatchImporter import PerseusBatchImporter from reader.importer.batch_import import JSONImportPolicy import os import sys", "a directory that match the import policy\" def add_arguments(self, parser): parser.add_argument('-d', '--directory', dest='directory',", "XML documents from a directory that match the import policy\" def add_arguments(self, parser):", "dest=\"test\", help=\"Output the import parameters for any works that would be imported\") def", "= options['test'] if test is None: test = False elif test in [True,", "Validate the arguments if directory is None: print(\"No directory was provided to import\")", "PerseusBatchImporter( perseus_directory= directory, book_selection_policy = selection_policy.should_be_processed, overwrite_existing = overwrite, test = test) if", "parser): parser.add_argument('-d', '--directory', dest='directory', help='The directory containing the files to import') parser.add_argument('-o', '--overwrite',", "directory = options['directory'] if directory is None and len(args) > 0: directory =", "0: directory = args[0] # Validate the arguments if directory is None: print(\"No", "documents from a directory that match the import policy\" def add_arguments(self, parser): parser.add_argument('-d',", "overwrite = options['overwrite'] if overwrite is None: overwrite = False elif overwrite in", "handle(self, *args, **options): directory = options['directory'] if directory is None and len(args) >", "the import parameters for any works that would be imported\") def handle(self, *args,", "\"1\"]: overwrite = True else: overwrite = False test = options['test'] if test", "*args, **options): directory = options['directory'] if directory is None and len(args) > 0:", "from\", directory) else: print(\"Importing files from\", directory) perseus_batch_importer.do_import() if test: print(\"Files from the\",", "= test) if test: print(\"Testing import for files from\", directory) else: print(\"Importing files", "directory containing the files to import') parser.add_argument('-o', '--overwrite', action=\"store_true\", dest=\"overwrite\", default=False, help=\"Overwrite and", "import') parser.add_argument('-o', '--overwrite', action=\"store_true\", dest=\"overwrite\", default=False, help=\"Overwrite and replace existing items\") 
parser.add_argument(\"-t\", \"--test\",", "else: overwrite = False test = options['test'] if test is None: test =", "options['directory'] if directory is None and len(args) > 0: directory = args[0] #", "elif overwrite in [True, False]: pass # Already a boolean elif overwrite.lower() in", "in [\"true\", \"1\"]: overwrite = True else: overwrite = False test = options['test']", "else: test = False # Get the path to the import policy accounting", "**options): directory = options['directory'] if directory is None and len(args) > 0: directory", "False elif overwrite in [True, False]: pass # Already a boolean elif overwrite.lower()", "the path to the import policy accounting for the fact that the command", "test = False # Get the path to the import policy accounting for", "path where manage.py resides import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], \"reader\", \"importer\", \"perseus_import_policy.json\") selection_policy =", "be imported\") def handle(self, *args, **options): directory = options['directory'] if directory is None", "django.core.management.base import BaseCommand from reader.importer.PerseusBatchImporter import PerseusBatchImporter from reader.importer.batch_import import JSONImportPolicy import os", "sys class Command(BaseCommand): help = \"Imports all Perseus XML documents from a directory", "JSONImportPolicy() selection_policy.load_policy( import_policy_file ) perseus_batch_importer = PerseusBatchImporter( perseus_directory= directory, book_selection_policy = selection_policy.should_be_processed, overwrite_existing", "= True else: overwrite = False test = options['test'] if test is None:", "False test = options['test'] if test is None: test = False elif test", "overwrite_existing = overwrite, test = test) if test: print(\"Testing import for files from\",", "\"--test\", action=\"store_true\", dest=\"test\", help=\"Output the import parameters for any works that would be", "items\") parser.add_argument(\"-t\", \"--test\", action=\"store_true\", dest=\"test\", help=\"Output the import parameters for any works that", "overwrite, test = test) if test: print(\"Testing import for files from\", directory) else:", "action=\"store_true\", dest=\"overwrite\", default=False, help=\"Overwrite and replace existing items\") parser.add_argument(\"-t\", \"--test\", action=\"store_true\", dest=\"test\", help=\"Output", "provided to import\") return overwrite = options['overwrite'] if overwrite is None: overwrite =", "os import sys class Command(BaseCommand): help = \"Imports all Perseus XML documents from", "= \"Imports all Perseus XML documents from a directory that match the import", "'--directory', dest='directory', help='The directory containing the files to import') parser.add_argument('-o', '--overwrite', action=\"store_true\", dest=\"overwrite\",", "directory that match the import policy\" def add_arguments(self, parser): parser.add_argument('-d', '--directory', dest='directory', help='The", "from django.core.management.base import BaseCommand from reader.importer.PerseusBatchImporter import PerseusBatchImporter from reader.importer.batch_import import JSONImportPolicy import", "directory was provided to import\") return overwrite = options['overwrite'] if overwrite is None:", "= False elif test in [True, False]: pass # Already a boolean elif", "False # Get the path to the import policy accounting for the fact", ") perseus_batch_importer = PerseusBatchImporter( perseus_directory= directory, book_selection_policy = selection_policy.should_be_processed, overwrite_existing 
= overwrite, test", "= False test = options['test'] if test is None: test = False elif", "files to import') parser.add_argument('-o', '--overwrite', action=\"store_true\", dest=\"overwrite\", default=False, help=\"Overwrite and replace existing items\")", "parser.add_argument(\"-t\", \"--test\", action=\"store_true\", dest=\"test\", help=\"Output the import parameters for any works that would", "outside of the path where manage.py resides import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], \"reader\", \"importer\",", "perseus_batch_importer.do_import() if test: print(\"Files from the\", directory, \"evaluated\") else: print(\"Files from the\", directory,", "elif overwrite.lower() in [\"true\", \"1\"]: overwrite = True else: overwrite = False test", "len(args) > 0: directory = args[0] # Validate the arguments if directory is", "imported\") def handle(self, *args, **options): directory = options['directory'] if directory is None and", "None: overwrite = False elif overwrite in [True, False]: pass # Already a", "args[0] # Validate the arguments if directory is None: print(\"No directory was provided", "help=\"Output the import parameters for any works that would be imported\") def handle(self,", "containing the files to import') parser.add_argument('-o', '--overwrite', action=\"store_true\", dest=\"overwrite\", default=False, help=\"Overwrite and replace", "None and len(args) > 0: directory = args[0] # Validate the arguments if", "elif test in [True, False]: pass # Already a boolean elif test.lower() in", "directory) perseus_batch_importer.do_import() if test: print(\"Files from the\", directory, \"evaluated\") else: print(\"Files from the\",", "command may be run outside of the path where manage.py resides import_policy_file =", "else: print(\"Importing files from\", directory) perseus_batch_importer.do_import() if test: print(\"Files from the\", directory, \"evaluated\")", "add_arguments(self, parser): parser.add_argument('-d', '--directory', dest='directory', help='The directory containing the files to import') parser.add_argument('-o',", "= options['directory'] if directory is None and len(args) > 0: directory = args[0]", "manage.py resides import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], \"reader\", \"importer\", \"perseus_import_policy.json\") selection_policy = JSONImportPolicy() selection_policy.load_policy(", "and replace existing items\") parser.add_argument(\"-t\", \"--test\", action=\"store_true\", dest=\"test\", help=\"Output the import parameters for", "options['overwrite'] if overwrite is None: overwrite = False elif overwrite in [True, False]:", "for any works that would be imported\") def handle(self, *args, **options): directory =", "in [True, False]: pass # Already a boolean elif overwrite.lower() in [\"true\", \"1\"]:", "Already a boolean elif test.lower() in [\"true\", \"1\"]: test = True else: test", "\"importer\", \"perseus_import_policy.json\") selection_policy = JSONImportPolicy() selection_policy.load_policy( import_policy_file ) perseus_batch_importer = PerseusBatchImporter( perseus_directory= directory,", "import policy accounting for the fact that the command may be run outside", "replace existing items\") parser.add_argument(\"-t\", \"--test\", action=\"store_true\", dest=\"test\", help=\"Output the import parameters for any", "to import\") return overwrite = options['overwrite'] if overwrite is None: overwrite = False", "for files from\", directory) else: print(\"Importing files from\", directory) perseus_batch_importer.do_import() 
if test: print(\"Files", "overwrite is None: overwrite = False elif overwrite in [True, False]: pass #", "overwrite = True else: overwrite = False test = options['test'] if test is", "files from\", directory) perseus_batch_importer.do_import() if test: print(\"Files from the\", directory, \"evaluated\") else: print(\"Files", "= os.path.join( os.path.split(sys.argv[0])[0], \"reader\", \"importer\", \"perseus_import_policy.json\") selection_policy = JSONImportPolicy() selection_policy.load_policy( import_policy_file ) perseus_batch_importer", "dest=\"overwrite\", default=False, help=\"Overwrite and replace existing items\") parser.add_argument(\"-t\", \"--test\", action=\"store_true\", dest=\"test\", help=\"Output the", "arguments if directory is None: print(\"No directory was provided to import\") return overwrite", "= JSONImportPolicy() selection_policy.load_policy( import_policy_file ) perseus_batch_importer = PerseusBatchImporter( perseus_directory= directory, book_selection_policy = selection_policy.should_be_processed,", "existing items\") parser.add_argument(\"-t\", \"--test\", action=\"store_true\", dest=\"test\", help=\"Output the import parameters for any works", "= PerseusBatchImporter( perseus_directory= directory, book_selection_policy = selection_policy.should_be_processed, overwrite_existing = overwrite, test = test)", "that match the import policy\" def add_arguments(self, parser): parser.add_argument('-d', '--directory', dest='directory', help='The directory", "'--overwrite', action=\"store_true\", dest=\"overwrite\", default=False, help=\"Overwrite and replace existing items\") parser.add_argument(\"-t\", \"--test\", action=\"store_true\", dest=\"test\",", "that the command may be run outside of the path where manage.py resides", "help='The directory containing the files to import') parser.add_argument('-o', '--overwrite', action=\"store_true\", dest=\"overwrite\", default=False, help=\"Overwrite", "the arguments if directory is None: print(\"No directory was provided to import\") return", "directory is None and len(args) > 0: directory = args[0] # Validate the", "overwrite.lower() in [\"true\", \"1\"]: overwrite = True else: overwrite = False test =", "import for files from\", directory) else: print(\"Importing files from\", directory) perseus_batch_importer.do_import() if test:", "if test: print(\"Files from the\", directory, \"evaluated\") else: print(\"Files from the\", directory, \"directory", "to the import policy accounting for the fact that the command may be", "import\") return overwrite = options['overwrite'] if overwrite is None: overwrite = False elif", "in [\"true\", \"1\"]: test = True else: test = False # Get the", "resides import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], \"reader\", \"importer\", \"perseus_import_policy.json\") selection_policy = JSONImportPolicy() selection_policy.load_policy( import_policy_file", "the fact that the command may be run outside of the path where", "selection_policy = JSONImportPolicy() selection_policy.load_policy( import_policy_file ) perseus_batch_importer = PerseusBatchImporter( perseus_directory= directory, book_selection_policy =", "Get the path to the import policy accounting for the fact that the", "where manage.py resides import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], \"reader\", \"importer\", \"perseus_import_policy.json\") selection_policy = JSONImportPolicy()", "import parameters for any works that would be imported\") def handle(self, *args, **options):", "None: test = False 
elif test in [True, False]: pass # Already a", "return overwrite = options['overwrite'] if overwrite is None: overwrite = False elif overwrite", "[True, False]: pass # Already a boolean elif overwrite.lower() in [\"true\", \"1\"]: overwrite", "be run outside of the path where manage.py resides import_policy_file = os.path.join( os.path.split(sys.argv[0])[0],", "is None: test = False elif test in [True, False]: pass # Already", "for the fact that the command may be run outside of the path", "help = \"Imports all Perseus XML documents from a directory that match the", "import_policy_file ) perseus_batch_importer = PerseusBatchImporter( perseus_directory= directory, book_selection_policy = selection_policy.should_be_processed, overwrite_existing = overwrite,", "= overwrite, test = test) if test: print(\"Testing import for files from\", directory)", "[\"true\", \"1\"]: test = True else: test = False # Get the path", "book_selection_policy = selection_policy.should_be_processed, overwrite_existing = overwrite, test = test) if test: print(\"Testing import", "path to the import policy accounting for the fact that the command may", "boolean elif overwrite.lower() in [\"true\", \"1\"]: overwrite = True else: overwrite = False", "True else: test = False # Get the path to the import policy", "import JSONImportPolicy import os import sys class Command(BaseCommand): help = \"Imports all Perseus", "False elif test in [True, False]: pass # Already a boolean elif test.lower()", "parser.add_argument('-d', '--directory', dest='directory', help='The directory containing the files to import') parser.add_argument('-o', '--overwrite', action=\"store_true\",", "# Already a boolean elif test.lower() in [\"true\", \"1\"]: test = True else:", "print(\"Importing files from\", directory) perseus_batch_importer.do_import() if test: print(\"Files from the\", directory, \"evaluated\") else:", "would be imported\") def handle(self, *args, **options): directory = options['directory'] if directory is", "policy\" def add_arguments(self, parser): parser.add_argument('-d', '--directory', dest='directory', help='The directory containing the files to", "if directory is None: print(\"No directory was provided to import\") return overwrite =", "[\"true\", \"1\"]: overwrite = True else: overwrite = False test = options['test'] if", "print(\"Testing import for files from\", directory) else: print(\"Importing files from\", directory) perseus_batch_importer.do_import() if", "test) if test: print(\"Testing import for files from\", directory) else: print(\"Importing files from\",", "that would be imported\") def handle(self, *args, **options): directory = options['directory'] if directory", "Command(BaseCommand): help = \"Imports all Perseus XML documents from a directory that match", "fact that the command may be run outside of the path where manage.py", "# Validate the arguments if directory is None: print(\"No directory was provided to", "pass # Already a boolean elif overwrite.lower() in [\"true\", \"1\"]: overwrite = True", "of the path where manage.py resides import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], \"reader\", \"importer\", \"perseus_import_policy.json\")", "works that would be imported\") def handle(self, *args, **options): directory = options['directory'] if", "= False elif overwrite in [True, False]: pass # Already a boolean elif", "test = test) if test: print(\"Testing import for files from\", directory) else: print(\"Importing", "JSONImportPolicy import os import sys class Command(BaseCommand): help 
= \"Imports all Perseus XML", "import PerseusBatchImporter from reader.importer.batch_import import JSONImportPolicy import os import sys class Command(BaseCommand): help", "the path where manage.py resides import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], \"reader\", \"importer\", \"perseus_import_policy.json\") selection_policy", "overwrite = False test = options['test'] if test is None: test = False", "import BaseCommand from reader.importer.PerseusBatchImporter import PerseusBatchImporter from reader.importer.batch_import import JSONImportPolicy import os import", "import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], \"reader\", \"importer\", \"perseus_import_policy.json\") selection_policy = JSONImportPolicy() selection_policy.load_policy( import_policy_file )", "import os import sys class Command(BaseCommand): help = \"Imports all Perseus XML documents", "from reader.importer.batch_import import JSONImportPolicy import os import sys class Command(BaseCommand): help = \"Imports", "False]: pass # Already a boolean elif overwrite.lower() in [\"true\", \"1\"]: overwrite =", "False]: pass # Already a boolean elif test.lower() in [\"true\", \"1\"]: test =", "may be run outside of the path where manage.py resides import_policy_file = os.path.join(", "= True else: test = False # Get the path to the import", "test: print(\"Testing import for files from\", directory) else: print(\"Importing files from\", directory) perseus_batch_importer.do_import()", "if overwrite is None: overwrite = False elif overwrite in [True, False]: pass", "directory, book_selection_policy = selection_policy.should_be_processed, overwrite_existing = overwrite, test = test) if test: print(\"Testing", "# Get the path to the import policy accounting for the fact that", "action=\"store_true\", dest=\"test\", help=\"Output the import parameters for any works that would be imported\")", "a boolean elif overwrite.lower() in [\"true\", \"1\"]: overwrite = True else: overwrite =", "files from\", directory) else: print(\"Importing files from\", directory) perseus_batch_importer.do_import() if test: print(\"Files from", "test.lower() in [\"true\", \"1\"]: test = True else: test = False # Get", "PerseusBatchImporter from reader.importer.batch_import import JSONImportPolicy import os import sys class Command(BaseCommand): help =", "overwrite in [True, False]: pass # Already a boolean elif overwrite.lower() in [\"true\",", "[True, False]: pass # Already a boolean elif test.lower() in [\"true\", \"1\"]: test" ]
[ "config.batch_size num_batches = 1000 / batch_size eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) model = AutoModelForSequenceClassification.from_pretrained(model_path)", "print(f\"\\tRunning batch {ite+1} of {num_batches}\", end=\"\\r\") batch = {k: v.to(device) for k, v", "v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) logits =", "model = AutoModelForSequenceClassification.from_pretrained(model_path) device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\") model.to(device) model.eval() accuracy", "num_batches = 1000 / batch_size eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) model = AutoModelForSequenceClassification.from_pretrained(model_path) device", "wandb_helpers import wandb_arg_parser import wandb def test_model(): \"\"\" Tests the trained model using", "for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) logits = outputs.logits", "eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) model = AutoModelForSequenceClassification.from_pretrained(model_path) device = torch.device(\"cuda\") if torch.cuda.is_available() else", "AutoModelForSequenceClassification.from_pretrained(model_path) device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\") model.to(device) model.eval() accuracy = 0.0", "loss are calculated and logged using Wandb. \"\"\" input_filepath = \"./data/processed\" model_path =", "dim=-1) accuracy += sum(predictions == batch[\"labels\"]) / predictions.numel() loss = outputs.loss validation_loss +=", "* (accuracy / num_batches) wandb.log( { \"validation_loss\": validation_loss / num_batches, \"validation_accuracy\": accuracy, }", "batch.items()} with torch.no_grad(): outputs = model(**batch) logits = outputs.logits predictions = torch.argmax(logits, dim=-1)", "The accuracy and loss are calculated and logged using Wandb. \"\"\" input_filepath =", "{k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) logits", "accuracy = 100 * (accuracy / num_batches) wandb.log( { \"validation_loss\": validation_loss / num_batches,", "= \"./models/finetuned_bert\" small_eval_dataset = torch.load(input_filepath + \"/eval_small.pt\") print(\"The test set concists of\") print(small_eval_dataset)", "AutoModelForSequenceClassification from wandb_helpers import wandb_arg_parser import wandb def test_model(): \"\"\" Tests the trained", "torch.argmax(logits, dim=-1) accuracy += sum(predictions == batch[\"labels\"]) / predictions.numel() loss = outputs.loss validation_loss", "batch {ite+1} of {num_batches}\", end=\"\\r\") batch = {k: v.to(device) for k, v in", "batch_size=8) model = AutoModelForSequenceClassification.from_pretrained(model_path) device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\") model.to(device) model.eval()", "the trained model using the small version of the evaluation dataset. 
The accuracy", "\"/eval_small.pt\") print(\"The test set concists of\") print(small_eval_dataset) config = wandb_arg_parser() batch_size = config.batch_size", "enumerate(eval_dataloader): print(f\"\\tRunning batch {ite+1} of {num_batches}\", end=\"\\r\") batch = {k: v.to(device) for k,", "= DataLoader(small_eval_dataset, batch_size=8) model = AutoModelForSequenceClassification.from_pretrained(model_path) device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")", "DataLoader from transformers import AutoModelForSequenceClassification from wandb_helpers import wandb_arg_parser import wandb def test_model():", "test set concists of\") print(small_eval_dataset) config = wandb_arg_parser() batch_size = config.batch_size num_batches =", "= torch.argmax(logits, dim=-1) accuracy += sum(predictions == batch[\"labels\"]) / predictions.numel() loss = outputs.loss", "/ batch_size eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) model = AutoModelForSequenceClassification.from_pretrained(model_path) device = torch.device(\"cuda\") if", "(accuracy / num_batches) wandb.log( { \"validation_loss\": validation_loss / num_batches, \"validation_accuracy\": accuracy, } )", "sum(predictions == batch[\"labels\"]) / predictions.numel() loss = outputs.loss validation_loss += loss.item() accuracy =", "= AutoModelForSequenceClassification.from_pretrained(model_path) device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\") model.to(device) model.eval() accuracy =", "torch from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification from wandb_helpers import wandb_arg_parser", "accuracy += sum(predictions == batch[\"labels\"]) / predictions.numel() loss = outputs.loss validation_loss += loss.item()", "1000 / batch_size eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) model = AutoModelForSequenceClassification.from_pretrained(model_path) device = torch.device(\"cuda\")", "= wandb_arg_parser() batch_size = config.batch_size num_batches = 1000 / batch_size eval_dataloader = DataLoader(small_eval_dataset,", "Tests the trained model using the small version of the evaluation dataset. The", "import torch from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification from wandb_helpers import", "<reponame>MariaFogh/MLOps_Transformers<filename>src/models/predict_model.py import torch from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification from wandb_helpers", "accuracy and loss are calculated and logged using Wandb. \"\"\" input_filepath = \"./data/processed\"", "batch_size eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) model = AutoModelForSequenceClassification.from_pretrained(model_path) device = torch.device(\"cuda\") if torch.cuda.is_available()", "import DataLoader from transformers import AutoModelForSequenceClassification from wandb_helpers import wandb_arg_parser import wandb def", "= torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\") model.to(device) model.eval() accuracy = 0.0 validation_loss =", "using Wandb. 
\"\"\" input_filepath = \"./data/processed\" model_path = \"./models/finetuned_bert\" small_eval_dataset = torch.load(input_filepath +", "def test_model(): \"\"\" Tests the trained model using the small version of the", "= 1000 / batch_size eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) model = AutoModelForSequenceClassification.from_pretrained(model_path) device =", "wandb_arg_parser() batch_size = config.batch_size num_batches = 1000 / batch_size eval_dataloader = DataLoader(small_eval_dataset, batch_size=8)", "in batch.items()} with torch.no_grad(): outputs = model(**batch) logits = outputs.logits predictions = torch.argmax(logits,", "logits = outputs.logits predictions = torch.argmax(logits, dim=-1) accuracy += sum(predictions == batch[\"labels\"]) /", "ite, batch in enumerate(eval_dataloader): print(f\"\\tRunning batch {ite+1} of {num_batches}\", end=\"\\r\") batch = {k:", "= outputs.logits predictions = torch.argmax(logits, dim=-1) accuracy += sum(predictions == batch[\"labels\"]) / predictions.numel()", "of the evaluation dataset. The accuracy and loss are calculated and logged using", "\"./models/finetuned_bert\" small_eval_dataset = torch.load(input_filepath + \"/eval_small.pt\") print(\"The test set concists of\") print(small_eval_dataset) config", "outputs.logits predictions = torch.argmax(logits, dim=-1) accuracy += sum(predictions == batch[\"labels\"]) / predictions.numel() loss", "+= sum(predictions == batch[\"labels\"]) / predictions.numel() loss = outputs.loss validation_loss += loss.item() accuracy", "/ num_batches) wandb.log( { \"validation_loss\": validation_loss / num_batches, \"validation_accuracy\": accuracy, } ) if", "wandb_arg_parser import wandb def test_model(): \"\"\" Tests the trained model using the small", "v in batch.items()} with torch.no_grad(): outputs = model(**batch) logits = outputs.logits predictions =", "from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification from wandb_helpers import wandb_arg_parser import", "using the small version of the evaluation dataset. The accuracy and loss are", "torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\") model.to(device) model.eval() accuracy = 0.0 validation_loss = 0.0", "= 0.0 for ite, batch in enumerate(eval_dataloader): print(f\"\\tRunning batch {ite+1} of {num_batches}\", end=\"\\r\")", "{num_batches}\", end=\"\\r\") batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad():", "the small version of the evaluation dataset. 
The accuracy and loss are calculated", "import wandb_arg_parser import wandb def test_model(): \"\"\" Tests the trained model using the", "outputs = model(**batch) logits = outputs.logits predictions = torch.argmax(logits, dim=-1) accuracy += sum(predictions", "of {num_batches}\", end=\"\\r\") batch = {k: v.to(device) for k, v in batch.items()} with", "of\") print(small_eval_dataset) config = wandb_arg_parser() batch_size = config.batch_size num_batches = 1000 / batch_size", "outputs.loss validation_loss += loss.item() accuracy = 100 * (accuracy / num_batches) wandb.log( {", "torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification from wandb_helpers import wandb_arg_parser import wandb", "\"./data/processed\" model_path = \"./models/finetuned_bert\" small_eval_dataset = torch.load(input_filepath + \"/eval_small.pt\") print(\"The test set concists", "if torch.cuda.is_available() else torch.device(\"cpu\") model.to(device) model.eval() accuracy = 0.0 validation_loss = 0.0 for", "0.0 for ite, batch in enumerate(eval_dataloader): print(f\"\\tRunning batch {ite+1} of {num_batches}\", end=\"\\r\") batch", "torch.no_grad(): outputs = model(**batch) logits = outputs.logits predictions = torch.argmax(logits, dim=-1) accuracy +=", "torch.load(input_filepath + \"/eval_small.pt\") print(\"The test set concists of\") print(small_eval_dataset) config = wandb_arg_parser() batch_size", "model.to(device) model.eval() accuracy = 0.0 validation_loss = 0.0 for ite, batch in enumerate(eval_dataloader):", "transformers import AutoModelForSequenceClassification from wandb_helpers import wandb_arg_parser import wandb def test_model(): \"\"\" Tests", "set concists of\") print(small_eval_dataset) config = wandb_arg_parser() batch_size = config.batch_size num_batches = 1000", "and logged using Wandb. \"\"\" input_filepath = \"./data/processed\" model_path = \"./models/finetuned_bert\" small_eval_dataset =", "wandb def test_model(): \"\"\" Tests the trained model using the small version of", "dataset. The accuracy and loss are calculated and logged using Wandb. \"\"\" input_filepath", "in enumerate(eval_dataloader): print(f\"\\tRunning batch {ite+1} of {num_batches}\", end=\"\\r\") batch = {k: v.to(device) for", "predictions.numel() loss = outputs.loss validation_loss += loss.item() accuracy = 100 * (accuracy /", "import AutoModelForSequenceClassification from wandb_helpers import wandb_arg_parser import wandb def test_model(): \"\"\" Tests the", "validation_loss = 0.0 for ite, batch in enumerate(eval_dataloader): print(f\"\\tRunning batch {ite+1} of {num_batches}\",", "calculated and logged using Wandb. \"\"\" input_filepath = \"./data/processed\" model_path = \"./models/finetuned_bert\" small_eval_dataset", "logged using Wandb. 
\"\"\" input_filepath = \"./data/processed\" model_path = \"./models/finetuned_bert\" small_eval_dataset = torch.load(input_filepath", "wandb.log( { \"validation_loss\": validation_loss / num_batches, \"validation_accuracy\": accuracy, } ) if __name__ ==", "for ite, batch in enumerate(eval_dataloader): print(f\"\\tRunning batch {ite+1} of {num_batches}\", end=\"\\r\") batch =", "end=\"\\r\") batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs", "batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs =", "batch_size = config.batch_size num_batches = 1000 / batch_size eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) model", "num_batches) wandb.log( { \"validation_loss\": validation_loss / num_batches, \"validation_accuracy\": accuracy, } ) if __name__", "test_model(): \"\"\" Tests the trained model using the small version of the evaluation", "model_path = \"./models/finetuned_bert\" small_eval_dataset = torch.load(input_filepath + \"/eval_small.pt\") print(\"The test set concists of\")", "torch.device(\"cpu\") model.to(device) model.eval() accuracy = 0.0 validation_loss = 0.0 for ite, batch in", "= {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch)", "DataLoader(small_eval_dataset, batch_size=8) model = AutoModelForSequenceClassification.from_pretrained(model_path) device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\") model.to(device)", "torch.cuda.is_available() else torch.device(\"cpu\") model.to(device) model.eval() accuracy = 0.0 validation_loss = 0.0 for ite,", "model(**batch) logits = outputs.logits predictions = torch.argmax(logits, dim=-1) accuracy += sum(predictions == batch[\"labels\"])", "version of the evaluation dataset. The accuracy and loss are calculated and logged", "= config.batch_size num_batches = 1000 / batch_size eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) model =", "print(small_eval_dataset) config = wandb_arg_parser() batch_size = config.batch_size num_batches = 1000 / batch_size eval_dataloader", "100 * (accuracy / num_batches) wandb.log( { \"validation_loss\": validation_loss / num_batches, \"validation_accuracy\": accuracy,", "k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) logits = outputs.logits predictions", "predictions = torch.argmax(logits, dim=-1) accuracy += sum(predictions == batch[\"labels\"]) / predictions.numel() loss =", "print(\"The test set concists of\") print(small_eval_dataset) config = wandb_arg_parser() batch_size = config.batch_size num_batches", "loss = outputs.loss validation_loss += loss.item() accuracy = 100 * (accuracy / num_batches)", "{ite+1} of {num_batches}\", end=\"\\r\") batch = {k: v.to(device) for k, v in batch.items()}", "/ predictions.numel() loss = outputs.loss validation_loss += loss.item() accuracy = 100 * (accuracy", "= \"./data/processed\" model_path = \"./models/finetuned_bert\" small_eval_dataset = torch.load(input_filepath + \"/eval_small.pt\") print(\"The test set", "model using the small version of the evaluation dataset. 
The accuracy and loss", "from transformers import AutoModelForSequenceClassification from wandb_helpers import wandb_arg_parser import wandb def test_model(): \"\"\"", "= 0.0 validation_loss = 0.0 for ite, batch in enumerate(eval_dataloader): print(f\"\\tRunning batch {ite+1}", "accuracy = 0.0 validation_loss = 0.0 for ite, batch in enumerate(eval_dataloader): print(f\"\\tRunning batch", "0.0 validation_loss = 0.0 for ite, batch in enumerate(eval_dataloader): print(f\"\\tRunning batch {ite+1} of", "the evaluation dataset. The accuracy and loss are calculated and logged using Wandb.", "\"\"\" input_filepath = \"./data/processed\" model_path = \"./models/finetuned_bert\" small_eval_dataset = torch.load(input_filepath + \"/eval_small.pt\") print(\"The", "Wandb. \"\"\" input_filepath = \"./data/processed\" model_path = \"./models/finetuned_bert\" small_eval_dataset = torch.load(input_filepath + \"/eval_small.pt\")", "with torch.no_grad(): outputs = model(**batch) logits = outputs.logits predictions = torch.argmax(logits, dim=-1) accuracy", "batch[\"labels\"]) / predictions.numel() loss = outputs.loss validation_loss += loss.item() accuracy = 100 *", "small_eval_dataset = torch.load(input_filepath + \"/eval_small.pt\") print(\"The test set concists of\") print(small_eval_dataset) config =", "\"\"\" Tests the trained model using the small version of the evaluation dataset.", "== batch[\"labels\"]) / predictions.numel() loss = outputs.loss validation_loss += loss.item() accuracy = 100", "= torch.load(input_filepath + \"/eval_small.pt\") print(\"The test set concists of\") print(small_eval_dataset) config = wandb_arg_parser()", "validation_loss += loss.item() accuracy = 100 * (accuracy / num_batches) wandb.log( { \"validation_loss\":", "= 100 * (accuracy / num_batches) wandb.log( { \"validation_loss\": validation_loss / num_batches, \"validation_accuracy\":", "model.eval() accuracy = 0.0 validation_loss = 0.0 for ite, batch in enumerate(eval_dataloader): print(f\"\\tRunning", "are calculated and logged using Wandb. \"\"\" input_filepath = \"./data/processed\" model_path = \"./models/finetuned_bert\"", "= model(**batch) logits = outputs.logits predictions = torch.argmax(logits, dim=-1) accuracy += sum(predictions ==", "\"validation_loss\": validation_loss / num_batches, \"validation_accuracy\": accuracy, } ) if __name__ == \"__main__\": test_model()", "input_filepath = \"./data/processed\" model_path = \"./models/finetuned_bert\" small_eval_dataset = torch.load(input_filepath + \"/eval_small.pt\") print(\"The test", "else torch.device(\"cpu\") model.to(device) model.eval() accuracy = 0.0 validation_loss = 0.0 for ite, batch", "+ \"/eval_small.pt\") print(\"The test set concists of\") print(small_eval_dataset) config = wandb_arg_parser() batch_size =", "small version of the evaluation dataset. The accuracy and loss are calculated and", "+= loss.item() accuracy = 100 * (accuracy / num_batches) wandb.log( { \"validation_loss\": validation_loss", "trained model using the small version of the evaluation dataset. 
The accuracy and", "batch in enumerate(eval_dataloader): print(f\"\\tRunning batch {ite+1} of {num_batches}\", end=\"\\r\") batch = {k: v.to(device)", "concists of\") print(small_eval_dataset) config = wandb_arg_parser() batch_size = config.batch_size num_batches = 1000 /", "{ \"validation_loss\": validation_loss / num_batches, \"validation_accuracy\": accuracy, } ) if __name__ == \"__main__\":", "loss.item() accuracy = 100 * (accuracy / num_batches) wandb.log( { \"validation_loss\": validation_loss /", "from wandb_helpers import wandb_arg_parser import wandb def test_model(): \"\"\" Tests the trained model", "config = wandb_arg_parser() batch_size = config.batch_size num_batches = 1000 / batch_size eval_dataloader =", "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\") model.to(device) model.eval() accuracy = 0.0 validation_loss", "import wandb def test_model(): \"\"\" Tests the trained model using the small version", "evaluation dataset. The accuracy and loss are calculated and logged using Wandb. \"\"\"", "and loss are calculated and logged using Wandb. \"\"\" input_filepath = \"./data/processed\" model_path", "= outputs.loss validation_loss += loss.item() accuracy = 100 * (accuracy / num_batches) wandb.log(" ]
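Two details in the script above are worth flagging: num_batches is derived from a hard-coded dataset size of 1000 (and is a float), and the DataLoader uses a literal batch_size=8 instead of the config value. A sketch of more robust accounting, keeping the same variable names:

# Let the DataLoader report the batch count instead of assuming 1000 samples.
eval_dataloader = DataLoader(small_eval_dataset, batch_size=batch_size)
num_batches = len(eval_dataloader)  # exact integer count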
[ "HeavyIonsAnalysis.TrackAnalysis.trackAnalyzer_cfi import * anaTrack = ppTrack.clone( trackPtMin = 0.49, trackSrc = cms.InputTag(\"hiGeneralTracks\"), vertexSrc", "cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc = cms.InputTag(\"particleFlowTmp\"), doMVA = False ) pixelTrack = anaTrack.clone( trackPtMin =", "ppTrack.clone( trackPtMin = 0.49, trackSrc = cms.InputTag(\"hiGeneralTracks\"), vertexSrc = cms.VInputTag('hiSelectedVertex'), mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'),", "= ppTrack.clone( trackPtMin = 0.49, trackSrc = cms.InputTag(\"hiGeneralTracks\"), vertexSrc = cms.VInputTag('hiSelectedVertex'), mvaSrc =", "pfCandSrc = cms.InputTag(\"particleFlowTmp\"), doMVA = False ) pixelTrack = anaTrack.clone( trackPtMin = 0.4,", "cms.InputTag(\"particleFlowTmp\"), doMVA = False ) pixelTrack = anaTrack.clone( trackPtMin = 0.4, trackSrc =", "False ) pixelTrack = anaTrack.clone( trackPtMin = 0.4, trackSrc = cms.InputTag(\"hiConformalPixelTracks\"), qualityStrings =", "* anaTrack = ppTrack.clone( trackPtMin = 0.49, trackSrc = cms.InputTag(\"hiGeneralTracks\"), vertexSrc = cms.VInputTag('hiSelectedVertex'),", ") pixelTrack = anaTrack.clone( trackPtMin = 0.4, trackSrc = cms.InputTag(\"hiConformalPixelTracks\"), qualityStrings = cms.untracked.vstring('highPurity'))", "trackSrc = cms.InputTag(\"hiGeneralTracks\"), vertexSrc = cms.VInputTag('hiSelectedVertex'), mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc = cms.InputTag(\"particleFlowTmp\"), doMVA", "= 0.4, trackSrc = cms.InputTag(\"hiConformalPixelTracks\"), qualityStrings = cms.untracked.vstring('highPurity')) trackSequencesPbPb = cms.Sequence(anaTrack) trackSequencesPP =", "cms.InputTag(\"hiGeneralTracks\"), vertexSrc = cms.VInputTag('hiSelectedVertex'), mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc = cms.InputTag(\"particleFlowTmp\"), doMVA = False", "cms from HeavyIonsAnalysis.TrackAnalysis.trackAnalyzer_cfi import * anaTrack = ppTrack.clone( trackPtMin = 0.49, trackSrc =", "as cms from HeavyIonsAnalysis.TrackAnalysis.trackAnalyzer_cfi import * anaTrack = ppTrack.clone( trackPtMin = 0.49, trackSrc", "vertexSrc = cms.VInputTag('hiSelectedVertex'), mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc = cms.InputTag(\"particleFlowTmp\"), doMVA = False )", "0.4, trackSrc = cms.InputTag(\"hiConformalPixelTracks\"), qualityStrings = cms.untracked.vstring('highPurity')) trackSequencesPbPb = cms.Sequence(anaTrack) trackSequencesPP = cms.Sequence(ppTrack)", "= cms.InputTag(\"hiGeneralTracks\"), vertexSrc = cms.VInputTag('hiSelectedVertex'), mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc = cms.InputTag(\"particleFlowTmp\"), doMVA =", "cms.VInputTag('hiSelectedVertex'), mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc = cms.InputTag(\"particleFlowTmp\"), doMVA = False ) pixelTrack =", "= cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc = cms.InputTag(\"particleFlowTmp\"), doMVA = False ) pixelTrack = anaTrack.clone( trackPtMin", "0.49, trackSrc = cms.InputTag(\"hiGeneralTracks\"), vertexSrc = cms.VInputTag('hiSelectedVertex'), mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc = cms.InputTag(\"particleFlowTmp\"),", "anaTrack = ppTrack.clone( trackPtMin = 0.49, trackSrc = cms.InputTag(\"hiGeneralTracks\"), vertexSrc = cms.VInputTag('hiSelectedVertex'), mvaSrc", "<gh_stars>0 import FWCore.ParameterSet.Config as cms from HeavyIonsAnalysis.TrackAnalysis.trackAnalyzer_cfi import * anaTrack = ppTrack.clone( trackPtMin", "= cms.InputTag(\"particleFlowTmp\"), doMVA 
= False ) pixelTrack = anaTrack.clone( trackPtMin = 0.4, trackSrc", "= cms.VInputTag('hiSelectedVertex'), mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc = cms.InputTag(\"particleFlowTmp\"), doMVA = False ) pixelTrack", "trackPtMin = 0.4, trackSrc = cms.InputTag(\"hiConformalPixelTracks\"), qualityStrings = cms.untracked.vstring('highPurity')) trackSequencesPbPb = cms.Sequence(anaTrack) trackSequencesPP", "pixelTrack = anaTrack.clone( trackPtMin = 0.4, trackSrc = cms.InputTag(\"hiConformalPixelTracks\"), qualityStrings = cms.untracked.vstring('highPurity')) trackSequencesPbPb", "mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc = cms.InputTag(\"particleFlowTmp\"), doMVA = False ) pixelTrack = anaTrack.clone(", "trackPtMin = 0.49, trackSrc = cms.InputTag(\"hiGeneralTracks\"), vertexSrc = cms.VInputTag('hiSelectedVertex'), mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc", "anaTrack.clone( trackPtMin = 0.4, trackSrc = cms.InputTag(\"hiConformalPixelTracks\"), qualityStrings = cms.untracked.vstring('highPurity')) trackSequencesPbPb = cms.Sequence(anaTrack)", "FWCore.ParameterSet.Config as cms from HeavyIonsAnalysis.TrackAnalysis.trackAnalyzer_cfi import * anaTrack = ppTrack.clone( trackPtMin = 0.49,", "= False ) pixelTrack = anaTrack.clone( trackPtMin = 0.4, trackSrc = cms.InputTag(\"hiConformalPixelTracks\"), qualityStrings", "from HeavyIonsAnalysis.TrackAnalysis.trackAnalyzer_cfi import * anaTrack = ppTrack.clone( trackPtMin = 0.49, trackSrc = cms.InputTag(\"hiGeneralTracks\"),", "import FWCore.ParameterSet.Config as cms from HeavyIonsAnalysis.TrackAnalysis.trackAnalyzer_cfi import * anaTrack = ppTrack.clone( trackPtMin =", "doMVA = False ) pixelTrack = anaTrack.clone( trackPtMin = 0.4, trackSrc = cms.InputTag(\"hiConformalPixelTracks\"),", "= anaTrack.clone( trackPtMin = 0.4, trackSrc = cms.InputTag(\"hiConformalPixelTracks\"), qualityStrings = cms.untracked.vstring('highPurity')) trackSequencesPbPb =", "= 0.49, trackSrc = cms.InputTag(\"hiGeneralTracks\"), vertexSrc = cms.VInputTag('hiSelectedVertex'), mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'), pfCandSrc =", "import * anaTrack = ppTrack.clone( trackPtMin = 0.49, trackSrc = cms.InputTag(\"hiGeneralTracks\"), vertexSrc =" ]
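For context, a sketch of how such sequences are typically wired into a CMSSW job. The process label and path name below are illustrative assumptions, not part of the recovered fragment:

# Hypothetical top-level config that consumes the sequences defined above.
import FWCore.ParameterSet.Config as cms

process = cms.Process("TrackAna")                     # illustrative process label
process.anaTrack = anaTrack                           # register the cloned analyzers
process.pixelTrack = pixelTrack
process.trackSequencesPbPb = trackSequencesPbPb
process.trkAna = cms.Path(process.trackSequencesPbPb) # hypothetical path name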
[ "k = len(arr) for i in range(k): for j in range(k): if j", "= [] for num in reversed(arr): if suffix_products: suffix_products.append(suffix_products[-1] * num) else: suffix_products.append(num)", "of numbers before the i'th index and after i'th index def products(arr): #Generate", "numbers before the i'th index and after i'th index def products(arr): #Generate prefix", "*= arr[i] return new_arr #Solution with products of numbers before the i'th index", "== len(arr) - 1: result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1] * suffix_products[i+1]) return result #Works only", "if i == 0: result.append(suffix_products[i+1]) elif i == len(arr) - 1: result.append(prefix_products[i-1]) else:", "i: new_arr[j] *= arr[i] return new_arr #Solution with products of numbers before the", "as np def MultiForEachInd(arr): new_arr = np.ones((len(arr), 1)) np.array(arr) k = len(arr) for", "return arr_b #Speed comparision between implementations test_array = [1,2,3,4,5,6,7] %timeit products(test_array) %timeit MultiForEachInd(test_array)", "products of numbers before the i'th index and after i'th index def products(arr):", "np.array(arr) k = len(arr) for i in range(k): for j in range(k): if", "== 0: result.append(suffix_products[i+1]) elif i == len(arr) - 1: result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1] *", "else: result.append(prefix_products[i-1] * suffix_products[i+1]) return result #Works only for arrays without 0 def", "#Generate Sufix products suffix_products = [] for num in reversed(arr): if suffix_products: suffix_products.append(suffix_products[-1]", "in range(k): if j != i: new_arr[j] *= arr[i] return new_arr #Solution with", "= np.ones(len(array)) arr_b /= arr_a arr_b *= np.prod(arr_a) return arr_b #Speed comparision between", "num) else: suffix_products.append(num) suffix_products = list(reversed(suffix_products)) #Generate result result = [] for i", "arrays without 0 def vectorized_products(array): arr_a = np.array(array) arr_b = np.ones(len(array)) arr_b /=", "def MultiForEachInd(arr): new_arr = np.ones((len(arr), 1)) np.array(arr) k = len(arr) for i in", "products prefix_products = [] for num in arr: if prefix_products: prefix_products.append(prefix_products[-1] * num)", "prefix_products = [] for num in arr: if prefix_products: prefix_products.append(prefix_products[-1] * num) else:", "[] for i in range(len(arr)): if i == 0: result.append(suffix_products[i+1]) elif i ==", "for i in range(len(arr)): if i == 0: result.append(suffix_products[i+1]) elif i == len(arr)", "* num) else: suffix_products.append(num) suffix_products = list(reversed(suffix_products)) #Generate result result = [] for", "numpy as np def MultiForEachInd(arr): new_arr = np.ones((len(arr), 1)) np.array(arr) k = len(arr)", "and after i'th index def products(arr): #Generate prefix products prefix_products = [] for", "0 def vectorized_products(array): arr_a = np.array(array) arr_b = np.ones(len(array)) arr_b /= arr_a arr_b", "arr[i] return new_arr #Solution with products of numbers before the i'th index and", "1: result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1] * suffix_products[i+1]) return result #Works only for arrays without", "1)) np.array(arr) k = len(arr) for i in range(k): for j in range(k):", "in arr: if prefix_products: prefix_products.append(prefix_products[-1] * num) else: prefix_products.append(num) #Generate Sufix products suffix_products", "for j in range(k): if j != i: new_arr[j] *= arr[i] return new_arr", "arr: if 
prefix_products: prefix_products.append(prefix_products[-1] * num) else: prefix_products.append(num) #Generate Sufix products suffix_products =", "def products(arr): #Generate prefix products prefix_products = [] for num in arr: if", "after i'th index def products(arr): #Generate prefix products prefix_products = [] for num", "len(arr) for i in range(k): for j in range(k): if j != i:", "new_arr = np.ones((len(arr), 1)) np.array(arr) k = len(arr) for i in range(k): for", "np.prod(arr_a) return arr_b #Speed comparision between implementations test_array = [1,2,3,4,5,6,7] %timeit products(test_array) %timeit", "prefix_products.append(num) #Generate Sufix products suffix_products = [] for num in reversed(arr): if suffix_products:", "in range(len(arr)): if i == 0: result.append(suffix_products[i+1]) elif i == len(arr) - 1:", "arr_b *= np.prod(arr_a) return arr_b #Speed comparision between implementations test_array = [1,2,3,4,5,6,7] %timeit", "else: suffix_products.append(num) suffix_products = list(reversed(suffix_products)) #Generate result result = [] for i in", "if suffix_products: suffix_products.append(suffix_products[-1] * num) else: suffix_products.append(num) suffix_products = list(reversed(suffix_products)) #Generate result result", "Sufix products suffix_products = [] for num in reversed(arr): if suffix_products: suffix_products.append(suffix_products[-1] *", "with products of numbers before the i'th index and after i'th index def", "prefix_products.append(prefix_products[-1] * num) else: prefix_products.append(num) #Generate Sufix products suffix_products = [] for num", "= [] for num in arr: if prefix_products: prefix_products.append(prefix_products[-1] * num) else: prefix_products.append(num)", "arr_a = np.array(array) arr_b = np.ones(len(array)) arr_b /= arr_a arr_b *= np.prod(arr_a) return", "#Generate result result = [] for i in range(len(arr)): if i == 0:", "range(k): if j != i: new_arr[j] *= arr[i] return new_arr #Solution with products", "import numpy as np def MultiForEachInd(arr): new_arr = np.ones((len(arr), 1)) np.array(arr) k =", "result #Works only for arrays without 0 def vectorized_products(array): arr_a = np.array(array) arr_b", "range(k): for j in range(k): if j != i: new_arr[j] *= arr[i] return", "suffix_products[i+1]) return result #Works only for arrays without 0 def vectorized_products(array): arr_a =", "i in range(len(arr)): if i == 0: result.append(suffix_products[i+1]) elif i == len(arr) -", "* suffix_products[i+1]) return result #Works only for arrays without 0 def vectorized_products(array): arr_a", "list(reversed(suffix_products)) #Generate result result = [] for i in range(len(arr)): if i ==", "prefix products prefix_products = [] for num in arr: if prefix_products: prefix_products.append(prefix_products[-1] *", "i == 0: result.append(suffix_products[i+1]) elif i == len(arr) - 1: result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1]", "result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1] * suffix_products[i+1]) return result #Works only for arrays without 0", "[] for num in arr: if prefix_products: prefix_products.append(prefix_products[-1] * num) else: prefix_products.append(num) #Generate", "products suffix_products = [] for num in reversed(arr): if suffix_products: suffix_products.append(suffix_products[-1] * num)", "[] for num in reversed(arr): if suffix_products: suffix_products.append(suffix_products[-1] * num) else: suffix_products.append(num) suffix_products", "in reversed(arr): if suffix_products: 
suffix_products.append(suffix_products[-1] * num) else: suffix_products.append(num) suffix_products = list(reversed(suffix_products)) #Generate", "suffix_products: suffix_products.append(suffix_products[-1] * num) else: suffix_products.append(num) suffix_products = list(reversed(suffix_products)) #Generate result result =", "if prefix_products: prefix_products.append(prefix_products[-1] * num) else: prefix_products.append(num) #Generate Sufix products suffix_products = []", "= np.ones((len(arr), 1)) np.array(arr) k = len(arr) for i in range(k): for j", "vectorized_products(array): arr_a = np.array(array) arr_b = np.ones(len(array)) arr_b /= arr_a arr_b *= np.prod(arr_a)", "in range(k): for j in range(k): if j != i: new_arr[j] *= arr[i]", "suffix_products.append(num) suffix_products = list(reversed(suffix_products)) #Generate result result = [] for i in range(len(arr)):", "for num in reversed(arr): if suffix_products: suffix_products.append(suffix_products[-1] * num) else: suffix_products.append(num) suffix_products =", "before the i'th index and after i'th index def products(arr): #Generate prefix products", "else: prefix_products.append(num) #Generate Sufix products suffix_products = [] for num in reversed(arr): if", "def vectorized_products(array): arr_a = np.array(array) arr_b = np.ones(len(array)) arr_b /= arr_a arr_b *=", "i == len(arr) - 1: result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1] * suffix_products[i+1]) return result #Works", "= list(reversed(suffix_products)) #Generate result result = [] for i in range(len(arr)): if i", "= np.array(array) arr_b = np.ones(len(array)) arr_b /= arr_a arr_b *= np.prod(arr_a) return arr_b", "result.append(suffix_products[i+1]) elif i == len(arr) - 1: result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1] * suffix_products[i+1]) return", "np.ones((len(arr), 1)) np.array(arr) k = len(arr) for i in range(k): for j in", "* num) else: prefix_products.append(num) #Generate Sufix products suffix_products = [] for num in", "j != i: new_arr[j] *= arr[i] return new_arr #Solution with products of numbers", "#Generate prefix products prefix_products = [] for num in arr: if prefix_products: prefix_products.append(prefix_products[-1]", "suffix_products = [] for num in reversed(arr): if suffix_products: suffix_products.append(suffix_products[-1] * num) else:", "result result = [] for i in range(len(arr)): if i == 0: result.append(suffix_products[i+1])", "= [] for i in range(len(arr)): if i == 0: result.append(suffix_products[i+1]) elif i", "elif i == len(arr) - 1: result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1] * suffix_products[i+1]) return result", "for i in range(k): for j in range(k): if j != i: new_arr[j]", "i'th index def products(arr): #Generate prefix products prefix_products = [] for num in", "without 0 def vectorized_products(array): arr_a = np.array(array) arr_b = np.ones(len(array)) arr_b /= arr_a", "num) else: prefix_products.append(num) #Generate Sufix products suffix_products = [] for num in reversed(arr):", "/= arr_a arr_b *= np.prod(arr_a) return arr_b #Speed comparision between implementations test_array =", "0: result.append(suffix_products[i+1]) elif i == len(arr) - 1: result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1] * suffix_products[i+1])", "len(arr) - 1: result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1] * suffix_products[i+1]) return result #Works only for", "products(arr): #Generate prefix products 
prefix_products = [] for num in arr: if prefix_products:", "!= i: new_arr[j] *= arr[i] return new_arr #Solution with products of numbers before", "for num in arr: if prefix_products: prefix_products.append(prefix_products[-1] * num) else: prefix_products.append(num) #Generate Sufix", "index and after i'th index def products(arr): #Generate prefix products prefix_products = []", "suffix_products = list(reversed(suffix_products)) #Generate result result = [] for i in range(len(arr)): if", "#Works only for arrays without 0 def vectorized_products(array): arr_a = np.array(array) arr_b =", "= len(arr) for i in range(k): for j in range(k): if j !=", "index def products(arr): #Generate prefix products prefix_products = [] for num in arr:", "*= np.prod(arr_a) return arr_b #Speed comparision between implementations test_array = [1,2,3,4,5,6,7] %timeit products(test_array)", "prefix_products: prefix_products.append(prefix_products[-1] * num) else: prefix_products.append(num) #Generate Sufix products suffix_products = [] for", "only for arrays without 0 def vectorized_products(array): arr_a = np.array(array) arr_b = np.ones(len(array))", "num in reversed(arr): if suffix_products: suffix_products.append(suffix_products[-1] * num) else: suffix_products.append(num) suffix_products = list(reversed(suffix_products))", "new_arr[j] *= arr[i] return new_arr #Solution with products of numbers before the i'th", "i in range(k): for j in range(k): if j != i: new_arr[j] *=", "new_arr #Solution with products of numbers before the i'th index and after i'th", "np def MultiForEachInd(arr): new_arr = np.ones((len(arr), 1)) np.array(arr) k = len(arr) for i", "MultiForEachInd(arr): new_arr = np.ones((len(arr), 1)) np.array(arr) k = len(arr) for i in range(k):", "j in range(k): if j != i: new_arr[j] *= arr[i] return new_arr #Solution", "if j != i: new_arr[j] *= arr[i] return new_arr #Solution with products of", "num in arr: if prefix_products: prefix_products.append(prefix_products[-1] * num) else: prefix_products.append(num) #Generate Sufix products", "arr_a arr_b *= np.prod(arr_a) return arr_b #Speed comparision between implementations test_array = [1,2,3,4,5,6,7]", "result = [] for i in range(len(arr)): if i == 0: result.append(suffix_products[i+1]) elif", "result.append(prefix_products[i-1] * suffix_products[i+1]) return result #Works only for arrays without 0 def vectorized_products(array):", "the i'th index and after i'th index def products(arr): #Generate prefix products prefix_products", "<reponame>Tsubanee/Algorithms-Practice<gh_stars>0 import numpy as np def MultiForEachInd(arr): new_arr = np.ones((len(arr), 1)) np.array(arr) k", "- 1: result.append(prefix_products[i-1]) else: result.append(prefix_products[i-1] * suffix_products[i+1]) return result #Works only for arrays", "return result #Works only for arrays without 0 def vectorized_products(array): arr_a = np.array(array)", "np.ones(len(array)) arr_b /= arr_a arr_b *= np.prod(arr_a) return arr_b #Speed comparision between implementations", "suffix_products.append(suffix_products[-1] * num) else: suffix_products.append(num) suffix_products = list(reversed(suffix_products)) #Generate result result = []", "np.array(array) arr_b = np.ones(len(array)) arr_b /= arr_a arr_b *= np.prod(arr_a) return arr_b #Speed", "i'th index and after i'th index def products(arr): #Generate prefix products prefix_products =", "for arrays without 0 def vectorized_products(array): arr_a = np.array(array) arr_b = np.ones(len(array)) arr_b", "return new_arr #Solution with products of 
numbers before the i'th index and after", "range(len(arr)): if i == 0: result.append(suffix_products[i+1]) elif i == len(arr) - 1: result.append(prefix_products[i-1])", "arr_b /= arr_a arr_b *= np.prod(arr_a) return arr_b #Speed comparision between implementations test_array", "arr_b = np.ones(len(array)) arr_b /= arr_a arr_b *= np.prod(arr_a) return arr_b #Speed comparision", "#Solution with products of numbers before the i'th index and after i'th index", "reversed(arr): if suffix_products: suffix_products.append(suffix_products[-1] * num) else: suffix_products.append(num) suffix_products = list(reversed(suffix_products)) #Generate result" ]
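
# Quick sanity check (an illustrative addition, not part of the original file):
# for [1, 2, 3, 4] the product of all elements except self is [24, 12, 8, 6],
# and all three implementations should agree on zero-free input. np.allclose
# is used for the vectorized version because the division introduces rounding.
if __name__ == '__main__':
    expected = [24, 12, 8, 6]
    assert products([1, 2, 3, 4]) == expected
    assert list(MultiForEachInd([1, 2, 3, 4])) == expected
    assert np.allclose(vectorized_products([1, 2, 3, 4]), expected)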
[ "itr in range(1, int(number_of_token)): token = { 'token': randomStringDigits(40), 'counter': 0, 'isAvailable': True,", "\"\"\"Generate a random string of letters and digits \"\"\" lettersAndDigits = string.ascii_letters +", "token = { 'token': randomStringDigits(40), 'counter': 0, 'isAvailable': True, 'last_active_date_time': '', 'last_inactive_date_time': ''", "return ''.join(random.choice(lettersAndDigits) for i in range(stringLength)) def getTokens(number_of_token = 5): token_list = []", "= string.ascii_letters + string.digits return ''.join(random.choice(lettersAndDigits) for i in range(stringLength)) def getTokens(number_of_token =", "= {} for itr in range(1, int(number_of_token)): token = { 'token': randomStringDigits(40), 'counter':", "def getTokens(number_of_token = 5): token_list = [] token = {} for itr in", "'token': randomStringDigits(40), 'counter': 0, 'isAvailable': True, 'last_active_date_time': '', 'last_inactive_date_time': '' } token_list.append(token) return", "string def randomStringDigits(stringLength=15): \"\"\"Generate a random string of letters and digits \"\"\" lettersAndDigits", "def randomStringDigits(stringLength=15): \"\"\"Generate a random string of letters and digits \"\"\" lettersAndDigits =", "int(number_of_token)): token = { 'token': randomStringDigits(40), 'counter': 0, 'isAvailable': True, 'last_active_date_time': '', 'last_inactive_date_time':", "for itr in range(1, int(number_of_token)): token = { 'token': randomStringDigits(40), 'counter': 0, 'isAvailable':", "randomStringDigits(stringLength=15): \"\"\"Generate a random string of letters and digits \"\"\" lettersAndDigits = string.ascii_letters", "string.ascii_letters + string.digits return ''.join(random.choice(lettersAndDigits) for i in range(stringLength)) def getTokens(number_of_token = 5):", "+ string.digits return ''.join(random.choice(lettersAndDigits) for i in range(stringLength)) def getTokens(number_of_token = 5): token_list", "{ 'token': randomStringDigits(40), 'counter': 0, 'isAvailable': True, 'last_active_date_time': '', 'last_inactive_date_time': '' } token_list.append(token)", "and digits \"\"\" lettersAndDigits = string.ascii_letters + string.digits return ''.join(random.choice(lettersAndDigits) for i in", "letters and digits \"\"\" lettersAndDigits = string.ascii_letters + string.digits return ''.join(random.choice(lettersAndDigits) for i", "range(stringLength)) def getTokens(number_of_token = 5): token_list = [] token = {} for itr", "lettersAndDigits = string.ascii_letters + string.digits return ''.join(random.choice(lettersAndDigits) for i in range(stringLength)) def getTokens(number_of_token", "for i in range(stringLength)) def getTokens(number_of_token = 5): token_list = [] token =", "randomStringDigits(40), 'counter': 0, 'isAvailable': True, 'last_active_date_time': '', 'last_inactive_date_time': '' } token_list.append(token) return token_list", "\"\"\" lettersAndDigits = string.ascii_letters + string.digits return ''.join(random.choice(lettersAndDigits) for i in range(stringLength)) def", "of letters and digits \"\"\" lettersAndDigits = string.ascii_letters + string.digits return ''.join(random.choice(lettersAndDigits) for", "import string def randomStringDigits(stringLength=15): \"\"\"Generate a random string of letters and digits \"\"\"", "in range(1, int(number_of_token)): token = { 'token': randomStringDigits(40), 'counter': 0, 'isAvailable': True, 'last_active_date_time':", "random string of letters and digits \"\"\" lettersAndDigits = string.ascii_letters + 
string.digits return", "[] token = {} for itr in range(1, int(number_of_token)): token = { 'token':", "= { 'token': randomStringDigits(40), 'counter': 0, 'isAvailable': True, 'last_active_date_time': '', 'last_inactive_date_time': '' }", "''.join(random.choice(lettersAndDigits) for i in range(stringLength)) def getTokens(number_of_token = 5): token_list = [] token", "getTokens(number_of_token = 5): token_list = [] token = {} for itr in range(1,", "{} for itr in range(1, int(number_of_token)): token = { 'token': randomStringDigits(40), 'counter': 0,", "string.digits return ''.join(random.choice(lettersAndDigits) for i in range(stringLength)) def getTokens(number_of_token = 5): token_list =", "= 5): token_list = [] token = {} for itr in range(1, int(number_of_token)):", "a random string of letters and digits \"\"\" lettersAndDigits = string.ascii_letters + string.digits", "= [] token = {} for itr in range(1, int(number_of_token)): token = {", "in range(stringLength)) def getTokens(number_of_token = 5): token_list = [] token = {} for", "5): token_list = [] token = {} for itr in range(1, int(number_of_token)): token", "string of letters and digits \"\"\" lettersAndDigits = string.ascii_letters + string.digits return ''.join(random.choice(lettersAndDigits)", "import random import string def randomStringDigits(stringLength=15): \"\"\"Generate a random string of letters and", "range(1, int(number_of_token)): token = { 'token': randomStringDigits(40), 'counter': 0, 'isAvailable': True, 'last_active_date_time': '',", "i in range(stringLength)) def getTokens(number_of_token = 5): token_list = [] token = {}", "token = {} for itr in range(1, int(number_of_token)): token = { 'token': randomStringDigits(40),", "token_list = [] token = {} for itr in range(1, int(number_of_token)): token =", "digits \"\"\" lettersAndDigits = string.ascii_letters + string.digits return ''.join(random.choice(lettersAndDigits) for i in range(stringLength))", "<filename>pool_service/tokens.py import random import string def randomStringDigits(stringLength=15): \"\"\"Generate a random string of letters", "random import string def randomStringDigits(stringLength=15): \"\"\"Generate a random string of letters and digits" ]
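
# Illustrative usage (an addition, not part of the original module): build a
# small pool and print each generated token entry.
if __name__ == '__main__':
    for entry in getTokens(3):
        print(entry['token'], entry['isAvailable'])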
[ "conclusion): count += 1 # 输出分段,提示是否继续显示内容 continueRet, flag = self.inputContinue(i, count, total, flag,", "else: s += \"\\n\" return s def __showDetailsBody(self, sessUUID = \"\", targConclusion =", "\"\", sessUUID = \"\", targConclusion=\"\"): fileNameList = [] sessLogInfoDict = self.getSessLogInfoDict() newPath =", "= 0 if sessUUID: # 若输入了callNumber则认为需要过滤 if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else", "ms: (\\d+)\", 7, [0], self.SIGN_FLAG_RTP), # RTP通道信息 (\"Flipping CID from \\\"(.*)\\\" \\<(.*)\\> to", "warningCount, \"失败\", errorCount, \"成功\", okCount) return s def __outputReslut(self, outputPath, sessUUID = \"\",", "+= \"-\" * 160 + \"\\n\" s += \"\\n\" + \"{0:*^160}\".format(\" 基本信息 \")", "= \"\" conclusion = \"\" if sessLogInfoDict.get(sessUUID, False): logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] keyInfoList =", "if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)): fileNameList.append(newFileName) return len(fileNameList), newPath, fileNameList def outputDetails(self, outputPath,", "= 'cancel' SIGN_FLAG_R_INVITE = \"recv_invite\" # SIP信令 SIP_INVITE = 'INVITE' SIP_CANCEL = 'CANCEL'", "errorCount, okCount): s = \"%s:%d\\n%s:%d\\n%s:%d\\n%s:%d\\n\" % (\"总计\", errorCount + okCount + warningCount, \"告警\",", "= detailsDict sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion # print \"\\n\", sessLogInfoDict[sessUUID][\"callNumber\"], sessLogInfoDict[sessUUID][\"result\"][\"conclusion\"], note,#, \"\\n\", keyInfoList,", "self.printList(dupl, 8, \"重复的号码:\", \"总数:%d\" % len(dupl)) # ----------------------------------------------显示详细分析结果---------------------------------------------- def __showDetailsHeader(self, sessUUID = \"\",", "sessLogInfoDict = self.getSessLogInfoDict() sessLen = len(sessLogInfoDict) process = 0 for sessUUID in sessLogInfoDict.keys():", "无 \"\"\" codeList = [] for x in keyInfoList: if x[2] == self.SIGN_FLAG_CHAN:", "self.__fuzzyMatchChannelStateCode(keyInfoList, \"6xx\"), } # 标志性处理类的状态 case_calling_invite = {\"CS_INIT__CS_ROUTING\":True, \"CS_ROUTING__CS_CONSUME_MEDIA\":True, \"calling_0\":True,} case_ringing_180 = {\"proceeding_180\":True,}", "0 if sessUUID: # 若输入了callNumber if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True)", "x[3][0].strip()) if param1 != \"\" else True) and \\ ((len(x[3]) >= 2 and", "i, (sessUUID, context) in enumerate(sessList): # 若输入了callNumber if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber", "(183<- or 180<-) 错误应答<- elif self.caseMatch(detailsDict, case_hangup_180) or self.caseMatch(detailsDict, case_hangup_183): note += \"", "OUTPUT_POSTFIX_RESULT = \".result\" OUTPUT_POSTFIX_DETAILS = \".details\" def __init__(self): self.__sessLogInfoDict = {} self.__ignoreLinesDict =", "getSignInfo(self, flag, context): \"\"\"信令的收发方向(用于上层显示输出) 参数列表: flag:keyInfoList中元组的‘状态类型’字段 context:keyInfoList中元组的‘信息’字段 返回值: 元组(FromModule, ToModule, Sign) 异常: 无", "= \"[CALLING\" + (self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE) and \"(R)\" or \"(S)\") # invite-> 200<- if", "callNumber, conclusion = conclusion) # ----------------------------------------------显示分析结果---------------------------------------------- def __showAnalysisResultHeader(self, targConclusion=\"\"): s = \"%-30s %-36s", "返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\" if PY2: return LogAnalyzer.load(self, path, rl)", "\") + \"\\n\" if k[0] not in l: s += self.getPathEx(k[0]) + \"\\n\"", "= 
self.findDupl(callNumberList) len(dupl) and self.printList(dupl, 8, \"重复的号码:\", \"总数:%d\" % len(dupl)) # ----------------------------------------------显示详细分析结果---------------------------------------------- def", "self.SESS_KEYINFO_DK:[], self.SESS_START_TIME_DK:self.getLogTime(sessLog)} else: self.__sessLogInfoDict = sessLogInfoDict self.__ignoreLinesDict = ignoreLinesDict for sessUUID in sessLogInfoDict.keys():", "= \"\"): \"\"\"清理FS的日志 参数列表: UUID:会话的UUID key:内部的字典名 返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\"", "self.OUTPUT_POSTFIX_LOG if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName) return len(fileNameList), newPath, fileNameList", "+= \"[NOT COMPLETE\" note += \"]\" sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK]", "# 取一行日志 sessLog = sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][l] # 进行正则匹配,以\"(sofia/external/\"作为开头关键字,以\")\"作为结尾,\"@\"作为分隔,提取其中的号码 # 默认按照此行日志取号码 res = self.reMatch(\"New Channel", "line_len = len(line) # 若没有找到空格,则不记录(UUID都是36长度的,若不是,则不记录) if pos is -1 or pos < 36", "if not self.makeDir(newPath): return len(fileNameList), newPath, fileNameList for sessUUID in sessLogInfoDict.keys(): sessDict =", "= [] # 输出的文件列表 # 如果存在UUID(只输出一个文件) if sessUUID: if sessLogInfoDict.get(sessUUID, False): logDict =", "if signTimePrev and (signTimeThis - signTimePrev).seconds > 4: s += \"{0:^40}\".format(\" ↑ \")", "for p in self.getPath()]) orgLogFileNames = callNumber + sessUUID + targConclusion + \"_tmp\"", "\"HANGUP\"), \"ACTIVE__HANGUP\": self.__matchCallStateChange(keyInfoList, \"ACTIVE\", \"HANGUP\"), \"calling_0\": self.__matchChannelStateDesc(keyInfoList, \"calling\"), \"proceeding_180\": self.__matchChannelStateCode(keyInfoList, \"180\"), \"proceeding_183\": self.__matchChannelStateCode(keyInfoList,", "= \"\", f = -1, l = -1, mod=\"normal\"): l = [(i, x)", "case_answerd_183 = {\"EARLY__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_hangup_invite = {\"DOWN__HANGUP\":True,} case_hangup_180 = {\"RINGING__HANGUP\":True,} case_hangup_183 =", "= self.MATCH_MOD_DETAILS) locIp, locPort, RmtIp, RmtPort, audioPayLoad, audioPTime = res if res else", "or not logDict: return s, conclusion callTime = \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1])) callNumber =", "for line, log in logList: for reExpr, expLen, dropPos, flag in reExpInfo: res", "__match(self, keyInfoList, flag, param1 = \"\", param2 = \"\", f = -1, l", "if f != -1 else True) and \\ (x[1] >= l if l", "def __matchCsStateChange(self, keyInfoList, fromState, toState): \"\"\"CS状态变迁匹配 参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool", "无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param2 = code) def __matchChannelStateDesc(self, keyInfoList, desc): \"\"\"通道状态描述匹配", "\"(R)\" or \"(S)\") # invite-> 200<- if self.caseMatch(detailsDict, case_answer_invite): note += \" ->", "self.SIGN_FLAG_CALLNUMBER), # 呼叫号码 (\"952 Hangup (.*) \\[(.*)\\] \\[(.*)\\]\", 3, [0], self.SIGN_FLAG_R_BYE), (\"Hangup (.*)", "PRINT(s) def __getAnalysisResultBody(self, sessUUID, targConclusion = \"\", show = True): sessLogInfoDict = self.getSessLogInfoDict()", "sessLogInfoDict[sessUUID] if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] 
newFileName", "\" -> TALKING\" # invite-> (183<- or 180<-) 200<- bye<-> if self.caseMatch(detailsDict, case_hangup_acitve):", "reExpr = \"(\" + fuzzyCode.replace(\"X\",\"\\\\d\").replace(\"x\", \"\\\\d\") + \")\" res = self.reMatch(reExpr, x[3][1], 1)", "SIGN_CHAN_COMPLETE = 'completing' SIGN_CHAN_TERMINATED = 'terminated' SIGN_FLAG_CALL = \"channel sm\" SIGN_CALL_HANGUP = 'HANGUP'", "= sessLogInfoDict[sessUUID][self.SESS_LOG_DK] keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] if not keyInfoList or not logDict: return s,", "else: sessLogInfoDict[sessUUID] = {self.SESS_LOG_DK:{f:{i:sessLog}}, self.SESS_FS_CALLNUMBER_DK:\"\", \\ self.SESS_RESULT_DK:{self.SESS_RESULT_CONCLUSION_DK:\"\", self.SESS_RESULT_DETAILS_DK:{}, self.SESS_RESULT_NOTE_DK:\"\"}, \\ self.SESS_KEYINFO_DK:[], self.SESS_START_TIME_DK:self.getLogTime(sessLog)} else:", "[], self.SIGN_FLAG_CALLNUMBER), # 呼叫号码 (\"952 Hangup (.*) \\[(.*)\\] \\[(.*)\\]\", 3, [0], self.SIGN_FLAG_R_BYE), (\"Hangup", "s += \"{0:^40}\".format(\" ↓ \") + \"\\n\" if k[0] not in l: s", "-- call类 entering state [proceeding][180] 为收响应消息的处理 -- channel类 AUDIO RTP [sofia/external/6797@10.0.7.152:5080] 10.0.7.176 port", "res else reason) else: res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS) reason =", "输出到文件 if sessUUID: if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True): s, c", "count def showResult(self, sessUUID = \"\", callNumber = \"\", conclusion = \"\"): return", "(.*)\", 2, [], self.SIGN_FLAG_CS), # 状态转移类的日志 (\"entering state \\[(.*)\\]\\[(.*)\\]\", 2, [], self.SIGN_FLAG_CHAN), #", "+ \"__\" + sessUUID + self.OUTPUT_POSTFIX_LOG if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)):", "% (\"媒体信息\", \"本端地址\", locIp, locPort, \"远端地址\", RmtIp, RmtPort, \"Payload\", audioPayLoad, \"ptime\", audioPTime) res", "参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CALL,", "color = color) s += \"%-30s %-36s %-30s %-7s %-s\\n\" % (callTime, sessUUID,", "= \"\"): s = \"-\" * 160 + \"\\n\" s += \"\\n总数:%d\" %", "codeList = [] for x in keyInfoList: if x[2] == self.SIGN_FLAG_CHAN: reExpr =", "in enumerate(sessList): # 若输入了callNumber或UUID则认为需要过滤 if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \\", "(\"UUID\", sessUUID) if numberFrom: s += \"%-16s: %-s\\n\" % (\"显示号码\", numberFrom) s +=", "获取呼叫号码列表 def getCallNumberList(self): sessLogInfoDict = self.getSessLogInfoDict() return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] for sessUUID in sessLogInfoDict.keys() if", "# } __ignoreLinesDict = {}# 忽略的行{文件索引:{行数:日志}} ANALYZER_TYPE_FS = 'fs' # 会话类字典key SESS_FS_CALLNUMBER_DK =", "self.reMatch(reExpr, log, expLen) if res: l = list(res) for dPos in [x for", "fileNameList.append(fileName) return len(fileNameList), newPath, fileNameList def outputOriginLog(self, outputPath, sessUUID = \"\", callNumber =", "self.SIP_BYE elif flag in [self.SIGN_FLAG_CANCEL]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_CANCEL elif flag in [self.SIGN_FLAG_S_BYE]:", "= self.getSessLogInfoDict() # 需要匹配的正则表达式 reExpInfo = [ (\"State Change (.*) -> (.*)\", 2,", "< 36 or line[0:pos].count('-') != 4: if f not in ignoreLinesDict: ignoreLinesDict[f] =", "= self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID] = {self.SESS_LOG_DK:{f:{i:sessLog}}, self.SESS_FS_CALLNUMBER_DK:\"\", \\ self.SESS_RESULT_DK:{self.SESS_RESULT_CONCLUSION_DK:\"\", 
self.SESS_RESULT_DETAILS_DK:{}, self.SESS_RESULT_NOTE_DK:\"\"}, \\ self.SESS_KEYINFO_DK:[],", "# 抽取出的关键信息分类 SIGN_FLAG_CHAN = \"chan proc\" SIGN_CHAN_CALLING = 'calling' SIGN_CHAN_PROCEDDING = 'proceeding' SIGN_CHAN_COMPLETE", "0 for sessUUID in sessLogInfoDict.keys(): process = self.printProc(process, sessLen, widgetType = \"percent\", begin=0,", "locIp, locPort, \"远端地址\", RmtIp, RmtPort, \"Payload\", audioPayLoad, \"ptime\", audioPTime) res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP,", "if reason: res = self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = self.SIGN_CHAN_TERMINATED, mod = self.MATCH_MOD_DETAILS) s", "state [proceeding][180] 为收响应消息的处理 -- channel类 AUDIO RTP [sofia/external/6797@10.0.7.152:5080] 10.0.7.176 port 24776 -> 192.168.0.178", "SIP_INVITE = 'INVITE' SIP_CANCEL = 'CANCEL' SIP_BYE = 'BYE' # 匹配模式 MATCH_MOD_NORMAL =", "1, [], self.SIGN_FLAG_R_INVITE), (\"AUDIO RTP \\[(.*)\\] (.*) port (\\d+) -> (.*) port (\\d+)", "\"\"\"通道状态码匹配 精确匹配状态码 参数列表: keyInfoList:关键信息列表 code:状态码 返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList,", "\"CS_ROUTING\"), \"CS_ROUTING__CS_CONSUME_MEDIA\": self.__matchCsStateChange(keyInfoList, \"CS_ROUTING\", \"CS_CONSUME_MEDIA\"), \"CS_CONSUME_MEDIA__CS_EXECUTE\": self.__matchCsStateChange(keyInfoList, \"CS_CONSUME_MEDIA\", \"CS_EXECUTE\"), \"DOWN__RINGING\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"RINGING\"),", "flag in [self.SIGN_FLAG_CHAN]: if context[0] in [self.SIGN_CHAN_CALLING]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_INVITE elif context[0]", "\"DOWN\", \"EARLY\"), \"DOWN__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"ACTIVE\"), \"EARLY__RINGING\": self.__matchCallStateChange(keyInfoList, \"EARLY\", \"RINGING\"), \"EARLY__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"EARLY\",", "flag = self.inputContinue(i, count, total, flag, self.__showAnalysisResultHeader, conclusion) if not continueRet: break #", "sessUUID = \"\", callNumber = \"\", conclusion = \"\"): return self.__outputDetails(outputPath, fileName =", "not logDict: return s, conclusion callTime = \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1])) callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]", "str(k[1]), str(k[2]), str(k[3])) else: s += \"\\n\" return s def __showDetailsBody(self, sessUUID =", "= \"\"): sessLogInfoDict = self.getSessLogInfoDict() # 显示头 self.__showDetailsHeader() # 显示Body count = 0", "bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = desc) # 分析会话过程 def", "self.__showDetailsBody(sessUUID, conclusion): count += 1 continueRet, flag = self.inputContinue(i, count, total, flag, self.__showDetailsHeader)", "[self.MATCH_MOD_EXTEND]: return (l[0][1][0], l[0][1][1], l[0][0]) if any(l) else False elif mod in [self.MATCH_MOD_DETAILS]:", "sessUUID): s = \"呼叫号码:%s\\nUUID:%s\\n\" % (callNumber, sessUUID) return s def __outputOriginLog(self, outputPath, sessUUID", "return \"\" logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] if not logDict or not", "process = self.printProc(process, fileLen) for i, line in enumerate(lines): # 例如:4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532", "= line.find(' ') line_len = len(line) # 若没有找到空格,则不记录(UUID都是36长度的,若不是,则不记录) if pos is -1 or", "self.getSessLogInfoDict() s = \"\" conclusion = \"\" if sessLogInfoDict.get(sessUUID, False): logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]", "from base.base import PRINT, INPUT, getColor 
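
    # Illustrative helper (an addition, not in the original file): the line
    # convention __sessCollect relies on. A session line starts with a
    # 36-character, 4-dash UUID, then a space, then the log text.
    @staticmethod
    def _isSessionLine(line):
        pos = line.find(' ')
        return pos >= 36 and line[0:pos].count('-') == 4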

    # get the call number of each session
    def __getCallNumber(self):
        """Extract call numbers.
        After the session dict is built, scan each session and extract the
        number segment by regex, then store it in the session's callNumber
        field. Sample source: (sofia/external/6010@10.0.7.152:5080), where
        6010 is the number.
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType="percent")
            for f in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys():
                flag = False
                for l in sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f].keys():
                    # take one log line
                    sessLog = sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][l]
                    # regex keyed on "(sofia/external/", ending at ")", split on "@";
                    # the number is taken from this line by default
                    res = self.reMatch("New Channel sofia\\/(.*)\\/(.*)@(.*) \\[", sessLog, 3)
                    if res:
                        sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1]
                        flag = True
                        # if the number was transferred, take the transferred number
                        res = self.reMatch("Dialplan: sofia\\/(.*)\\/(.*) Action transfer\\((\\d*)", sessLog, 3)
                        if res:
                            sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[2]
                        break
                if flag:
                    break
            else:
                # number not found; the log file format may have changed
                # print "Not find the call number. UUID:%s" % sessUUID
                pass

    # collect per-session key info
    def __sessKeyInfoCollect(self):
        """Collect key session info.
        After the session dict is built, scan each session and regex-match
        its state transitions and received messages. Examples:
          State Change CS_CONSUME_MEDIA -> CS_EXECUTE      -- core state machine (CS)
          Callstate Change ACTIVE -> HANGUP                -- call state machine (call)
          entering state [proceeding][180]                 -- response handling (channel)
          AUDIO RTP [sofia/external/6797@10.0.7.152:5080] 10.0.7.176 port 24776 -> 192.168.0.178 port 7076 codec: 18 ms: 20   -- RTP info
          Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION]   -- hangup reason
        Each hit is stored in the session's keyInfo list as a tuple
        (file index, line no, match flag, extracted result).
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        # regular expressions to match
        reExpInfo = [
            ("State Change (.*) -> (.*)", 2, [], self.SIGN_FLAG_CS),                # state-transition logs
            ("entering state \\[(.*)\\]\\[(.*)\\]", 2, [], self.SIGN_FLAG_CHAN),    # received-message logs
            ("Callstate Change (.*) -> (.*)", 2, [], self.SIGN_FLAG_CALL),          # call-state logs
            ("receiving invite from (.*) version", 1, [], self.SIGN_FLAG_R_INVITE),
            ("AUDIO RTP \\[(.*)\\] (.*) port (\\d+) -> (.*) port (\\d+) codec: (\\d+) ms: (\\d+)", 7, [0], self.SIGN_FLAG_RTP),  # RTP channel info
            ("Flipping CID from \\\"(.*)\\\" \\<(.*)\\> to \\\"(.*)\\\" \\<(.*)\\>", 4, [], self.SIGN_FLAG_CALLNUMBER),          # call number
            ("952 Hangup (.*) \\[(.*)\\] \\[(.*)\\]", 3, [0], self.SIGN_FLAG_R_BYE),
            ("Hangup (.*) \\[(.*)\\] \\[(.*)\\]", 3, [0], self.SIGN_FLAG_HANGUP),
            ("Sending BYE to(.*)", 1, [0], self.SIGN_FLAG_S_BYE),
            ("Sending CANCEL to(.*)", 1, [0], self.SIGN_FLAG_CANCEL),
        ]
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType="percent", begin=0, end=50)
            keyInfoList = []
            logFileDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
            fileList = sorted(logFileDict.items(), key=lambda logFileDict: logFileDict[0])
            for f, logDict in fileList:
                logList = sorted(logDict.items(), key=lambda logDict: logDict[0])
                for line, log in logList:
                    for reExpr, expLen, dropPos, flag in reExpInfo:
                        res = self.reMatch(reExpr, log, expLen)
                        if res:
                            l = list(res)
                            # drop capture positions that carry no useful info
                            for dPos in [x for x in sorted(dropPos, reverse=True) if dropPos and x < len(res)]:
                                try:
                                    del l[dPos]
                                except Exception as Err:
                                    PRINT("%s %s %s" % (Err, reExpr, res))
                                    raise
                            keyInfoList.append((f, line, flag, tuple(l)))
                            break
            sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] = keyInfoList

    def __match(self, keyInfoList, flag, param1="", param2="", f=-1, l=-1, mod="normal"):
        matches = [(i, x) for i, x in enumerate(keyInfoList)
                   if x[2] == flag and
                   ((len(x[3]) >= 1 and param1.strip() == x[3][0].strip()) if param1 != "" else True) and
                   ((len(x[3]) >= 2 and param2.strip() == x[3][1].strip()) if param2 != "" else True) and
                   (x[0] >= f if f != -1 else True) and
                   (x[1] >= l if l != -1 else True)]
        if mod in [self.MATCH_MOD_NORMAL]:
            return any(matches)
        elif mod in [self.MATCH_MOD_EXTEND]:
            return (matches[0][1][0], matches[0][1][1], matches[0][0]) if any(matches) else False
        elif mod in [self.MATCH_MOD_DETAILS]:
            return matches[0][1][3] if any(matches) else False
        else:
            return False

    def __matchCsStateChange(self, keyInfoList, fromState, toState):
        """Match a CS (core state machine) transition.
        Args: keyInfoList: key-info list; fromState: state before the
        transition; toState: state after it.
        Returns: bool
        """
        return self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState)

    def __matchCallStateChange(self, keyInfoList, fromState, toState):
        """Match a call state machine transition.
        Returns: bool
        """
        return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState)

    def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode):
        """Fuzzy-match channel status codes.
        An X stands for any digit, e.g. 4XX matches any response code
        starting with 4.
        Args: keyInfoList: key-info list; fuzzyCode: the fuzzy code.
        Returns: the list of matched (description, code) pairs.
        """
        codeList = []
        for x in keyInfoList:
            if x[2] == self.SIGN_FLAG_CHAN:
                reExpr = "(" + fuzzyCode.replace("X", "\\d").replace("x", "\\d") + ")"
                res = self.reMatch(reExpr, x[3][1], 1)
                if res:
                    codeList.append((x[3][0], x[3][1]))
        return codeList

    def __matchChannelStateCode(self, keyInfoList, code):
        """Match a channel status code exactly.
        Args: keyInfoList: key-info list; code: status code.
        Returns: bool
        """
        return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param2=code)

    def __matchChannelStateDesc(self, keyInfoList, desc):
        """Match a channel state description.
        Args: keyInfoList: key-info list; desc: state description.
        Returns: bool
        """
        return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1=desc)
UUID:%s\" % sessUUID pass else: pass # 会话关键信息收集 def __sessKeyInfoCollect(self): \"\"\"会话关键信息收集 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配其中的状态转移和收取消息日志。", "flag and \\ ((len(x[3]) >= 1 and param1.strip() == x[3][0].strip()) if param1 !=", "reason not in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion = \"ERROR\" if detailsDict[\"terminated_list\"]: conclusion = \"ERROR\"", "\"__\" + sessUUID + self.OUTPUT_POSTFIX_LOG) if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName)", "self.MATCH_MOD_DETAILS) disFrom, numberFrom, disTo, numberTo = res if res else (\"\",\"\",\"\",\"\") callTime =", "conclusion + \"_tmp\" fileName = \"Result\" + fileNames + self.OUTPUT_POSTFIX_RESULT context = \"\"", "conclusion) return count def showResult(self, sessUUID = \"\", callNumber = \"\", conclusion =", "% (\"呼叫开始时间\", callTime) s += \"%-16s: %-s\\n\" % (\"UUID\", sessUUID) if numberFrom: s", "sessDict = sessLogInfoDict[sessUUID] if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True: logDict =", "\") + \"\\n\\n\" s += \"%-4s %-35s %-16s %-16s %s\\n\\n\" % (\"序号\",\"信令时间\", \"源日志行号\",", "self.caseMatch(detailsDict, case_calling_invite) or self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE): conclusion = \"OK\" note = \"[CALLING\" + (self.__match(keyInfoList,", "\"__\" + sessUUID + \"__\" + targConclusion + self.OUTPUT_POSTFIX_DETAILS if self.outputEx(newPath, newFileName, self.getDetails(sessUUID,", "else True) \\ and self.__showAnalysisResultBody(sessUUID, conclusion): count += 1 # 输出分段,提示是否继续显示内容 continueRet, flag", "0, 0 # 输出到文件 if sessUUID: if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else", "orgLogFileNames) else: newPath = os.path.join(outputPath, fileName) # 创建新的目录 if not self.makeDir(newPath): return len(fileNameList),", "except Exception as Err: s = str(Err, reExpInfo[i], res) PRINT(s) raise res =", "{\"RINGING__HANGUP\":True,} case_hangup_183 = {\"EARLY__HANGUP\":True,} case_hangup_acitve = {\"ACTIVE__HANGUP\":True,} case_r_183 = {\"proceeding_183\":True,} # invite-> if", "'fs' # 会话类字典key SESS_FS_CALLNUMBER_DK = \"callNumber\" # 抽取出的关键信息分类 SIGN_FLAG_CHAN = \"chan proc\" SIGN_CHAN_CALLING", "= True break if flag: break # 没有找到号码,可能是日志文件的格式发生了变化 else: #print \"Not find the", "匹配模式 MATCH_MOD_NORMAL = \"normal\" MATCH_MOD_EXTEND = \"extend\" MATCH_MOD_DETAILS = \"details\" # 输出文件 OUTPUT_POSTFIX_LOG", "sessUUID in sessLogInfoDict: if f not in sessLogInfoDict[sessUUID][self.SESS_LOG_DK]: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f] = {i:sessLog} if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK]", "sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION] -- 挂断原因类 提取这些信息,并保存在会话字典的keyInfo中,其中以元祖的形式存放(文件索引,行号,匹配标志,提取的结果) 参数列表: 无 返回值: 无 异常: 无 \"\"\"", "self.printList(tmp, 8, \"呼叫号码列表:\", \"总数:%d\" % len(tmp)) # 重复的呼叫号码 dupl = self.findDupl(callNumberList) len(dupl) and", "成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState) def __fuzzyMatchChannelStateCode(self, keyInfoList,", "PRINT(s, end='') self.__getCallNumber() time3 = time.clock() s = \"OK (耗时:%.2f秒)\" % (time3 -", "= self.getSessLogInfoDict() s = \"\" conclusion = \"\" if sessLogInfoDict.get(sessUUID, False): logDict =", "s def __showDetails(self, sessUUID = \"\", callNumber = \"\", conclusion = \"\"): sessLogInfoDict", "# 若输入了callNumber或UUID则认为需要过滤 if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \\ and 
self.__showAnalysisResultBody(sessUUID,", "'', '', '' # 按照会话,收集日志信息 def __sessCollect(self): \"\"\"按照UUID收集会话日志 FS的日志,左边打印的就是会话UUID信息(36位数字或字母以‘-’连接的字符串,形如4541eb63-e5b0-49f0-8d2c-31e06078013f) 函数读取日志的每一行,按照UUID进行会话归类,建立本地UUID为key的字典,再以文件索引和行数作为key为字典,value为日志内容。 最后包含一些关键信息,如呼叫号码、分析结果、关键信息供分析器内部逻辑使用 参数列表: 无", "elif context[0] in [self.SIGN_CHAN_COMPLETE]: return self.MOD_OUTSIDE, self.MOD_FS, context[1] elif context[0] in [self.SIGN_CHAN_TERMINATED]: return", "time.clock() s = \"正在收集会话信息...\" PRINT(s, end='') self.__sessCollect() time2 = time.clock() s = \"OK", "flag = False for l in sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f].keys(): # 取一行日志 sessLog = sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][l] #", "True) and \\ (x[1] >= l if l != -1 else True)] if", "def showCallNumberList(self): # 呼叫号码 callNumberList = self.getCallNumberList() tmp = set(callNumberList) self.printList(tmp, 8, \"呼叫号码列表:\",", "+ self.OUTPUT_POSTFIX_DETAILS if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)): fileNameList.append(newFileName) return len(fileNameList), newPath, fileNameList def", "\"(R)\") # 判断挂断原因 res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS) st, reason =", "__init__(self): self.__sessLogInfoDict = {} self.__ignoreLinesDict = {} if PY2: return LogAnalyzer.__init__(self, self.ANALYZER_TYPE_FS) else:", "(\"Hangup (.*) \\[(.*)\\] \\[(.*)\\]\", 3, [0], self.SIGN_FLAG_HANGUP), (\"Sending BYE to(.*)\", 1, [0], self.SIGN_FLAG_S_BYE),", "(183<- or 180<-) if self.caseMatch(detailsDict, case_ringing_180) or self.caseMatch(detailsDict, case_ringing_183) or self.caseMatch(detailsDict, case_ringing_183_180) or", "codeList def __matchChannelStateCode(self, keyInfoList, code): \"\"\"通道状态码匹配 精确匹配状态码 参数列表: keyInfoList:关键信息列表 code:状态码 返回值: 成功或失败 bool", "= self.__match(keyInfoList, self.SIGN_FLAG_CALLNUMBER, mod = self.MATCH_MOD_DETAILS) disFrom, numberFrom, disTo, numberTo = res if", "keyInfoList, flag, param1 = \"\", param2 = \"\", f = -1, l =", "callNumber else True) \\ and self.__showAnalysisResultBody(sessUUID, conclusion): count += 1 # 输出分段,提示是否继续显示内容 continueRet,", "res = self.reMatch(\"(\\\\d{4})-(\\\\d{1,2})-(\\\\d{1,2}) (\\\\d{2}):(\\\\d{2}):(\\\\d{2}).(\\\\d{6})\", signTime, 7) if res: signTimePrev = signTimeThis signTimeThis =", "in sessLogInfoDict.keys(): process = self.printProc(process, sessLen, widgetType = \"percent\", begin=0, end=50) keyInfoList =", "s, c = self.__getAnalysisResultBody(sessUUID, targConclusion) if s: PRINT(s) return s def __showAnalysisResultTail(self, count,", "conclusion = \"ERROR\" else: res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS) st, reason", "count += 1 continueRet, flag = self.inputContinue(i, count, total, flag, self.__showDetailsHeader) if not", "ignoreLinesDict[f][i] = line continue # 拆分出UUID和日志信息 sessUUID, sessLog = line[0:pos], line[pos + 1:-1]", "conclusion): count += 1 continueRet, flag = self.inputContinue(i, count, total, flag, self.__showDetailsHeader) if", "in ignoreLinesDict: ignoreLinesDict[f] = {} else: ignoreLinesDict[f][i] = line continue # 拆分出UUID和日志信息 sessUUID,", "param1.strip() == x[3][0].strip()) if param1 != \"\" else True) and \\ ((len(x[3]) >=", "\\ self.caseMatch(detailsDict, case_ringinged_180) or self.caseMatch(detailsDict, case_ringinged_183): note += \" -> RINGING\" # invite->", "def __showDetails(self, sessUUID = \"\", callNumber = \"\", conclusion = \"\"): sessLogInfoDict =", "i, line in enumerate(lines): # 
例如:4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 sofia/external/6010@10.0.7.152:5080 Standard INIT", "= \"ERROR\" note += \"(recv %s)\" % detailsDict[\"terminated_list\"][0][1] else: conclusion = \"WARNING\" note", "%-30s %-6s %s\\n\" % (\"呼叫开始时间\", \"UUID\", \"呼叫号码\", \"结果\", \"备注\") return s def __getOutputResultTail(self,", "if not sessDict: return len(fileNameList), newPath, fileNameList if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber", "f != -1 else True) and \\ (x[1] >= l if l !=", "返回值: 成功解析的会话日志字典和无法解析的会话日志字典 dict,dict 异常: 无 \"\"\" ignoreLinesDict = {} sessLogInfoDict = {} fileLen", "if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName =", "self.SESS_FS_CALLNUMBER_DK) def getResultDict(self, UUID = \"\"): \"\"\"获取结果字典 参数列表: UUID:会话的UUID 返回值: 结果字典 {'conclusion':\"\", 'details':{},", "(\"Callstate Change (.*) -> (.*)\", 2, [], self.SIGN_FLAG_CALL), # 呼叫状态类的日志 (\"receiving invite from", "= [] logFileDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] fileList = sorted(logFileDict.items(), key=lambda logFileDict:logFileDict[0]) for f, logDict", "-- RTP信息类 Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION] -- 挂断原因类 提取这些信息,并保存在会话字典的keyInfo中,其中以元祖的形式存放(文件索引,行号,匹配标志,提取的结果) 参数列表: 无 返回值: 无", "[0], self.SIGN_FLAG_CANCEL), ] sessLen = len(sessLogInfoDict) process = 0 for sessUUID in sessLogInfoDict.keys():", "getkeyInfoList(self, UUID = \"\"): \"\"\"获取关键信息列表 参数列表: UUID:会话的UUID 返回值: 关键信息 [(文件索引,行数,状态类型,(信息)),] 异常: 无 \"\"\"", "Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION] -- 挂断原因类 提取这些信息,并保存在会话字典的keyInfo中,其中以元祖的形式存放(文件索引,行号,匹配标志,提取的结果) 参数列表: 无 返回值: 无 异常: 无", "= self.__getAnalysisResultBody(sessUUID, targConclusion) if s: PRINT(s) return s def __showAnalysisResultTail(self, count, targConclusion=\"\"): s", "callNumber else True) and \\ self.__showDetailsBody(sessUUID, conclusion): count += 1 else: total =", "note += \" -> RINGING\" # invite-> (183<- or 180<-) 200<- if self.caseMatch(detailsDict,", "else: return UUID, None else: return [(UUID, sessDict[UUID].get(key, False)) for UUID in sessDict.keys()]", "sorted(dropPos, reverse=True) if dropPos and x < len(res)]: try: del l[dPos] except Exception", "= note sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion # print \"\\n\", sessLogInfoDict[sessUUID][\"callNumber\"], sessLogInfoDict[sessUUID][\"result\"][\"conclusion\"],", "= \"\"): \"\"\"获取结果字典 参数列表: UUID:会话的UUID 返回值: 结果字典 {'conclusion':\"\", 'details':{}, 'note':\"\"} 异常: 无 \"\"\"", "fileName = \"\", sessUUID = \"\", callNumber = \"\", conclusion = \"\"): return", "__sessKeyInfoCollect(self): \"\"\"会话关键信息收集 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配其中的状态转移和收取消息日志。 例如: State Change CS_CONSUME_MEDIA -> CS_EXECUTE 为核心层状态机迁移 -- CS类 Callstate", "conclusion = \"\"): return self.__showResult(sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion)", "and 'yellow' or \\ conclusion.upper() in ['OK'] and 'green' conclusion = getColor(\"{0:<7}\".format(conclusion), color", "or \\ self.caseMatch(detailsDict, case_answer_183) or self.caseMatch(detailsDict, case_answerd_183): note += \" -> TALKING\" #", "if res else reason) else: res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS) reason", "or 180<-) 
200<- bye<-> if self.caseMatch(detailsDict, case_hangup_acitve): note += \" -> HANGUP\" +", "self.__outputDetails(outputPath, fileName = fileName, sessUUID = sessUUID, callNumber = callNumber, targConclusion = conclusion)", "\"__\" + targConclusion + self.OUTPUT_POSTFIX_DETAILS) if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)): fileNameList.append(newFileName) else: if", "numberTo or sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]) if locIp and RmtIp: s += \"%-16s: %s:%s:%s -> %s:%s:%s", "\"\"): \"\"\"获取关键信息列表 参数列表: UUID:会话的UUID 返回值: 关键信息 [(文件索引,行数,状态类型,(信息)),] 异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_KEYINFO_DK)", "(callNumber == c if callNumber else True): fileName = (callNumber or c) +", "sessUUID = \"\", targConclusion = \"\"): s = self.getDetails(sessUUID, targConclusion) if s: PRINT(s)", "SIGN_FLAG_S_BYE = \"send_bye\" SIGN_FLAG_CANCEL = 'cancel' SIGN_FLAG_R_INVITE = \"recv_invite\" # SIP信令 SIP_INVITE =", "outputPath, fileName = \"\", sessUUID = \"\", callNumber = \"\", conclusion = \"\"):", "(callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \\ self.__showDetailsBody(sessUUID, conclusion): count +=", "s = \"-\" * 160 + \"\\n\" s += \"\\n总数:%d\" % count PRINT(s)", "> 4: s += \"{0:^40}\".format(\" ↑ \") + \"\\n\" s += \"%s \\n\"", "context[1] elif context[0] in [self.SIGN_CHAN_COMPLETE]: return self.MOD_OUTSIDE, self.MOD_FS, context[1] elif context[0] in [self.SIGN_CHAN_TERMINATED]:", "s = \"\" conclusion = \"\" if sessLogInfoDict.get(sessUUID, False): logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] keyInfoList", "case_ringinged_180 = {\"DOWN__RINGING\":True,} case_ringinged_183 = {\"DOWN__EARLY\":True,} case_ringing_183_180 = {\"DOWN__EARLY\":True, \"proceeding_183\":True, \"EARLY__RINGING\":True, \"proceeding_180\":True,} case_answer_invite", "elif s and c.upper() in ['OK']: okCount += 1 if context: context =", "= self.reMatch(reExpr, log, expLen) if res: l = list(res) for dPos in [x", "\"\" else True) and \\ ((len(x[3]) >= 2 and param2.strip() == x[3][1].strip()) if", "-> CS_EXECUTE 为核心层状态机迁移 -- CS类 Callstate Change ACTIVE -> HANGUP 为呼叫层状态机迁移 -- call类", "= \"\"): return self.__showResult(sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion) #", "sessLen, widgetType = \"percent\") for f in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys(): flag = False for l", "return True, \"\" # 获取UUID列表 def getSessUUIDList(self): sessLogInfoDict = self.getSessLogInfoDict() return sessLogInfoDict.keys() #", "TALKING\" # invite-> 200<- bye<-> if self.caseMatch(detailsDict, case_hangup_acitve): note += \" -> HANGUP\"", "conclusion callTime = \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1])) callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] note", "= keyInfoList def __match(self, keyInfoList, flag, param1 = \"\", param2 = \"\", f", "\"hangup_reason\" SIGN_FLAG_R_BYE = 'recv_bye' SIGN_FLAG_S_BYE = \"send_bye\" SIGN_FLAG_CANCEL = 'cancel' SIGN_FLAG_R_INVITE = \"recv_invite\"", "4, \"UUID列表:\", \"总数:%d\" % len(sessUUIDList)) # 显示呼叫号码列表 def showCallNumberList(self): # 呼叫号码 callNumberList =", "fileNameList.append(newFileName) return len(fileNameList), newPath, fileNameList def outputDetails(self, outputPath, fileName = \"\", sessUUID =", "True) \\ and self.__showAnalysisResultBody(sessUUID, conclusion): count += 1 # 输出分段,提示是否继续显示内容 continueRet, flag 
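
    # A minimal sketch (an assumption, not the base class's actual code) of
    # the LogAnalyzer.caseMatch contract relied on above: a case dict matches
    # when every key it names is truthy in detailsDict.
    @staticmethod
    def _caseMatchSketch(detailsDict, case):
        return all(detailsDict.get(key) for key in case)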
=", "\"proceeding_180\":True,} case_answer_invite = {\"DOWN__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_invite = {\"DOWN__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_180 =", "rl=False): \"\"\"加载FS的日志 参数列表: path:日志路径 rl:是否重新加载 返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\" if", "%-s\\n\" % (\"呼叫开始时间\", callTime) s += \"%-16s: %-s\\n\" % (\"UUID\", sessUUID) if numberFrom:", "st, reason = res if res else (\"\", \"\") if reason: note +=", "conclusion): count += 1 else: total = len(sessLogInfoDict) flag = False sessList =", "__analysis(self): self.__sessKeyInfoCollect() self.__sessAnalysis() # 运行 def run(self, mode = \"Normal\"): time1 = time.clock()", "return len(fileNameList), newPath, fileNameList def outputOriginLog(self, outputPath, sessUUID = \"\", callNumber = \"\",", "self.__analysis() time4 = time.clock() s = \"OK (耗时:%.2f秒)\" % (time4 - time3) PRINT(s,", "sessLog if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID] = {self.SESS_LOG_DK:{f:{i:sessLog}}, self.SESS_FS_CALLNUMBER_DK:\"\",", "keyInfoList:关键信息列表 code:状态码 返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param2 =", "showCallNumberList(self): # 呼叫号码 callNumberList = self.getCallNumberList() tmp = set(callNumberList) self.printList(tmp, 8, \"呼叫号码列表:\", \"总数:%d\"", "in ['OK']: okCount += 1 else: for sessUUID in sessLogInfoDict.keys(): if (callNumber ==", "(l[0][1][0], l[0][1][1], l[0][0]) if any(l) else False elif mod in [self.MATCH_MOD_DETAILS]: return l[0][1][3]", "enumerate(keyInfoList): signTime = \"%s\" % self.getLogTime(logDict.get(k[0], {}).get(k[1], \"\")) res = self.reMatch(\"(\\\\d{4})-(\\\\d{1,2})-(\\\\d{1,2}) (\\\\d{2}):(\\\\d{2}):(\\\\d{2}).(\\\\d{6})\", signTime,", "%s:%s:%s (%s:%s %s:%s)\\n\" % (\"媒体信息\", \"本端地址\", locIp, locPort, \"远端地址\", RmtIp, RmtPort, \"Payload\", audioPayLoad,", "callNumber else True) \\ and self.__showAnalysisResultBody(sessUUID, conclusion): count += 1 else: total =", "in sessLogInfoDict.keys() if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]] # 显示UUID列表 def showSessUUIDList(self): sessUUIDList = self.getSessUUIDList() self.printList(sessUUIDList, 4,", "and (signTimeThis - signTimePrev).seconds > 4: s += \"{0:^40}\".format(\" ↑ \") + \"\\n\"", "color=\"red\", need=True)) s += \"{0:^40}\".format(\" ↓ \") + \"\\n\" if k[0] not in", "for sessUUID in sessLogInfoDict.keys(): if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True): s,", "\"CS_INIT\", \"CS_ROUTING\"), \"CS_ROUTING__CS_CONSUME_MEDIA\": self.__matchCsStateChange(keyInfoList, \"CS_ROUTING\", \"CS_CONSUME_MEDIA\"), \"CS_CONSUME_MEDIA__CS_EXECUTE\": self.__matchCsStateChange(keyInfoList, \"CS_CONSUME_MEDIA\", \"CS_EXECUTE\"), \"DOWN__RINGING\": self.__matchCallStateChange(keyInfoList, \"DOWN\",", "import LogAnalyzer # FS日志分析器 class FsLogAnalyzer(LogAnalyzer): __sessLogInfoDict = {}# 按照会话归类的日志信息 # {会话UUID:{log:{文件索引:{行数:日志}}, #", "name) # 创建新的目录,若存在则删除 self.makeDir(newPath) for sessUUID in sessLogInfoDict.keys(): logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] # 若输入了号码,则需要过滤号码", "numberFrom, disTo, numberTo = res if res else (\"\",\"\",\"\",\"\") callTime = \"%s\" %", "= \"\"): return self.__showDetails(sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion) #", "+= \" -> HANGUP\" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and \"(S)\" or \"(R)\") # 
invite->", "targConclusion)): fileNameList.append(newFileName) return len(fileNameList), newPath, fileNameList def outputDetails(self, outputPath, fileName = \"\", sessUUID", "(\"receiving invite from (.*) version\", 1, [], self.SIGN_FLAG_R_INVITE), (\"AUDIO RTP \\[(.*)\\] (.*) port", "utf-8 -*- import os import time import sys from datetime import datetime PY2", "fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState)", "\"calling_0\": self.__matchChannelStateDesc(keyInfoList, \"calling\"), \"proceeding_180\": self.__matchChannelStateCode(keyInfoList, \"180\"), \"proceeding_183\": self.__matchChannelStateCode(keyInfoList, \"183\"), \"completing_200\": self.__matchChannelStateDesc(keyInfoList, \"completing\"), \"completed_200\":", "(bye-> or 错误应答<-) elif self.caseMatch(detailsDict, case_hangup_invite): if self.caseMatch(detailsDict, case_r_183): note += \" ->", "\"(S)\" or \"(R)\") else: # invite-> (183<- or 180<-) if self.caseMatch(detailsDict, case_ringing_180) or", "% (\"显示号码\", numberFrom) s += \"%-16s: %-s\\n\" % (\"呼叫号码\", numberTo or sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]) if", "total, flag, self.__showDetailsHeader) if not continueRet: break # 显示尾 self.__showDetailsTail(count) return count #", "%-30s %-7s %-s\\n\" % (\"呼叫开始时间\", \"UUID\", \"呼叫号码\", \"结果\", \"备注\") PRINT(s) def __getAnalysisResultBody(self, sessUUID,", "# 创建新的目录,若存在则删除 self.makeDir(newPath) for sessUUID in sessLogInfoDict.keys(): logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] # 若输入了号码,则需要过滤号码 c", "mod = self.MATCH_MOD_DETAILS) st, reason = res if res else (\"\", \"\") if", "元组(FromModule, ToModule, Sign) 异常: 无 \"\"\" if flag in [self.SIGN_FLAG_CHAN]: if context[0] in", "# 按照UUID归类存放日志信息 if sessUUID in sessLogInfoDict: if f not in sessLogInfoDict[sessUUID][self.SESS_LOG_DK]: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f] =", "#fileNames = \"_\".join([os.path.split(p)[-1] for p in self.getPath()]) fileNames = sessUUID + callNumber +", "参数列表: UUID:会话的UUID 返回值: 结果字典 {'conclusion':\"\", 'details':{}, 'note':\"\"} 异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_RESULT_DK)", "\"[NOT COMPLETE\" note += \"]\" sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] =", "time2) PRINT(s, color='green') s = \"正在分析会话过程...\" PRINT(s, end='') self.__analysis() time4 = time.clock() s", "+ (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and \"(S)\" or \"(R)\") # invite-> (183<- or 180<-) 错误应答<-", "self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState) def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode): \"\"\"通道状态码模糊匹配 模糊码以X代表一个任意数字位,例如4XX,则为匹配4开头应答码 参数列表: keyInfoList:关键信息列表 fuzzyCode:模糊状态码", "to \\\"(.*)\\\" \\<(.*)\\>\", 4, [], self.SIGN_FLAG_CALLNUMBER), # 呼叫号码 (\"952 Hangup (.*) \\[(.*)\\] \\[(.*)\\]\",", "\"RINGING__HANGUP\": self.__matchCallStateChange(keyInfoList, \"RINGING\", \"HANGUP\"), \"ACTIVE__HANGUP\": self.__matchCallStateChange(keyInfoList, \"ACTIVE\", \"HANGUP\"), \"calling_0\": self.__matchChannelStateDesc(keyInfoList, \"calling\"), \"proceeding_180\": self.__matchChannelStateCode(keyInfoList,", "== sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \\ self.__showDetailsBody(sessUUID, conclusion): count += 1", "reExpr, expLen, dropPos, flag in reExpInfo: res = 
self.reMatch(reExpr, log, expLen) if res:", "# invite-> (183<- or 180<-) 200<- if self.caseMatch(detailsDict, case_answer_180) or self.caseMatch(detailsDict, case_answerd_180) or", "or \\ self.caseMatch(detailsDict, case_ringinged_180) or self.caseMatch(detailsDict, case_ringinged_183): note += \" -> RINGING\" #", "def __showDetailsTail(self, count, sessUUID = \"\", callNumber = \"\", conclusion = \"\"): s", "= \"normal\"): sessLogInfoDict = self.getSessLogInfoDict() if not sessLogInfoDict.get(sessUUID, False): return \"\" conclusion =", "else: newPath = os.path.join(outputPath, fileName) # 创建新的目录 if not self.makeDir(newPath): return len(fileNameList), newPath,", "sessLogInfoDict:sessLogInfoDict[1][self.SESS_START_TIME_DK]) for i, (sessUUID, context) in enumerate(sessList): # 若输入了callNumber或UUID则认为需要过滤 if (callNumber == context[self.SESS_FS_CALLNUMBER_DK]", "conclusion) # ----------------------------------------------显示分析结果---------------------------------------------- def __showAnalysisResultHeader(self, targConclusion=\"\"): s = \"%-30s %-36s %-30s %-7s %-s\\n\"", "sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] if not keyInfoList or not logDict: return s, conclusion callTime = \"%s\"", "not in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion = \"ERROR\" if detailsDict[\"terminated_list\"]: conclusion = \"ERROR\" note", "= self.getSessLogInfoDict() return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] for sessUUID in sessLogInfoDict.keys() if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]] # 显示UUID列表 def", "self.getSessLogInfoDict() # 例如 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:473 (sofia/external/6010@10.0.7.152:5080) Running State Change CS_INIT sessLen", "\"ERROR\" if detailsDict[\"terminated_list\"]: conclusion = \"ERROR\" note += \"(recv %s)\" % detailsDict[\"terminated_list\"][0][1] else:", "[0], self.SIGN_FLAG_HANGUP), (\"Sending BYE to(.*)\", 1, [0], self.SIGN_FLAG_S_BYE), (\"Sending CANCEL to(.*)\", 1, [0],", "invite-> (183<- or 180<-) 200<- if self.caseMatch(detailsDict, case_answer_180) or self.caseMatch(detailsDict, case_answerd_180) or \\", "flag, self.__showDetailsHeader) if not continueRet: break # 显示尾 self.__showDetailsTail(count) return count # 按照UUID搜索日志,并显示详细分析信息", "= \"OK (耗时:%.2f秒)\" % (time3 - time2) PRINT(s, color='green') s = \"正在分析会话过程...\" PRINT(s,", "SIGN_FLAG_CALLNUMBER = \"callnumber\" SIGN_FLAG_HANGUP = \"hangup_reason\" SIGN_FLAG_R_BYE = 'recv_bye' SIGN_FLAG_S_BYE = \"send_bye\" SIGN_FLAG_CANCEL", "context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \\ and self.__showAnalysisResultBody(sessUUID, conclusion): count += 1 #", "= res[1] flag = True # 若有号码变换,需要取变换的号码 res = self.reMatch(\"Dialplan: sofia\\/(.*)\\/(.*) Action transfer\\((\\d*)", "7076 codec: 18 ms: 20 -- RTP信息类 Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION] -- 挂断原因类", "\"\\nis not get time\") return sessLogInfoDict, ignoreLinesDict # 获取会话中的呼叫号码 def __getCallNumber(self): \"\"\"获取呼叫号码 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配提取其中的号码段,最后写入此路会话的字典信息callNumber中。", "lines in enumerate(self.getLines()): process = self.printProc(process, fileLen) for i, line in enumerate(lines): #", "self.SIGN_FLAG_CHAN: reExpr = \"(\" + fuzzyCode.replace(\"X\",\"\\\\d\").replace(\"x\", \"\\\\d\") + \")\" res = self.reMatch(reExpr, x[3][1],", "l[0][1][3] if any(l) else False else: return False def __matchCsStateChange(self, keyInfoList, fromState, toState):", "case_answer_183) or self.caseMatch(detailsDict, case_answerd_183): note += \" -> 
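    # Illustrative return shapes for getSessInfo (values hypothetical):
    #   getSessInfo("4befcdab-...", "callNumber") -> ("4befcdab-...", "6010")
    #   getSessInfo("", "callNumber")             -> [(uuid1, num1), (uuid2, num2), ...]
    # An unknown UUID yields (UUID, None); a missing key yields (UUID, False).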
    def getSignInfo(self, flag, context):
        """Direction of a signaling message (used by the display layer).

        Args:
            flag:    the 'sign type' field of a keyInfoList tuple
            context: the 'info' field of a keyInfoList tuple
        Returns:
            (FromModule, ToModule, Sign)
        """
        # MOD_* and SIGN_CHAN_* are assumed to come from the LogAnalyzer base,
        # which is not shown in this file
        if flag in [self.SIGN_FLAG_CHAN]:
            if context[0] in [self.SIGN_CHAN_CALLING]:
                return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_INVITE
            elif context[0] in [self.SIGN_CHAN_COMPLETE]:
                return self.MOD_OUTSIDE, self.MOD_FS, context[1]
            elif context[0] in [self.SIGN_CHAN_TERMINATED]:
                return self.MOD_OUTSIDE, self.MOD_FS, context[1]  # branch body reconstructed
        elif flag in [self.SIGN_FLAG_R_BYE]:
            return self.MOD_OUTSIDE, self.MOD_FS, self.SIP_BYE
        elif flag in [self.SIGN_FLAG_CANCEL]:
            return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_CANCEL
        elif flag in [self.SIGN_FLAG_S_BYE]:
            return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_BYE
        else:
            pass
        return '', '', ''

    # Group log lines by session
    def __sessLogInfoCollect(self):  # method name reconstructed from its call sites
        """Build the per-session log dictionary.

        Returns:
            (sessLogInfoDict, ignoreLinesDict)
        """
        ignoreLinesDict = {}
        sessLogInfoDict = {}
        fileLen = len(self.getLines())
        process = 0
        for f, lines in enumerate(self.getLines()):
            process = self.printProc(process, fileLen)
            for i, line in enumerate(lines):
                # e.g. 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 sofia/external/6010@10.0.7.152:5080 Standard INIT
                # The first space splits the session UUID (left) from the log body (right).
                pos = line.find(' ')
                line_len = len(line)
                # Skip lines whose first token is not a 36-char UUID with four dashes
                if pos != 36 or line[0:pos].count('-') != 4:
                    if f not in ignoreLinesDict:
                        ignoreLinesDict[f] = {}
                    ignoreLinesDict[f][i] = line
                    continue
                # Split into UUID and log body
                sessUUID, sessLog = line[0:pos], line[pos+1:line_len]
                # Store by UUID
                if sessUUID in sessLogInfoDict:
                    if f not in sessLogInfoDict[sessUUID][self.SESS_LOG_DK]:
                        sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f] = {i:sessLog}
                    else:
                        sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i] = sessLog
                    if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None:
                        sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog)
                else:
                    sessLogInfoDict[sessUUID] = {self.SESS_LOG_DK:{f:{i:sessLog}}, self.SESS_FS_CALLNUMBER_DK:"", \
                        self.SESS_RESULT_DK:{self.SESS_RESULT_CONCLUSION_DK:"", self.SESS_RESULT_DETAILS_DK:{}, self.SESS_RESULT_NOTE_DK:""}, \
                        self.SESS_KEYINFO_DK:[], self.SESS_START_TIME_DK:self.getLogTime(sessLog)}
        for sessUUID in sessLogInfoDict.keys():
            if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None:
                print(sessUUID, "\nhas no parseable timestamp")
        return sessLogInfoDict, ignoreLinesDict

    # Extract the call number of every session
    def __getCallNumber(self):
        """Extract call numbers.

        After the session dictionary is built, each session's lines are
        scanned with regexes to pull out the number, which is stored under
        the session's callNumber key. A typical source looks like
        (sofia/external/6010@10.0.7.152:5080), where 6010 is the number.
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType = "percent")
            for f in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys():
                flag = False
                for l in sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f].keys():
                    # take one log line
                    sessLog = sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][l]
                    # default: the "New Channel" line carries the number
                    res = self.reMatch("New Channel sofia\/(.*)\/(\d*)\@(.*?) \[", sessLog, 3)
                    if res:
                        sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1]
                        flag = True
                    # if the dialplan transferred the call, prefer the transferred number
                    res = self.reMatch("Dialplan: sofia\/(.*)\/(.*) Action transfer\((\d*) ", sessLog, 3)  # pattern tail truncated in the source
                    if res:
                        sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[2]
                        flag = True
                        break
                    res = self.reMatch("<(\d*)>->(\d*) in context", sessLog, 2)
                    if res:
                        sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1]
                        flag = True
                        break
                if flag:
                    break
            else:
                # number not found; the log format may have changed
                #print("Not find the call number. UUID:%s" % sessUUID)
                pass
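    # Illustrative (hypothetical line): the collector above splits
    #   "4befcdab-a4cc-4d6a-979f-bbff65d729b0 2016-03-21 17:41:14.701532 [DEBUG] ..."
    # at the first space, files the right-hand side under
    #   sessLogInfoDict["4befcdab-..."]["log"][fileIndex][lineNo]
    # and sends any line whose first token is not a dashed 36-char UUID to
    # ignoreLinesDict instead.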
    # Collect the key lines of every session
    def __sessKeyInfoCollect(self):
        """Collect the signaling milestones of every session.

        Interesting lines look like:
          State Change CS_INIT -> CS_ROUTING            -- core state machine (CS)
          Callstate Change ACTIVE -> HANGUP             -- call state machine
          entering state [proceeding][180]              -- response handling
          AUDIO RTP [sofia/external/6797@10.0.7.152:5080] 10.0.7.176 port 24776 -> 192.168.0.178 port 7076 codec: 18 ms: 20  -- RTP info
          Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION]  -- hangup cause
        Each hit is stored in the session's keyInfo list as a tuple
        (file index, line number, match flag, extracted groups).
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        # (regex, group count, group indexes to drop, flag)
        reExpInfo = [
            ("State Change (.*) -> (.*)", 2, [], self.SIGN_FLAG_CS),             # state transitions
            ("entering state \[(.*)\]\[(.*)\]", 2, [], self.SIGN_FLAG_CHAN),     # received messages
            ("Callstate Change (.*) -> (.*)", 2, [], self.SIGN_FLAG_CALL),       # call states
            ("receiving invite from (.*) version", 1, [], self.SIGN_FLAG_R_INVITE),
            ("AUDIO RTP \[(.*)\] (.*) port (\d+) -> (.*) port (\d+) codec: (\d+) ms: (\d+)", 7, [0], self.SIGN_FLAG_RTP),
            ("from \"(.*)\" \<(.*)\> to \"(.*)\" \<(.*)\>", 4, [], self.SIGN_FLAG_CALLNUMBER),  # call number; pattern head reconstructed, only the 'to ...' tail is visible in the source
            ("952 Hangup (.*) \[(.*)\] \[(.*)\]", 3, [0], self.SIGN_FLAG_R_BYE),
            ("Hangup (.*) \[(.*)\] \[(.*)\]", 3, [0], self.SIGN_FLAG_HANGUP),
            ("Sending BYE to(.*)", 1, [0], self.SIGN_FLAG_S_BYE),
            ("Sending CANCEL to(.*)", 1, [0], self.SIGN_FLAG_CANCEL),
        ]
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType = "percent", begin=0, end=50)
            keyInfoList = []
            logFileDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
            fileList = sorted(logFileDict.items(), key=lambda logFileDict:logFileDict[0])
            for f, logDict in fileList:
                logList = sorted(logDict.items(), key=lambda logDict:logDict[0])
                for line, log in logList:
                    for reExpr, expLen, dropPos, flag in reExpInfo:
                        res = self.reMatch(reExpr, log, expLen)
                        if res:
                            l = list(res)
                            # drop uninteresting groups, highest index first
                            for dPos in [x for x in sorted(dropPos, reverse=True) if dropPos and x < len(res)]:
                                try:
                                    del l[dPos]
                                except Exception as Err:
                                    PRINT(str((Err, reExpr, res)))  # the original built this with an invalid three-argument str() call
                                    raise
                            res = tuple(l)
                            keyInfoList.append((f, line, flag, res))
                            break
            sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] = keyInfoList

    def __match(self, keyInfoList, flag, param1 = "", param2 = "", f = -1, l = -1, mod="normal"):
        """Find key-info entries matching flag/params at or after file f, line l."""
        # 'matched' renamed from 'l' in the original, which shadowed the parameter
        matched = [(i, x) for i, x in enumerate(keyInfoList) if x[2] == flag and \
            ((len(x[3]) >= 1 and param1.strip() == x[3][0].strip()) if param1 != "" else True) and \
            ((len(x[3]) >= 2 and param2.strip() == x[3][1].strip()) if param2 != "" else True) and \
            (x[0] >= f if f != -1 else True) and \
            (x[1] >= l if l != -1 else True)]
        if mod in [self.MATCH_MOD_NORMAL]:
            return any(matched)
        elif mod in [self.MATCH_MOD_EXTEND]:
            return (matched[0][1][0], matched[0][1][1], matched[0][0]) if any(matched) else False
        elif mod in [self.MATCH_MOD_DETAILS]:
            return matched[0][1][3] if any(matched) else False
        else:
            return False

    def __matchCsStateChange(self, keyInfoList, fromState, toState):
        """Match a core (CS) state transition.

        Args:
            keyInfoList: key-info list
            fromState:   state before the transition
            toState:     state after the transition
        Returns:
            bool
        """
        return self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState)

    def __matchCallStateChange(self, keyInfoList, fromState, toState):
        """Match a call state transition.

        Args:
            keyInfoList: key-info list
            fromState:   state before the transition
            toState:     state after the transition
        Returns:
            bool
        """
        return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState)

    def __matchChannelStateCode(self, keyInfoList, code):
        """Match a channel-state status code.

        Args:
            keyInfoList: key-info list
            code:        status code
        Returns:
            bool
        """
        return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param2 = code)

    def __matchChannelStateDesc(self, keyInfoList, desc):
        """Match a channel-state description.

        Args:
            keyInfoList: key-info list
            desc:        description
        Returns:
            bool
        """
        return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = desc)

    def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode):
        """Fuzzy channel-state-code match.

        An 'X' stands for any digit, so 4XX matches any response code
        starting with 4.

        Args:
            keyInfoList: key-info list
            fuzzyCode:   fuzzy status code
        Returns:
            list of matched values
        """
        codeList = []
        for x in keyInfoList:
            if x[2] == self.SIGN_FLAG_CHAN:
                reExpr = "(" + fuzzyCode.replace("X","\\d").replace("x", "\\d") + ")"
                res = self.reMatch(reExpr, x[3][1], 1)
                res and codeList.append(x[3])
        return codeList
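    # Illustrative: a fuzzy code such as "4XX" becomes the regex "(4\d\d)", so
    # __fuzzyMatchChannelStateCode collects entries like ("terminated", "486")
    # while leaving ("proceeding", "180") alone.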
    # Analyze each session's call flow
    def __sessAnalysis(self):
        """Analyze the state transitions of every session.

        First record which transitions occurred (detailsDict), then match
        them against standard transition templates (the case_* dicts) to
        reach a conclusion.
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType = "percent", begin=50, end=100)
            keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
            #if sessUUID == "4befcdab-a4cc-4d6a-979f-bbff65d729b0":
            #    print("\n")
            #    for k in keyInfoList:
            #        print(k)
            conclusion = ""
            note = ""
            # which transitions actually happened in this session
            detailsDict = {
                "CS_NEW__CS_INIT": self.__matchCsStateChange(keyInfoList, "CS_NEW", "CS_INIT"),
                "CS_INIT__CS_ROUTING": self.__matchCsStateChange(keyInfoList, "CS_INIT", "CS_ROUTING"),
                "CS_ROUTING__CS_CONSUME_MEDIA": self.__matchCsStateChange(keyInfoList, "CS_ROUTING", "CS_CONSUME_MEDIA"),
                "CS_CONSUME_MEDIA__CS_EXECUTE": self.__matchCsStateChange(keyInfoList, "CS_CONSUME_MEDIA", "CS_EXECUTE"),
                "DOWN__RINGING": self.__matchCallStateChange(keyInfoList, "DOWN", "RINGING"),
                "DOWN__EARLY": self.__matchCallStateChange(keyInfoList, "DOWN", "EARLY"),
                "DOWN__ACTIVE": self.__matchCallStateChange(keyInfoList, "DOWN", "ACTIVE"),
                "EARLY__RINGING": self.__matchCallStateChange(keyInfoList, "EARLY", "RINGING"),
                "EARLY__ACTIVE": self.__matchCallStateChange(keyInfoList, "EARLY", "ACTIVE"),
                "RINGING__ACTIVE": self.__matchCallStateChange(keyInfoList, "RINGING", "ACTIVE"),
                "DOWN__HANGUP": self.__matchCallStateChange(keyInfoList, "DOWN", "HANGUP"),
                "EARLY__HANGUP": self.__matchCallStateChange(keyInfoList, "EARLY", "HANGUP"),
                "RINGING__HANGUP": self.__matchCallStateChange(keyInfoList, "RINGING", "HANGUP"),
                "ACTIVE__HANGUP": self.__matchCallStateChange(keyInfoList, "ACTIVE", "HANGUP"),
                "calling_0": self.__matchChannelStateDesc(keyInfoList, "calling"),
                "proceeding_180": self.__matchChannelStateCode(keyInfoList, "180"),
                "proceeding_183": self.__matchChannelStateCode(keyInfoList, "183"),
                "completing_200": self.__matchChannelStateDesc(keyInfoList, "completing"),
                "completed_200": self.__matchChannelStateDesc(keyInfoList, "completed"),
                "ready_200": self.__matchChannelStateDesc(keyInfoList, "ready"),
                "terminated_list": self.__fuzzyMatchChannelStateCode(keyInfoList, "4xx") + \
                                   self.__fuzzyMatchChannelStateCode(keyInfoList, "5xx") + \
                                   self.__fuzzyMatchChannelStateCode(keyInfoList, "6xx"),
            }
            # landmark state templates
            case_calling_invite = {"CS_INIT__CS_ROUTING":True, "CS_ROUTING__CS_CONSUME_MEDIA":True, "calling_0":True,}
            case_ringing_180 = {"proceeding_180":True,}
            case_ringing_183 = {"proceeding_183":True,}
            case_ringinged_180 = {"DOWN__RINGING":True,}
            case_ringinged_183 = {"DOWN__EARLY":True,}
            case_ringing_183_180 = {"DOWN__EARLY":True, "proceeding_183":True, "EARLY__RINGING":True, "proceeding_180":True,}
            case_answer_invite = {"DOWN__ACTIVE":True, "completing_200":True, "ready_200":True,}
            case_answerd_invite = {"DOWN__ACTIVE":True, "completed_200":True, "ready_200":True,}
            case_answer_180 = {"RINGING__ACTIVE":True, "completing_200":True, "ready_200":True,}
            case_answerd_180 = {"RINGING__ACTIVE":True, "completed_200":True, "ready_200":True,}
            case_answer_183 = {"EARLY__ACTIVE":True, "completing_200":True, "ready_200":True,}
            case_answerd_183 = {"EARLY__ACTIVE":True, "completed_200":True, "ready_200":True,}
            case_hangup_invite = {"DOWN__HANGUP":True,}
            case_hangup_180 = {"RINGING__HANGUP":True,}
            case_hangup_183 = {"EARLY__HANGUP":True,}
            case_hangup_acitve = {"ACTIVE__HANGUP":True,}
            case_r_183 = {"proceeding_183":True,}
            # invite->
            if self.caseMatch(detailsDict, case_calling_invite) or self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE):
                conclusion = "OK"
                note = "[CALLING" + (self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE) and "(R)" or "(S)")
                # invite-> (183<- or 180<-)
                if self.caseMatch(detailsDict, case_ringing_180) or self.caseMatch(detailsDict, case_ringing_183) or self.caseMatch(detailsDict, case_ringing_183_180) or \
                   self.caseMatch(detailsDict, case_ringinged_180) or self.caseMatch(detailsDict, case_ringinged_183):
                    note += " -> RINGING"
                # invite-> (183<- or 180<-) 200<-
                if self.caseMatch(detailsDict, case_answer_180) or self.caseMatch(detailsDict, case_answerd_180) or \
                   self.caseMatch(detailsDict, case_answer_183) or self.caseMatch(detailsDict, case_answerd_183):
                    note += " -> TALKING"
                # invite-> 200<-
                elif self.caseMatch(detailsDict, case_answer_invite) or self.caseMatch(detailsDict, case_answerd_invite):
                    note += " -> TALKING"
                # invite-> 200<- bye<->
                if self.caseMatch(detailsDict, case_hangup_acitve):
                    note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
                # invite-> (183<- or 180<-) error response<-
                elif self.caseMatch(detailsDict, case_hangup_180) or self.caseMatch(detailsDict, case_hangup_183):
                    note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
                # invite-> (bye-> or error response<-)
                elif self.caseMatch(detailsDict, case_hangup_invite):
                    if self.caseMatch(detailsDict, case_r_183):
                        note += " -> RINGING(183)"
                    note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
                # determine the hang-up cause
                res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS)
                st, reason = res if res else ("", "")
                if reason:
                    note += "{[" + st + "]" + reason + "}"
                    if reason not in ["NORMAL_CLEARING", "MANAGER_REQUEST"]:
                        conclusion = "ERROR"
            # no INVITE at all: rejected or incomplete dialog
            else:
                if detailsDict["terminated_list"]:
                    conclusion = "ERROR"
                    note += "(recv %s)" % detailsDict["terminated_list"][0][1]
                else:
                    conclusion = "WARNING"
                    note += "[NOT COMPLETE"
            note += "]"
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion

    # key-info collection, then per-session analysis
    def __analysis(self):
        self.__sessKeyInfoCollect()
        self.__sessAnalysis()

    # run the whole pipeline
    def run(self, mode = "Normal"):
        # NOTE: time.clock() was removed in Python 3.8; time.perf_counter()
        # is the drop-in replacement on newer interpreters.
        time1 = time.clock()
        s = "Collecting sessions..."  # progress wording reconstructed
        PRINT(s, end='')
        sessLogInfoDict, ignoreLinesDict = self.__sessLogInfoCollect()
        self.__sessLogInfoDict = sessLogInfoDict
        self.__ignoreLinesDict = ignoreLinesDict
        time2 = time.clock()
        s = "OK (elapsed: %.2fs)" % (time2 - time1)
        PRINT(s, color='green')
        s = "Extracting call numbers..."
        PRINT(s, end='')
        self.__getCallNumber()
        time3 = time.clock()
        s = "OK (elapsed: %.2fs)" % (time3 - time2)
        PRINT(s, color='green')
        s = "Analyzing call flows..."
        PRINT(s, end='')
        self.__analysis()
        time4 = time.clock()
        s = "OK (elapsed: %.2fs)" % (time4 - time3)
        PRINT(s, color='green')
        return True, ""
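    # Illustrative sketch of the template matching above. caseMatch is
    # inherited from LogAnalyzer and is not shown in this file; the analysis
    # assumes it checks that every key marked True in a template is also
    # truthy in detailsDict, e.g.
    #   case_answer_invite = {"DOWN__ACTIVE": True, "completing_200": True, "ready_200": True}
    # matches a session whose call state went DOWN -> ACTIVE with a 200 seen
    # in the completing and ready channel states, which appends " -> TALKING".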
    # list of session UUIDs
    def getSessUUIDList(self):
        sessLogInfoDict = self.getSessLogInfoDict()
        return sessLogInfoDict.keys()

    # list of call numbers
    def getCallNumberList(self):
        sessLogInfoDict = self.getSessLogInfoDict()
        return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] for sessUUID in sessLogInfoDict.keys() if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]]

    # show the UUID list
    def showSessUUIDList(self):
        sessUUIDList = self.getSessUUIDList()
        self.printList(sessUUIDList, 4, "UUID list:", "Total: %d" % len(sessUUIDList))

    # show the call-number list
    def showCallNumberList(self):
        callNumberList = self.getCallNumberList()
        tmp = set(callNumberList)
        self.printList(tmp, 8, "Call numbers:", "Total: %d" % len(tmp))
        # numbers that appear more than once (duplicate computation reconstructed)
        dupl = [n for n in tmp if callNumberList.count(n) > 1]
        dupl and self.printList(dupl, 8, "Duplicated numbers:", "Total: %d" % len(dupl))

    # ---------------------------------------- show detailed analysis ----------------------------------------
    def __showDetailsHeader(self, sessUUID = "", callNumber = "", conclusion = ""):
        return ""

    def getDetails(self, sessUUID = "", targConclusion = "", mode = "normal"):
        """Render the full analysis of one session as text."""
        sessLogInfoDict = self.getSessLogInfoDict()
        if not sessLogInfoDict.get(sessUUID, False):
            return ""
        conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK]
        if targConclusion.upper() not in conclusion.upper():
            return ""
        logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
        keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
        if not logDict or not keyInfoList:
            return ""
        res = self.__match(keyInfoList, self.SIGN_FLAG_CALLNUMBER, mod = self.MATCH_MOD_DETAILS)
        disFrom, numberFrom, disTo, numberTo = res if res else ("","","","")
        callTime = "%s" % self.getLogTime(logDict.get(keyInfoList[0][0], {}).get(keyInfoList[0][1], ""))
        res = self.__match(keyInfoList, self.SIGN_FLAG_RTP, mod = self.MATCH_MOD_DETAILS)
        locIp, locPort, RmtIp, RmtPort, audioPayLoad, audioPTime = res if res else ("","","","","","")
        # hang-up reason: prefer the Hangup line, fall back to a received BYE
        # (this glue is condensed from partially recoverable source)
        res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS)
        reason = res[1] if res else ""
        if not reason:
            res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS)
            reason = res[1] if res else reason
        note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK]
        signTimePrev = None
        signTimeThis = None
        s = ""
        if mode in ['normal']:
            s += "-" * 160 + "\n"
            s += "\n" + "{0:*^160}".format(" Basic info ") + "\n\n"
            s += "%-16s: %-s\n" % ("Call start time", callTime)
            s += "%-16s: %-s\n" % ("UUID", sessUUID)
            if numberFrom:
                s += "%-16s: %-s\n" % ("Display number", numberFrom)
            s += "%-16s: %-s\n" % ("Call number", numberTo or sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK])
            if locIp and RmtIp:
                s += "%-16s: %s:%s:%s -> %s:%s:%s (%s:%s %s:%s)\n" % ("Media", "local", locIp, locPort, "remote", RmtIp, RmtPort, "Payload", audioPayLoad, "PTime", audioPTime)
            s += "%-16s: %-s\n" % ("Result", conclusion)
            if reason:
                s += "%-16s: %-s\n" % ("Hangup reason", reason)
            s += "%-16s: %-s\n" % ("Message flow", self.showNote(note))
            s += "\n" + "{0:*^160}".format(" Signaling ") + "\n\n"
            s += "%-4s %-35s %-16s %-16s %s\n" % ("No.", "Time", "Line", "Type", "Details")
        l = []
        for i, k in enumerate(keyInfoList):
            signTime = "%s" % self.getLogTime(logDict.get(k[0], {}).get(k[1], ""))
            res = self.reMatch("(\\d{4})-(\\d{1,2})-(\\d{1,2}) (\\d{2}):(\\d{2}):(\\d{2}).(\\d{6})", signTime, 7)
            if res:
                signTimePrev = signTimeThis
                signTimeThis = datetime(int(res[0]), int(res[1]), int(res[2]), int(res[3]), int(res[4]), int(res[5]))
                # flag suspicious gaps (> 4s) between consecutive messages
                if signTimePrev and (signTimeThis - signTimePrev).seconds > 4:
                    s += "{0:^40}".format(" ↑ ") + "\n"
                    s += "%s \n" % getColor("{0:^40}".format("time gap: " + str((signTimeThis - signTimePrev).seconds) + "s", color="red", need=True))
                    s += "{0:^40}".format(" ↓ ") + "\n"
            # print each source file path once
            if k[0] not in l:
                s += self.getPathEx(k[0]) + "\n"
                l.append(k[0])
            s += "%02d. %-35s %-16s %-16s %s\n" % (i + 1, signTime, str(k[1]), str(k[2]), str(k[3]))
        s += "\n"
        return s

    def __showDetailsBody(self, sessUUID = "", targConclusion = ""):
        s = self.getDetails(sessUUID, targConclusion)
        if s:
            PRINT(s)
        return s

    def __showDetailsTail(self, count, sessUUID = "", callNumber = "", conclusion = ""):
        s = "-" * 160 + "\n"
        s += "\nTotal: %d" % count
        PRINT(s)
        return s

    def __showDetails(self, sessUUID = "", callNumber = "", conclusion = ""):
        sessLogInfoDict = self.getSessLogInfoDict()
        count = 0
        if sessUUID:
            # filter by call number when one was given
            if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \
               self.__showDetailsBody(sessUUID, conclusion):
                count += 1
        else:
            total = len(sessLogInfoDict)
            flag = False
            sessList = sorted(sessLogInfoDict.items(), key=lambda sessLogInfoDict:sessLogInfoDict[1][self.SESS_START_TIME_DK])
            for i, (sessUUID, context) in enumerate(sessList):
                if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \
                   self.__showDetailsBody(sessUUID, conclusion):
                    count += 1
                    # paginate the output; ask whether to keep printing
                    continueRet, flag = self.inputContinue(i, count, total, flag, self.__showDetailsHeader)
                    if not continueRet:
                        break
        # tail
        self.__showDetailsTail(count)
        return count

    # search by UUID and show the detailed analysis
    def showDetails(self, sessUUID = "", callNumber = "", conclusion = ""):
        return self.__showDetails(sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion)
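    # Illustrative getDetails() output (values hypothetical; the media figures
    # echo the sample log line in __sessKeyInfoCollect's docstring):
    #   Call start time : 2016-03-21 17:41:14.701532
    #   UUID            : 4befcdab-a4cc-4d6a-979f-bbff65d729b0
    #   Call number     : 6010
    #   Media           : local:10.0.7.176:24776 -> remote:192.168.0.178:7076 (Payload:18 PTime:20)
    #   Result          : OK
    # followed by the numbered signaling table; a gap longer than 4 seconds
    # between two messages is flagged with a red "time gap" marker.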
    def getSessInfo(self, UUID="", key=""):
        """Get one field of a session entry.
        Args:    UUID: session UUID
                 key: internal dict key
        Returns: tuple (UUID, value); without a UUID, a list of such tuples
        Raises:  none
        """
        sessDict = self.getSessLogInfoDict()
        if UUID:
            if sessDict.get(UUID, False):
                return UUID, sessDict[UUID].get(key, False)
            else:
                return UUID, None
        else:
            return [(UUID, sessDict[UUID].get(key, False)) for UUID in sessDict.keys()]

    def getLogDict(self, UUID=""):
        """Get the log dict.
        Args:    UUID: session UUID
        Returns: log dict, see the definition of __sessLogInfoDict
        Raises:  none
        """
        return self.getSessInfo(UUID, self.SESS_LOG_DK)

    def getCallNumber(self, UUID=""):
        """Get the call number.
        Args:    UUID: session UUID
        Returns: call number (str)
        Raises:  none
        """
        return self.getSessInfo(UUID, self.SESS_FS_CALLNUMBER_DK)

    def getResultDict(self, UUID=""):
        """Get the result dict.
        Args:    UUID: session UUID
        Returns: result dict {'conclusion': "", ...}
        Raises:  none
        """
        return self.getSessInfo(UUID, self.SESS_RESULT_DK)

    def getkeyInfoList(self, UUID=""):
        """Get the key info list.
        Args:    UUID: session UUID
        Returns: key info [(fileIndex, lineNo, flag, (info)), ...]
        Raises:  none
        """
        return self.getSessInfo(UUID, self.SESS_KEYINFO_DK)
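    # Usage sketch for the getters above (hypothetical UUID; assumes run()
    # has already populated the session dict):
    #
    #   uuid, number = analyzer.getCallNumber("4541eb63-e5b0-49f0-8d2c-31e06078013f")
    #   uuid, result = analyzer.getResultDict("4541eb63-e5b0-49f0-8d2c-31e06078013f")
    #   print(number, result and result["conclusion"])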
    def getSignInfo(self, flag, context):
        """Direction of a signaling message (for display by upper layers).
        Args:    flag: the 'flag' field of a keyInfoList tuple
                 context: the 'info' field of a keyInfoList tuple
        Returns: tuple (FromModule, ToModule, Sign)
        Raises:  none
        """
        if context and context[0] in [self.SIGN_CHAN_CALLING]:
            return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_INVITE
        elif context and context[0] in [self.SIGN_CHAN_PROCEDDING]:
            return self.MOD_OUTSIDE, self.MOD_FS, context[1]
        elif context and context[0] in [self.SIGN_CHAN_COMPLETE]:
            return self.MOD_OUTSIDE, self.MOD_FS, context[1]
        elif context and context[0] in [self.SIGN_CHAN_TERMINATED]:
            return self.MOD_OUTSIDE, self.MOD_FS, context[1]
        elif flag in [self.SIGN_FLAG_R_BYE]:
            return self.MOD_OUTSIDE, self.MOD_FS, self.SIP_BYE
        elif flag in [self.SIGN_FLAG_CANCEL]:
            return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_CANCEL
        elif flag in [self.SIGN_FLAG_S_BYE]:
            return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_BYE
        else:
            pass
        return '', '', ''
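    # For example, a received BYE is rendered as flowing from the remote side
    # into FS (MOD_OUTSIDE and MOD_FS are assumed to be inherited from
    # LogAnalyzer; this is an illustrative call, not from the original file):
    #
    #   analyzer.getSignInfo(analyzer.SIGN_FLAG_R_BYE, ("10.0.7.176",))
    #   # -> (MOD_OUTSIDE, MOD_FS, 'BYE')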
    # Group log lines by session
    def __sessCollect(self):
        """Collect session logs by UUID.
        In FS logs the left-most column is the session UUID: a 36-character
        string of digits or letters joined by '-', e.g.
        4541eb63-e5b0-49f0-8d2c-31e06078013f.
        Args:    none
        Returns: none
        Raises:  none
        """
        ignoreLinesDict = {}
        sessLogInfoDict = {}
        fileLen = len(self.getLines())
        process = 0
        for f, lines in enumerate(self.getLines()):
            process = self.printProc(process, fileLen)
            for i, line in enumerate(lines):
                # e.g.: 4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532
                #       [DEBUG] switch_core_state_machine.c:40
                #       sofia/external/6010@10.0.7.152:5080 Standard INIT
                # Find the first space: the UUID is on the left, the log text on the right.
                pos = line.find(' ')
                # No space found, or the prefix cannot be a UUID (UUIDs are 36
                # characters with four '-'): record the line as ignored.
                if pos == -1 or pos < 36 or line[0:pos].count('-') != 4:
                    ignoreLinesDict.setdefault(f, {})[i] = line
                    continue
                # Split into UUID and log text
                sessUUID, sessLog = line[0:pos], line[pos + 1:-1]
                # Store the log line under its UUID
                if sessUUID in sessLogInfoDict:
                    if f not in sessLogInfoDict[sessUUID][self.SESS_LOG_DK]:
                        sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f] = {i: sessLog}
                    else:
                        sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i] = sessLog
                    if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None:
                        sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog)
                else:
                    sessLogInfoDict[sessUUID] = {self.SESS_LOG_DK: {f: {i: sessLog}},
                        self.SESS_FS_CALLNUMBER_DK: "",
                        self.SESS_RESULT_DK: {self.SESS_RESULT_CONCLUSION_DK: "",
                                              self.SESS_RESULT_DETAILS_DK: {},
                                              self.SESS_RESULT_NOTE_DK: ""},
                        self.SESS_KEYINFO_DK: [],
                        self.SESS_START_TIME_DK: self.getLogTime(sessLog)}
        self.__sessLogInfoDict = sessLogInfoDict
        self.__ignoreLinesDict = ignoreLinesDict
        for sessUUID in sessLogInfoDict.keys():
            if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None:
                print(sessUUID, "\nhas no start time")
        return sessLogInfoDict, ignoreLinesDict
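    # A minimal standalone sketch of the UUID-prefix split on one sample line
    # (stdlib only; the sample line mirrors the one quoted in the docstring
    # above):
    #
    #   line = ("4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532 "
    #           "[DEBUG] switch_core_state_machine.c:40 Standard INIT\n")
    #   pos = line.find(' ')
    #   if pos >= 36 and line[:pos].count('-') == 4:
    #       sessUUID, sessLog = line[:pos], line[pos + 1:-1]
    #   # sessUUID == "4541eb63-e5b0-49f0-8d2c-31e06078013f"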
    # Extract the call number of each session
    def __getCallNumber(self):
        """Get the call numbers.
        After the session log dict has been built, scan every session, extract
        the number segment with regular expressions, and store it in the
        session's callNumber field.
        A sample source is (sofia/external/6010@10.0.7.152:5080), where 6010
        is the number.
        Args:    none
        Returns: none
        Raises:  none
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        # e.g. 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:473
        #      (sofia/external/6010@10.0.7.152:5080) Running State Change CS_INIT
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType="percent")
            flag = False
            for f in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys():
                for l in sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f].keys():
                    # take one log line
                    sessLog = sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][l]
                    # Regex match: "(sofia/external/" opens the segment, ")"
                    # closes it and "@" separates the number.
                    # By default take the number from this line.
                    res = self.reMatch("New Channel sofia\/(.*)\/(\d*)\@(.*?) \[", sessLog, 3)
                    if res:
                        sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1]
                        flag = True
                        break
                    res = self.reMatch("Action transfer\((\d*) XML default\)", sessLog, 1)
                    if res:
                        sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[0]
                        flag = True
                        break
                    res = self.reMatch("<(\d*)>->(\d*) in context", sessLog, 2)
                    if res:
                        sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1]
                        flag = True
                        break
                if flag:
                    break
            # Number not found: the log file format may have changed.
            if not flag:
                # print "Not find the call number. UUID:%s" % sessUUID
                pass
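    # Standalone sketch of the number extraction with stdlib re (same pattern
    # idea as the reMatch call above; reMatch itself is inherited from
    # LogAnalyzer and not shown in this file):
    #
    #   import re
    #   log = "New Channel sofia/external/6010@10.0.7.152:5080 [460ae8..]"
    #   m = re.search(r"New Channel sofia/(.*)/(\d*)@(.*?) \[", log)
    #   if m:
    #       profile, number, host = m.groups()   # number == "6010"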
    # Collect key session information
    def __sessKeyInfoCollect(self):
        """Collect the key information of each session.
        After the session log dict has been built, scan every session and use
        regular expressions to match state transitions and received messages,
        e.g.:
          State Change CS_CONSUME_MEDIA -> CS_EXECUTE    -- core state machine (CS class)
          Callstate Change ACTIVE -> HANGUP              -- call state machine (call class)
          entering state [proceeding][180]               -- response handling (channel class)
          AUDIO RTP [sofia/external/6797@10.0.7.152:5080] 10.0.7.176 port 24776
              -> 192.168.0.178 port 7076 codec: 18 ms: 20  -- RTP info
          Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA]
              [INCOMPATIBLE_DESTINATION]                 -- hangup reason
        The extracted data is stored in the session's keyInfo list as tuples
        (fileIndex, lineNo, matched flag, extracted result).
        Args:    none
        Returns: none
        Raises:  none
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        # regular expressions to match: (pattern, group count, groups to drop, flag)
        reExpInfo = [
            ("State Change (.*) -> (.*)", 2, [], self.SIGN_FLAG_CS),          # state transition logs
            ("entering state \[(.*)\]\[(.*)\]", 2, [], self.SIGN_FLAG_CHAN),  # received message logs
            ("Callstate Change (.*) -> (.*)", 2, [], self.SIGN_FLAG_CALL),    # call state logs
            ("receiving invite from (.*) version", 1, [], self.SIGN_FLAG_R_INVITE),
            ("AUDIO RTP \[(.*)\] (.*) port (\d+) -> (.*) port (\d+) codec: (\d+) ms: (\d+)",
             7, [0], self.SIGN_FLAG_RTP),                                     # RTP channel info
            ("Flipping CID from \"(.*)\" \<(.*)\> to \"(.*)\" \<(.*)\>",
             4, [], self.SIGN_FLAG_CALLNUMBER),                               # call number
            ("952 Hangup (.*) \[(.*)\] \[(.*)\]", 3, [0], self.SIGN_FLAG_HANGUP),
            ("Sending BYE to(.*)", 1, [0], self.SIGN_FLAG_S_BYE),
            ("Sending CANCEL to(.*)", 1, [0], self.SIGN_FLAG_CANCEL),
        ]
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType="percent", begin=0, end=50)
            keyInfoList = []
            logFileDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
            fileList = sorted(logFileDict.items(), key=lambda logFileDict: logFileDict[0])
            for f, logDict in fileList:
                logList = sorted(logDict.items(), key=lambda logDict: logDict[0])
                for line, log in logList:
                    for reExpr, expLen, dropPos, flag in reExpInfo:
                        res = self.reMatch(reExpr, log, expLen)
                        if res:
                            l = list(res)
                            # drop the capture groups that are not needed
                            for dPos in [x for x in sorted(dropPos, reverse=True)
                                         if dropPos and x < len(l)]:
                                del l[dPos]
                            res = tuple(l)
                            keyInfoList.append((f, line, flag, res))
                            break
            sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] = keyInfoList
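    # The reExpInfo table above drives all key-info extraction. A standalone
    # sketch of the same idea with stdlib re (hypothetical one-entry table):
    #
    #   import re
    #   log = "Callstate Change RINGING -> ACTIVE"
    #   table = [("Callstate Change (.*) -> (.*)", 2, [], "channel sm")]
    #   for pattern, n, drop, flag in table:
    #       m = re.search(pattern, log)
    #       if m:
    #           groups = [g for i, g in enumerate(m.groups()) if i not in drop]
    #           key_info = (0, 0, flag, tuple(groups))  # (fileIndex, lineNo, flag, info)
    #           break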
\".log\" OUTPUT_POSTFIX_RESULT =", "pass else: pass # 会话关键信息收集 def __sessKeyInfoCollect(self): \"\"\"会话关键信息收集 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配其中的状态转移和收取消息日志。 例如: State Change CS_CONSUME_MEDIA", "self.SIP_BYE else: pass return '', '', '' # 按照会话,收集日志信息 def __sessCollect(self): \"\"\"按照UUID收集会话日志 FS的日志,左边打印的就是会话UUID信息(36位数字或字母以‘-’连接的字符串,形如4541eb63-e5b0-49f0-8d2c-31e06078013f)", "= \"\", param2 = \"\", f = -1, l = -1, mod=\"normal\"): l", "\"本端地址\", locIp, locPort, \"远端地址\", RmtIp, RmtPort, \"Payload\", audioPayLoad, \"ptime\", audioPTime) res = self.__match(keyInfoList,", "\"MANAGER_REQUEST\"]: conclusion = \"ERROR\" else: res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS) st,", "else \"\" if reason: res = self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = self.SIGN_CHAN_TERMINATED, mod =", "sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[2] flag = True break res = self.reMatch(\"<(\\d*)>->(\\d*) in context\", sessLog,", "+ \\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"6xx\"), } # 标志性处理类的状态 case_calling_invite = {\"CS_INIT__CS_ROUTING\":True, \"CS_ROUTING__CS_CONSUME_MEDIA\":True, \"calling_0\":True,} case_ringing_180", "异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState) def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode): \"\"\"通道状态码模糊匹配", "fileName = \"Result\" + fileNames + self.OUTPUT_POSTFIX_RESULT context = \"\" warningCount, errorCount, okCount", "SIGN_CHAN_PROCEDDING = 'proceeding' SIGN_CHAN_COMPLETE = 'completing' SIGN_CHAN_TERMINATED = 'terminated' SIGN_FLAG_CALL = \"channel sm\"", "in ['normal']: s += \"-\" * 160 + \"\\n\" s += \"\\n\" +", "# print(\"\\n\") # for k in keyInfoList: # print(k) conclusion = \"\" note", "160 + \"\\n\" s += \"\\n\" + \"{0:*^160}\".format(\" 基本信息 \") + \"\\n\\n\" s", "keyInfoList, fromState, toState): \"\"\"CS状态变迁匹配 参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool 异常: 无", "sessLogInfoDict.keys(): if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: print(sessUUID, \"\\nis not get time\") return sessLogInfoDict, ignoreLinesDict", "else: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i] = sessLog if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID]", "= self.reMatch(\"New Channel sofia\\/(.*)\\/(\\d*)\\@(.*?) 
\\[\", sessLog, 3) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1] flag", "= time.clock() s = \"OK (耗时:%.2f秒)\" % (time3 - time2) PRINT(s, color='green') s", "self.__match(keyInfoList, self.SIGN_FLAG_CALLNUMBER, mod = self.MATCH_MOD_DETAILS) disFrom, numberFrom, disTo, numberTo = res if res", "= os.path.join(outputPath, fileNames) else: newPath = os.path.join(outputPath, name) # 创建新的目录,若存在则删除 self.makeDir(newPath) for sessUUID", "param2.strip() == x[3][1].strip()) if param2 != \"\" else True) and \\ (x[0] >=", "\".log\" OUTPUT_POSTFIX_RESULT = \".result\" OUTPUT_POSTFIX_DETAILS = \".details\" def __init__(self): self.__sessLogInfoDict = {} self.__ignoreLinesDict", "无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState) def __matchCallStateChange(self, keyInfoList, fromState, toState): \"\"\"call状态变迁匹配", "s += \"%-4s %-35s %-16s %-16s %s\\n\\n\" % (\"序号\",\"信令时间\", \"源日志行号\", \"消息类型\", \"详情\") l", ">= 2 and param2.strip() == x[3][1].strip()) if param2 != \"\" else True) and", "\"__\" + sessUUID + self.OUTPUT_POSTFIX_LOG if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName)", "sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName = fileName", "\"DOWN__RINGING\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"RINGING\"), \"DOWN__EARLY\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"EARLY\"), \"DOWN__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"ACTIVE\"), \"EARLY__RINGING\":", "targConclusion=\"\"): s = \"%-30s %-36s %-30s %-7s %-s\\n\" % (\"呼叫开始时间\", \"UUID\", \"呼叫号码\", \"结果\",", "def getCallNumberList(self): sessLogInfoDict = self.getSessLogInfoDict() return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] for sessUUID in sessLogInfoDict.keys() if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]]", "\"normal\"): sessLogInfoDict = self.getSessLogInfoDict() if not sessLogInfoDict.get(sessUUID, False): return \"\" conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK]", "{} if PY2: return LogAnalyzer.__init__(self, self.ANALYZER_TYPE_FS) else: return super(FsLogAnalyzer, self).__init__(self.ANALYZER_TYPE_FS) def getSessLogInfoDict(self): \"\"\"获取会话信息字典", "----------------------------------------------输出原始日志到文件---------------------------------------------- def __getOutputHeader(self, logDict, callNumber, sessUUID): s = \"呼叫号码:%s\\nUUID:%s\\n\" % (callNumber, sessUUID) return", "get time\") return sessLogInfoDict, ignoreLinesDict # 获取会话中的呼叫号码 def __getCallNumber(self): \"\"\"获取呼叫号码 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配提取其中的号码段,最后写入此路会话的字典信息callNumber中。 号码的提取样例为(sofia/external/6010@10.0.7.152:5080),其中的6010为号码 参数列表:", "\"\")) res = self.reMatch(\"(\\\\d{4})-(\\\\d{1,2})-(\\\\d{1,2}) (\\\\d{2}):(\\\\d{2}):(\\\\d{2}).(\\\\d{6})\", signTime, 7) if res: signTimePrev = signTimeThis signTimeThis", "if self.caseMatch(detailsDict, case_ringing_180) or self.caseMatch(detailsDict, case_ringing_183) or self.caseMatch(detailsDict, case_ringing_183_180) or \\ self.caseMatch(detailsDict, case_ringinged_180)", "if callNumber else True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName = fileName or (sessDict[self.SESS_FS_CALLNUMBER_DK] +", "def load(self, path, rl=False): \"\"\"加载FS的日志 参数列表: path:日志路径 rl:是否重新加载 返回值: 成功标志和错误信息 元组(bool, str) 异常:", "conclusion = \"\", fileName = \"\"): return self.__outputReslut(outputPath, 
sessUUID = sessUUID, callNumber =", "note += \"]\" sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion #", "len(fileNameList), newPath, fileNameList for sessUUID in sessLogInfoDict.keys(): sessDict = sessLogInfoDict[sessUUID] if sessDict[self.SESS_FS_CALLNUMBER_DK] ==", "\"\"\"加载FS的日志 参数列表: path:日志路径 rl:是否重新加载 返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\" if PY2:", "count, total, flag, self.__showAnalysisResultHeader, conclusion) if not continueRet: break # 显示尾 self.__showAnalysisResultTail(count, conclusion)", "self.MOD_FS, context[1] elif context[0] in [self.SIGN_CHAN_COMPLETE]: return self.MOD_OUTSIDE, self.MOD_FS, context[1] elif context[0] in", "self.getLogTime(logDict.get(keyInfoList[0][0], {}).get(keyInfoList[0][1], \"\")) res = self.__match(keyInfoList, self.SIGN_FLAG_RTP, mod = self.MATCH_MOD_DETAILS) locIp, locPort, RmtIp,", "\"ready_200\":True,} case_answer_183 = {\"EARLY__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_183 = {\"EARLY__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_hangup_invite =", "False elif mod in [self.MATCH_MOD_DETAILS]: return l[0][1][3] if any(l) else False else: return", "context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \\ self.__showDetailsBody(sessUUID, conclusion): count += 1 continueRet,", "COMPLETE\" note += \"]\" sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion", "self.SESS_FS_CALLNUMBER_DK:\"\", \\ self.SESS_RESULT_DK:{self.SESS_RESULT_CONCLUSION_DK:\"\", self.SESS_RESULT_DETAILS_DK:{}, self.SESS_RESULT_NOTE_DK:\"\"}, \\ self.SESS_KEYINFO_DK:[], self.SESS_START_TIME_DK:self.getLogTime(sessLog)} else: self.__sessLogInfoDict = sessLogInfoDict self.__ignoreLinesDict", "= \"\\n总数:%d\" % count PRINT(s) def __showResult(self, sessUUID = \"\", callNumber = \"\",", "\"_tmp\" newPath = os.path.join(outputPath, orgLogFileNames) else: newPath = os.path.join(outputPath, fileName) # 创建新的目录 if", "\")\" res = self.reMatch(reExpr, x[3][1], 1) res and codeList.append(x[3]) return codeList def __matchChannelStateCode(self,", "-> 192.168.0.178 port 7076 codec: 18 ms: 20 -- RTP信息类 Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA]", "self.SIGN_FLAG_CHAN, param1 = desc) # 分析会话过程 def __sessAnalysis(self): \"\"\"会话分析 分析每路会话的状态变迁过程。首先确定有哪些状态在变迁,然后建立状态迁移标准模板,去匹配其中的过程 参数列表: 无 返回值:", "callNumber = callNumber, conclusion = conclusion, fileName = fileName) # ----------------------------------------------输出原始日志到文件---------------------------------------------- def __getOutputHeader(self,", "{\"DOWN__HANGUP\":True,} case_hangup_180 = {\"RINGING__HANGUP\":True,} case_hangup_183 = {\"EARLY__HANGUP\":True,} case_hangup_acitve = {\"ACTIVE__HANGUP\":True,} case_r_183 = {\"proceeding_183\":True,}", "def __outputOriginLog(self, outputPath, sessUUID = \"\", callNumber = \"\", name = \"\"): sessLogInfoDict", "return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState) def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode): \"\"\"通道状态码模糊匹配 模糊码以X代表一个任意数字位,例如4XX,则为匹配4开头应答码 参数列表: keyInfoList:关键信息列表", "返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, 
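    # caseMatch (inherited from LogAnalyzer, not defined in this file) is
    # assumed to check that every key marked True in the template is also
    # truthy in detailsDict, i.e. a subset test. A standalone sketch of that
    # idea:
    #
    #   def case_match(details, case):
    #       return all(details.get(k) for k, v in case.items() if v)
    #
    #   case_answer_180 = {"RINGING__ACTIVE": True, "completing_200": True, "ready_200": True}
    #   case_match({"RINGING__ACTIVE": True, "completing_200": True,
    #               "ready_200": True, "DOWN__RINGING": True}, case_answer_180)  # True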
    # Analyze the session logs
    def __analysis(self):
        self.__sessKeyInfoCollect()
        self.__sessAnalysis()

    # Run the full pipeline
    def run(self, mode="Normal"):
        time1 = time.clock()
        s = "Collecting session info..."
        PRINT(s, end='')
        self.__sessCollect()
        time2 = time.clock()
        s = "OK (took %.2fs)" % (time2 - time1)
        PRINT(s, color='green')
        s = "Extracting call numbers..."
        PRINT(s, end='')
        self.__getCallNumber()
        time3 = time.clock()
        s = "OK (took %.2fs)" % (time3 - time2)
        PRINT(s, color='green')
        s = "Analyzing call flows..."
        PRINT(s, end='')
        self.__analysis()
        time4 = time.clock()
        s = "OK (took %.2fs)" % (time4 - time3)
        PRINT(s, color='green')
        return
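    # Note: time.clock() was removed in Python 3.8. If this module has to run
    # on newer interpreters, a guarded alias keeps the calls above unchanged
    # (a sketch, not part of the original source):
    #
    #   if not hasattr(time, "clock"):
    #       time.clock = time.perf_counter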
    # Get the UUID list
    def getSessUUIDList(self):
        sessLogInfoDict = self.getSessLogInfoDict()
        return sessLogInfoDict.keys()

    # Get the call number list
    def getCallNumberList(self):
        sessLogInfoDict = self.getSessLogInfoDict()
        return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
                for sessUUID in sessLogInfoDict.keys()
                if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]]

    # Show the UUID list
    def showSessUUIDList(self):
        sessUUIDList = self.getSessUUIDList()
        self.printList(sessUUIDList, 4, "UUID list:", "total: %d" % len(sessUUIDList))

    # Show the call number list
    def showCallNumberList(self):
        callNumberList = self.getCallNumberList()
        tmp = set(callNumberList)
        self.printList(tmp, 8, "call numbers:", "total: %d" % len(tmp))
        # duplicated call numbers
        dupl = self.findDupl(callNumberList)
        len(dupl) and self.printList(dupl, 8, "duplicated numbers:", "total: %d" % len(dupl))
    def getDetails(self, sessUUID="", targConclusion="", mode="normal"):
        sessLogInfoDict = self.getSessLogInfoDict()
        if not sessLogInfoDict.get(sessUUID, False):
            return ""
        conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK]
        if targConclusion.upper() not in conclusion.upper():
            return ""
        logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
        keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
        if not logDict or not keyInfoList:
            return ""
        res = self.__match(keyInfoList, self.SIGN_FLAG_CALLNUMBER, mod=self.MATCH_MOD_DETAILS)
        disFrom, numberFrom, disTo, numberTo = res if res else ("", "", "", "")
        callTime = "%s" % self.getLogTime(logDict.get(keyInfoList[0][0], {}).get(keyInfoList[0][1], ""))
        res = self.__match(keyInfoList, self.SIGN_FLAG_RTP, mod=self.MATCH_MOD_DETAILS)
        locIp, locPort, RmtIp, RmtPort, audioPayLoad, audioPTime = res if res else ("", "", "", "", "", "")
        note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK]
        s = ""
        if mode in ['normal']:
            s += "-" * 160 + "\n"
        s += "\n" + "{0:*^160}".format(" basic info ") + "\n\n"
        s += "%-16s: %-s\n" % ("call start time", callTime)
        s += "%-16s: %-s\n" % ("UUID", sessUUID)
        if numberFrom:
            s += "%-16s: %-s\n" % ("display number", numberFrom)
        s += "%-16s: %-s\n" % ("call number", numberTo or sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK])
        if locIp and RmtIp:
            s += "%-16s: %s:%s:%s -> %s:%s:%s (%s:%s %s:%s)\n" % ("media info",
                "local", locIp, locPort, "remote", RmtIp, RmtPort,
                "Payload", audioPayLoad, "ptime", audioPTime)
        res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod=self.MATCH_MOD_DETAILS)
        reason = res[1] if res else ""
        if reason:
            res = self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1=self.SIGN_CHAN_TERMINATED, mod=self.MATCH_MOD_DETAILS)
            s += "%-16s: %s\n" % ("hangup reason", res[1] if res else reason)
        else:
            res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod=self.MATCH_MOD_DETAILS)
            st, reason = res if res else ("", "")
            if reason:
                s += "%-16s: %s\n" % ("hangup reason", reason)
        signTimePrev = None
        signTimeThis = None
        if mode in ['normal']:
            s += "%-16s: %-s\n" % ("result", conclusion)
            s += "%-16s: %-s\n" % ("message flow", self.showNote(note))
        s += "\n" + "{0:*^160}".format(" signaling flow ") + "\n\n"
        s += "%-4s %-35s %-16s %-16s %s\n\n" % ("no.", "sign time", "source line", "message type", "details")
        seenFiles = []
        for i, k in enumerate(keyInfoList):
            signTime = "%s" % self.getLogTime(logDict.get(k[0], {}).get(k[1], ""))
            res = self.reMatch("(\\d{4})-(\\d{1,2})-(\\d{1,2}) (\\d{2}):(\\d{2}):(\\d{2}).(\\d{6})", signTime, 7)
            if res:
                signTimePrev = signTimeThis
                signTimeThis = datetime(int(res[0]), int(res[1]), int(res[2]),
                                        int(res[3]), int(res[4]), int(res[5]))
                if signTimePrev and (signTimeThis - signTimePrev).total_seconds() >= 1:
                    s += "%s \n" % getColor("{0:^40}".format(
                        "delta:" + str((signTimeThis - signTimePrev).total_seconds()) + "s"),
                        color="red", need=True)
            s += "{0:^40}".format(" ↓ ") + "\n"
            if k[0] not in seenFiles:
                s += "%s\n" % self.getPathEx(k[0])
                seenFiles.append(k[0])
            s += "%02d. %-35s %-16s %-16s %s\n" % (i + 1, signTime, str(k[1]), str(k[2]), str(k[3]))
        s += "\n"
        return s
    # ---------------------------------------- display detailed analysis ----------------------------------------
    def __showDetailsHeader(self):
        pass

    def __showDetailsBody(self, sessUUID="", targConclusion=""):
        s = self.getDetails(sessUUID, targConclusion)
        if s:
            PRINT(s)
        return bool(s)

    def __showDetailsTail(self, count, conclusion=""):
        s = "-" * 160 + "\n"
        s += "\ntotal: %d" % count
        PRINT(s)
        return s

    def __showDetails(self, sessUUID="", callNumber="", conclusion=""):
        sessLogInfoDict = self.getSessLogInfoDict()
        # header
        self.__showDetailsHeader()
        # body
        count = 0
        if sessUUID:
            # filter by call number when one is given
            if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \
                    self.__showDetailsBody(sessUUID, conclusion):
                count += 1
        else:
            total = len(sessLogInfoDict)
            flag = False
            sessList = sorted(sessLogInfoDict.items(),
                              key=lambda sessLogInfoDict: sessLogInfoDict[1][self.SESS_START_TIME_DK])
            for i, (sessUUID, context) in enumerate(sessList):
                # filter by call number when one is given
                if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \
                        self.__showDetailsBody(sessUUID, conclusion):
                    count += 1
                    continueRet, flag = self.inputContinue(i, count, total, flag,
                                                           self.__showDetailsHeader, conclusion)
                    if not continueRet:
                        break
        # tail
        self.__showDetailsTail(count)
        return count

    # Search logs by UUID and show the detailed analysis
    def showDetails(self, sessUUID="", callNumber="", conclusion=""):
        return self.__showDetails(sessUUID=sessUUID, callNumber=callNumber, conclusion=conclusion)
    # ---------------------------------------- display analysis results ----------------------------------------
    def __showAnalysisResultHeader(self, targConclusion=""):
        s = "%-30s %-36s %-30s %-7s %-s\n" % ("call start time", "UUID", "call number", "result", "note")
        PRINT(s)

    def __getAnalysisResultBody(self, sessUUID, targConclusion="", show=True):
        sessLogInfoDict = self.getSessLogInfoDict()
        s = ""
        conclusion = ""
        if sessLogInfoDict.get(sessUUID, False):
            logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
            keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
            if not keyInfoList or not logDict:
                return s, conclusion
            callTime = "%s" % self.getLogTime(logDict.get(keyInfoList[0][0], {}).get(keyInfoList[0][1], ""))
            callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
            conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK]
            note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK]
            if targConclusion.upper() in conclusion.upper():
                color = conclusion.upper() in ['ERROR'] and 'red' or \
                        conclusion.upper() in ['WARNING'] and 'yellow' or \
                        conclusion.upper() in ['OK'] and 'green'
                colored = getColor("{0:<7}".format(conclusion), color=color)
                s += "%-30s %-36s %-30s %-7s %-s\n" % (callTime, sessUUID,
                    callNumber or getColor("{0:<20}".format("null"), color='gray', need=show),
                    colored, note)
        return s, conclusion

    def __showAnalysisResultBody(self, sessUUID, targConclusion=""):
        s, c = self.__getAnalysisResultBody(sessUUID, targConclusion)
        if s:
            PRINT(s)
        return s

    def __showAnalysisResultTail(self, count, targConclusion=""):
        s = "\ntotal: %d" % count
        PRINT(s)

    def __showResult(self, sessUUID="", callNumber="", conclusion=""):
        sessLogInfoDict = self.getSessLogInfoDict()
        # header
        self.__showAnalysisResultHeader(conclusion)
        # body
        count = 0
        if sessUUID:
            # filter by call number when one is given
            if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \
                    and self.__showAnalysisResultBody(sessUUID, conclusion):
                count += 1
        else:
            total = len(sessLogInfoDict)
            flag = False
            sessList = sorted(sessLogInfoDict.items(),
                              key=lambda sessLogInfoDict: sessLogInfoDict[1][self.SESS_START_TIME_DK])
            for i, (sessUUID, context) in enumerate(sessList):
                # filter when a call number or UUID is given
                if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \
                        self.__showAnalysisResultBody(sessUUID, conclusion):
                    count += 1
                    continueRet, flag = self.inputContinue(i, count, total, flag,
                                                           self.__showAnalysisResultHeader, conclusion)
                    if not continueRet:
                        break
        # tail
        self.__showAnalysisResultTail(count, conclusion)
        return count

    def showResult(self, sessUUID="", callNumber="", conclusion=""):
        return self.__showResult(sessUUID=sessUUID, callNumber=callNumber, conclusion=conclusion)
    # ---------------------------------------- write analysis results to file ----------------------------------------
    def __getOutputResultHeader(self):
        s = "%-30s %-36s %-30s %-6s %s\n" % ("call start time", "UUID", "call number", "result", "note")
        return s

    def __getOutputResultTail(self, warningCount, errorCount, okCount):
        s = "%s:%d\n%s:%d\n%s:%d\n%s:%d\n" % ("OK", okCount, "WARNING", warningCount,
                                              "ERROR", errorCount,
                                              "total", okCount + warningCount + errorCount)
        return s

    def __outputReslut(self, outputPath, sessUUID="", callNumber="", conclusion="", fileName=""):
        sessLogInfoDict = self.getSessLogInfoDict()
        # name the output file after the filter parameters
        if not fileName:
            # fileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
            fileNames = sessUUID + callNumber + conclusion + "_tmp"
            fileName = "Result" + fileNames + self.OUTPUT_POSTFIX_RESULT
        context = ""
        warningCount, errorCount, okCount = 0, 0, 0
        # write to file
        if sessUUID:
            if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True):
                s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False)
                context += s
                if s and c.upper() in ['ERROR']:
                    errorCount += 1
                elif s and c.upper() in ['WARNING']:
                    warningCount += 1
                elif s and c.upper() in ['OK']:
                    okCount += 1
        else:
            for sessUUID in sessLogInfoDict.keys():
                if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True):
                    s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False)
                    context += s
                    if s and c.upper() in ['ERROR']:
                        errorCount += 1
                    elif s and c.upper() in ['WARNING']:
                        warningCount += 1
                    elif s and c.upper() in ['OK']:
                        okCount += 1
        if context:
            context = self.__getOutputResultHeader() + context
            context += self.__getOutputResultTail(warningCount, errorCount, okCount)
            if self.outputEx(outputPath, fileName, context):
                return 1, outputPath, [fileName]
        return 0, outputPath, []

    def outputReslut(self, outputPath, sessUUID="", callNumber="", conclusion="", fileName=""):
        return self.__outputReslut(outputPath, sessUUID=sessUUID, callNumber=callNumber,
                                   conclusion=conclusion, fileName=fileName)
\\[(.*)\\]\\[(.*)\\]\", 2, [], self.SIGN_FLAG_CHAN), # 收到消息类的日志 (\"Callstate Change (.*)", "# for k in keyInfoList: # print(k) conclusion = \"\" note = \"\"", "getSessInfo(self, UUID = \"\", key = \"\"): \"\"\"清理FS的日志 参数列表: UUID:会话的UUID key:内部的字典名 返回值: 成功标志和错误信息", "\"\"\" sessLogInfoDict = self.getSessLogInfoDict() sessLen = len(sessLogInfoDict) process = 0 for sessUUID in", "{文件索引:{行数:日志}} 异常: 无 \"\"\" return self.__ignoreLinesDict def load(self, path, rl=False): \"\"\"加载FS的日志 参数列表: path:日志路径", "= code) def __matchChannelStateDesc(self, keyInfoList, desc): \"\"\"通道状态描述匹配 匹配状态描述 参数列表: keyInfoList:关键信息列表 desc:描述 返回值: 成功或失败", "p in self.getPath()]) fileNames = sessUUID + callNumber + conclusion + \"_tmp\" fileName", "res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS) st, reason = res if res", "\"%-30s %-36s %-30s %-7s %-s\\n\" % (\"呼叫开始时间\", \"UUID\", \"呼叫号码\", \"结果\", \"备注\") PRINT(s) def", "RmtPort, audioPayLoad, audioPTime = res if res else (\"\",\"\",\"\",\"\",\"\",\"\") note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] s", "callNumber else True): s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False) context += s if", "if callNumber else True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName = sessDict[self.SESS_FS_CALLNUMBER_DK] + \"__\" +", "newPath, fileNameList for sessUUID in sessLogInfoDict.keys(): sessDict = sessLogInfoDict[sessUUID] if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber", "callNumber if callNumber else True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName = sessDict[self.SESS_FS_CALLNUMBER_DK] + \"__\"", "+ callNumber + conclusion + \"_tmp\" fileName = \"Result\" + fileNames + self.OUTPUT_POSTFIX_RESULT", "\"\\n\", keyInfoList, \"\\n\",detailsDict, \"\\n\" # 分析会话日志 def __analysis(self): self.__sessKeyInfoCollect() self.__sessAnalysis() # 运行 def", "% (time3 - time2) PRINT(s, color='green') s = \"正在分析会话过程...\" PRINT(s, end='') self.__analysis() time4", "= sessLogInfoDict.get(sessUUID, False) if not sessDict: return len(fileNameList), newPath, fileNameList if sessDict[self.SESS_FS_CALLNUMBER_DK] ==", "res = self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = self.SIGN_CHAN_TERMINATED, mod = self.MATCH_MOD_DETAILS) s += \"%-16s:", "= self.printProc(process, sessLen, widgetType = \"percent\", begin=0, end=50) keyInfoList = [] logFileDict =", "无 \"\"\" ignoreLinesDict = {} sessLogInfoDict = {} fileLen = len(self.getLines()) process =", "PRINT(s) raise res = tuple(l) keyInfoList.append((f, line, flag, res)) break else: sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] =", "-> %s:%s:%s (%s:%s %s:%s)\\n\" % (\"媒体信息\", \"本端地址\", locIp, locPort, \"远端地址\", RmtIp, RmtPort, \"Payload\",", "in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion = \"ERROR\" if detailsDict[\"terminated_list\"]: conclusion = \"ERROR\" note +=", "= \"_\".join([os.path.split(p)[-1] for p in self.getPath()]) fileNames = sessUUID + callNumber + conclusion", ">= f if f != -1 else True) and \\ (x[1] >= l", "[0], self.SIGN_FLAG_S_BYE), (\"Sending CANCEL to(.*)\", 1, [0], self.SIGN_FLAG_CANCEL), ] sessLen = len(sessLogInfoDict) process", "None: print(sessUUID, \"\\nis not get time\") return sessLogInfoDict, ignoreLinesDict # 获取会话中的呼叫号码 def __getCallNumber(self):", "return self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState) def __matchCallStateChange(self, keyInfoList, fromState, toState): \"\"\"call状态变迁匹配 参数列表: keyInfoList:关键信息列表", "显示Body count = 0 if sessUUID: # 
    # collect sessions
    def __sessCollect(self):
        """Group every log line under its session UUID.

        Returns: sessLogInfoDict, ignoreLinesDict
        """
        ignoreLinesDict = {}
        sessLogInfoDict = {}
        fileLen = len(self.getLines())
        process = 0
        for f, lines in enumerate(self.getLines()):
            process = self.printProc(process, fileLen)
            for i, line in enumerate(lines):
                # e.g.: 4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 sofia/external/6010@10.0.7.152:5080 Standard INIT
                # the first space splits the session UUID from the log text
                pos = line.find(' ')
                # no space, or no 36-char UUID with four dashes: ignore the line
                if pos == -1 or pos < 36 or line[0:pos].count('-') != 4:
                    if f not in ignoreLinesDict:
                        ignoreLinesDict[f] = {}
                    ignoreLinesDict[f][i] = line
                    continue
                # split off the UUID and the log text
                sessUUID, sessLog = line[0:pos], line[pos + 1:-1]
                # store the log line under its UUID
                if sessUUID in sessLogInfoDict:
                    if f not in sessLogInfoDict[sessUUID][self.SESS_LOG_DK]:
                        sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f] = {i: sessLog}
                    else:
                        sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i] = sessLog
                    if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None:
                        sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog)
                else:
                    sessLogInfoDict[sessUUID] = {self.SESS_LOG_DK: {f: {i: sessLog}},
                                                 self.SESS_FS_CALLNUMBER_DK: "",
                                                 self.SESS_RESULT_DK: {self.SESS_RESULT_CONCLUSION_DK: "",
                                                                       self.SESS_RESULT_DETAILS_DK: {},
                                                                       self.SESS_RESULT_NOTE_DK: ""},
                                                 self.SESS_KEYINFO_DK: [],
                                                 self.SESS_START_TIME_DK: self.getLogTime(sessLog)}
        self.__sessLogInfoDict = sessLogInfoDict
        self.__ignoreLinesDict = ignoreLinesDict
        for sessUUID in sessLogInfoDict.keys():
            if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None:
                print(sessUUID, "\nhas no start time")
        return sessLogInfoDict, ignoreLinesDict
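    # Illustration only (hypothetical attribute, not recovered from the
    # original source): the sample line from the comment above ends up
    # stored roughly as
    #   {'4541eb63-...013f': {'log': {0: {0: '2016-03-21 17:41:14.701532 ...'}},
    #                         'callNumber': '', 'result': {...},
    #                         'keyInfo': [], 'startTime': <datetime>}}
    SAMPLE_FS_LINE = ('4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 '
                      '17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 '
                      'sofia/external/6010@10.0.7.152:5080 Standard INIT')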
    # extract the call number of every session
    def __getCallNumber(self):
        """Extract each session's call number from its log lines.

        The number is taken from strings such as
        (sofia/external/6010@10.0.7.152:5080), where 6010 is the number,
        and written into the session's callNumber field.
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType="percent")
            flag = False
            for f in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys():
                for l in sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f].keys():
                    # take one log line
                    sessLog = sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][l]
                    # default: the number between "sofia/external/" and "@"
                    res = self.reMatch("New Channel sofia\/(.*)\/(\d*)\@(.*?) \[", sessLog, 3)
                    if res:
                        sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1]
                        flag = True
                    # if the call was transferred, prefer the transferred number
                    res = self.reMatch("Dialplan: sofia\/(.*)\/(.*) Action transfer\((\d*) XML default\)", sessLog, 3)
                    if res:
                        sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[2]
                        flag = True
                        break
                    res = self.reMatch("<(\d*)>->(\d*) in context", sessLog, 2)
                    if res:
                        sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1]
                        flag = True
                        break
                if flag:
                    break
            else:
                # no number found: the log file format may have changed
                # print "Not find the call number. UUID:%s" % sessUUID
                pass
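    # Illustration only (hypothetical attribute): what the first pattern above
    # is expected to capture on a "New Channel" line --
    #   reMatch("New Channel sofia\/(.*)\/(\d*)\@(.*?) \[", log, 3)
    #   on SAMPLE_NEW_CHANNEL below -> ('external', '6010', '10.0.7.152:5080'),
    # so res[1] is the call number written into the session dict.
    SAMPLE_NEW_CHANNEL = 'New Channel sofia/external/6010@10.0.7.152:5080 ['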
    # collect key info per session
    def __sessKeyInfoCollect(self):
        """Collect the key events of every session.

        After the session dict is built, every session is scanned with the
        regexes below to pick up state transitions and message logs, e.g.:

          State Change CS_CONSUME_MEDIA -> CS_EXECUTE   core state machine (CS)
          Callstate Change ACTIVE -> HANGUP             call state machine
          entering state [proceeding][180]              response handling (channel)
          AUDIO RTP [sofia/external/6797@10.0.7.152:5080] 10.0.7.176 port 24776 -> 192.168.0.178 port 7076 codec: 18 ms: 20   RTP info
          Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION]   hangup reason

        Each hit is stored in the session's keyInfo list as a tuple
        (fileIndex, lineNo, matchFlag, extractedResult).
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        # the regexes to match: (pattern, groupCount, dropPositions, flag)
        reExpInfo = [
            ("State Change (.*) -> (.*)", 2, [], self.SIGN_FLAG_CS),          # state transitions
            ("entering state \[(.*)\]\[(.*)\]", 2, [], self.SIGN_FLAG_CHAN),  # received messages
            ("Callstate Change (.*) -> (.*)", 2, [], self.SIGN_FLAG_CALL),    # call states
            ("receiving invite from (.*)", 1, [], self.SIGN_FLAG_R_INVITE),   # (pattern tail reconstructed)
            ("AUDIO RTP \[(.*)\] (.*) port (\d+) -> (.*) port (\d+) codec: (\d+) ms: (\d+)", 7, [0], self.SIGN_FLAG_RTP),  # RTP channel info
            ("Flipping CID from \"(.*)\" <(.*)> to \"(.*)\" <(.*)>", 4, [], self.SIGN_FLAG_CALLNUMBER),  # call number (pattern tail reconstructed)
            ("952 Hangup (.*) \[(.*)\] \[(.*)\]", 3, [0], self.SIGN_FLAG_R_BYE),
            ("Hangup (.*) \[(.*)\] \[(.*)\]", 3, [0], self.SIGN_FLAG_HANGUP),
            ("Sending BYE to(.*)", 1, [0], self.SIGN_FLAG_S_BYE),
            ("Sending CANCEL to(.*)", 1, [0], self.SIGN_FLAG_CANCEL),
        ]
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType="percent", begin=0, end=50)
            keyInfoList = []
            logFileDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
            fileList = sorted(logFileDict.items(), key=lambda logFileDict: logFileDict[0])
            for f, logDict in fileList:
                for line, log in sorted(logDict.items(), key=lambda x: x[0]):
                    for reExpr, expLen, dropPos, flag in reExpInfo:
                        res = self.reMatch(reExpr, log, expLen)
                        if res:
                            l = list(res)
                            # drop the capture groups we do not need
                            for dPos in [x for x in sorted(dropPos, reverse=True) if dropPos and x < len(res)]:
                                try:
                                    del l[dPos]
                                except Exception as Err:
                                    PRINT(str((Err, reExpr, res)))
                                    raise
                            res = tuple(l)
                            keyInfoList.append((f, line, flag, res))
                            break
            sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] = keyInfoList
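    # Illustration only (hypothetical attribute): a keyInfo entry produced by
    # the collector above for a line "State Change CS_INIT -> CS_ROUTING"
    # found at line 12 of file 0:
    SAMPLE_KEYINFO = (0, 12, "core sm", ("CS_INIT", "CS_ROUTING"))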
    def __match(self, keyInfoList, flag, param1="", param2="", f=-1, l=-1, mod="normal"):
        """Search keyInfoList for entries with a given flag and parameters.

        mod selects the return shape: MATCH_MOD_NORMAL -> bool,
        MATCH_MOD_EXTEND -> (fileIndex, lineNo, listIndex) of the first hit,
        MATCH_MOD_DETAILS -> the details tuple of the first hit.
        """
        hits = [(i, x) for i, x in enumerate(keyInfoList) if x[2] == flag and
                ((len(x[3]) >= 1 and param1.strip() == x[3][0].strip()) if param1 != "" else True) and
                ((len(x[3]) >= 2 and param2.strip() == x[3][1].strip()) if param2 != "" else True) and
                (x[0] >= f if f != -1 else True) and
                (x[1] >= l if l != -1 else True)]
        if mod in [self.MATCH_MOD_NORMAL]:
            return any(hits)
        elif mod in [self.MATCH_MOD_EXTEND]:
            return (hits[0][1][0], hits[0][1][1], hits[0][0]) if any(hits) else False
        elif mod in [self.MATCH_MOD_DETAILS]:
            return hits[0][1][3] if any(hits) else False
        else:
            return False

    def __matchCsStateChange(self, keyInfoList, fromState, toState):
        """Match a core-layer (CS) state-machine transition. Returns bool."""
        return self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState)

    def __matchCallStateChange(self, keyInfoList, fromState, toState):
        """Match a call-layer state-machine transition. Returns bool."""
        return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState)

    def __matchChannelStateCode(self, keyInfoList, code):
        """Exactly match a channel response code. Returns bool."""
        return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param2=code)

    def __matchChannelStateDesc(self, keyInfoList, desc):
        """Match a channel state description. Returns bool."""
        return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1=desc)

    def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode):
        """Fuzzily match a channel response code.

        'x' stands for any digit, e.g. 4xx matches every code starting with 4.
        Returns the list of matched details tuples.
        """
        codeList = []
        for x in keyInfoList:
            if x[2] == self.SIGN_FLAG_CHAN and len(x[3]) >= 2:  # (condition reconstructed)
                reExpr = "(" + fuzzyCode.lower().replace("x", "\\d") + ")"
                res = self.reMatch(reExpr, x[3][1], 1)
                res and codeList.append(x[3])
        return codeList
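    # Illustration only: with SAMPLE_KEYINFO above in the list,
    # __match(keyInfoList, SIGN_FLAG_CS, "CS_INIT", "CS_ROUTING") returns
    #   True                        for mod=MATCH_MOD_NORMAL,
    #   (0, 12, 0)                  for mod=MATCH_MOD_EXTEND,
    #   ("CS_INIT", "CS_ROUTING")   for mod=MATCH_MOD_DETAILS.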
    # analyze the call flow of every session
    def __sessAnalysis(self):
        """Analyze each session's state transitions.

        First determine which transitions occurred, then match them against
        the standard case templates below to reconstruct the call flow.
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType="percent")
            keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
            conclusion = ""
            note = ""
            detailsDict = {
                "CS_NEW__CS_INIT": self.__matchCsStateChange(keyInfoList, "CS_NEW", "CS_INIT"),
                "CS_INIT__CS_ROUTING": self.__matchCsStateChange(keyInfoList, "CS_INIT", "CS_ROUTING"),
                "CS_ROUTING__CS_CONSUME_MEDIA": self.__matchCsStateChange(keyInfoList, "CS_ROUTING", "CS_CONSUME_MEDIA"),
                "CS_CONSUME_MEDIA__CS_EXECUTE": self.__matchCsStateChange(keyInfoList, "CS_CONSUME_MEDIA", "CS_EXECUTE"),
                "DOWN__RINGING": self.__matchCallStateChange(keyInfoList, "DOWN", "RINGING"),
                "DOWN__EARLY": self.__matchCallStateChange(keyInfoList, "DOWN", "EARLY"),
                "DOWN__ACTIVE": self.__matchCallStateChange(keyInfoList, "DOWN", "ACTIVE"),
                "EARLY__RINGING": self.__matchCallStateChange(keyInfoList, "EARLY", "RINGING"),
                "EARLY__ACTIVE": self.__matchCallStateChange(keyInfoList, "EARLY", "ACTIVE"),
                "RINGING__ACTIVE": self.__matchCallStateChange(keyInfoList, "RINGING", "ACTIVE"),
                "DOWN__HANGUP": self.__matchCallStateChange(keyInfoList, "DOWN", "HANGUP"),
                "EARLY__HANGUP": self.__matchCallStateChange(keyInfoList, "EARLY", "HANGUP"),
                "RINGING__HANGUP": self.__matchCallStateChange(keyInfoList, "RINGING", "HANGUP"),
                "ACTIVE__HANGUP": self.__matchCallStateChange(keyInfoList, "ACTIVE", "HANGUP"),
                "calling_0": self.__matchChannelStateDesc(keyInfoList, "calling"),
                "proceeding_180": self.__matchChannelStateCode(keyInfoList, "180"),
                "proceeding_183": self.__matchChannelStateCode(keyInfoList, "183"),
                "completing_200": self.__matchChannelStateDesc(keyInfoList, "completing"),
                "completed_200": self.__matchChannelStateDesc(keyInfoList, "completed"),
                "ready_200": self.__matchChannelStateDesc(keyInfoList, "ready"),
                "terminated_list": self.__fuzzyMatchChannelStateCode(keyInfoList, "4xx") +
                                   self.__fuzzyMatchChannelStateCode(keyInfoList, "5xx") +
                                   self.__fuzzyMatchChannelStateCode(keyInfoList, "6xx"),
            }
            # hallmark case templates
            case_calling_invite = {"CS_INIT__CS_ROUTING": True, "CS_ROUTING__CS_CONSUME_MEDIA": True, "calling_0": True, }
            case_ringing_180 = {"proceeding_180": True, }
            case_ringing_183 = {"proceeding_183": True, }
            case_ringinged_180 = {"DOWN__RINGING": True, }
            case_ringinged_183 = {"DOWN__EARLY": True, }
            case_ringing_183_180 = {"DOWN__EARLY": True, "proceeding_183": True, "EARLY__RINGING": True, "proceeding_180": True, }
            case_answer_invite = {"DOWN__ACTIVE": True, "completing_200": True, "ready_200": True, }
            case_answerd_invite = {"DOWN__ACTIVE": True, "completed_200": True, "ready_200": True, }
            case_answer_180 = {"RINGING__ACTIVE": True, "completing_200": True, "ready_200": True, }
            case_answerd_180 = {"RINGING__ACTIVE": True, "completed_200": True, "ready_200": True, }
            case_answer_183 = {"EARLY__ACTIVE": True, "completing_200": True, "ready_200": True, }
            case_answerd_183 = {"EARLY__ACTIVE": True, "completed_200": True, "ready_200": True, }
            case_hangup_invite = {"DOWN__HANGUP": True, }
            case_hangup_180 = {"RINGING__HANGUP": True, }
            case_hangup_183 = {"EARLY__HANGUP": True, }
            case_hangup_acitve = {"ACTIVE__HANGUP": True, }
            case_r_183 = {"proceeding_183": True, }
            # invite->
            if self.caseMatch(detailsDict, case_calling_invite) or self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE):
                conclusion = "OK"
                note = "[CALLING" + (self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE) and "(R)" or "(S)")
                # invite-> 200<-
                if self.caseMatch(detailsDict, case_answer_invite) or self.caseMatch(detailsDict, case_answerd_invite):
                    note += " -> TALKING"
                else:
                    # invite-> (183<- or 180<-)
                    if self.caseMatch(detailsDict, case_ringing_180) or self.caseMatch(detailsDict, case_ringing_183) or \
                       self.caseMatch(detailsDict, case_ringing_183_180) or \
                       self.caseMatch(detailsDict, case_ringinged_180) or self.caseMatch(detailsDict, case_ringinged_183):
                        note += " -> RINGING"
                    # invite-> (183<- or 180<-) 200<-
                    if self.caseMatch(detailsDict, case_answer_180) or self.caseMatch(detailsDict, case_answerd_180) or \
                       self.caseMatch(detailsDict, case_answer_183) or self.caseMatch(detailsDict, case_answerd_183):
                        note += " -> TALKING"
                # invite-> (183<- or 180<-) 200<- bye<->
                if self.caseMatch(detailsDict, case_hangup_acitve):
                    note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
                    # determine the hangup reason
                    res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod=self.MATCH_MOD_DETAILS)
                    st, reason = res if res else ("", "")
                    if not reason:
                        res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod=self.MATCH_MOD_DETAILS)
                        st, reason = res if res else ("", "")
                    if reason:
                        note += "{[" + st + "]" + reason + "}"
                        if reason not in ["NORMAL_CLEARING", "MANAGER_REQUEST"]:
                            conclusion = "ERROR"
                # invite-> (bye-> or error response<-)
                elif self.caseMatch(detailsDict, case_hangup_invite):
                    if self.caseMatch(detailsDict, case_r_183):
                        note += " -> RINGING(183)"
                    note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
                    if detailsDict["terminated_list"]:
                        conclusion = "ERROR"
                        note += "(recv %s)" % str(detailsDict["terminated_list"])  # (tail reconstructed)
                # invite-> (183<- or 180<-) error response<-
                elif self.caseMatch(detailsDict, case_hangup_180) or self.caseMatch(detailsDict, case_hangup_183):
                    note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
                    # determine the hangup reason
                    res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod=self.MATCH_MOD_DETAILS)
                    st, reason = res if res else ("", "")
                    if reason:
                        note += "{[" + st + "]" + reason + "}"
                        if reason not in ["NORMAL_CLEARING", "MANAGER_REQUEST"]:
                            conclusion = "ERROR"
                    if detailsDict["terminated_list"]:
                        conclusion = "ERROR"
            else:
                conclusion = "WARNING"
                note += "[NOT COMPLETE"
            note += "]"
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion
            # print "\n", sessLogInfoDict[sessUUID]["callNumber"], sessLogInfoDict[sessUUID]["result"]["conclusion"], note
    # analyze the session logs
    def __analysis(self):
        self.__sessKeyInfoCollect()
        self.__sessAnalysis()

    # run the whole pipeline
    def run(self, mode="Normal"):
        time1 = time.clock()
        PRINT("Collecting session info...", end='')
        self.__sessCollect()
        time2 = time.clock()
        PRINT("OK (%.2fs)" % (time2 - time1), color='green')
        PRINT("Extracting call numbers...", end='')
        self.__getCallNumber()
        time3 = time.clock()
        PRINT("OK (%.2fs)" % (time3 - time2), color='green')
        PRINT("Analyzing sessions...", end='')
        self.__analysis()
        time4 = time.clock()
        PRINT("OK (%.2fs)" % (time4 - time3), color='green')
        return True, ""

    # get the UUID list
    def getSessUUIDList(self):
        sessLogInfoDict = self.getSessLogInfoDict()
        return sessLogInfoDict.keys()

    # get the call number list
    def getCallNumberList(self):
        sessLogInfoDict = self.getSessLogInfoDict()
        return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
                for sessUUID in sessLogInfoDict.keys()
                if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]]

    # show the UUID list
    def showSessUUIDList(self):
        sessUUIDList = self.getSessUUIDList()
        self.printList(sessUUIDList, 4, "UUID list:", "total:%d" % len(sessUUIDList))

    # show the call number list
    def showCallNumberList(self):
        callNumberList = self.getCallNumberList()
        tmp = set(callNumberList)
        self.printList(tmp, 8, "call numbers:", "total:%d" % len(tmp))
        # duplicated numbers
        dupl = self.findDupl(callNumberList)
        len(dupl) and self.printList(dupl, 8, "duplicated numbers:", "total:%d" % len(dupl))
{\"RINGING__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_180 = {\"RINGING__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_183 = {\"EARLY__ACTIVE\":True,", "= self.reMatch(\"Dialplan: sofia\\/(.*)\\/(.*) Action transfer\\((\\d*) XML default\\)\", sessLog, 3) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] =", "= {\"RINGING__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_180 = {\"RINGING__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_183 = {\"EARLY__ACTIVE\":True, \"completing_200\":True,", "无 返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\" self.__sessLogInfoDict = {} self.__ignoreLinesDict =", "s, conclusion def __showAnalysisResultBody(self, sessUUID, targConclusion = \"\"): s, c = self.__getAnalysisResultBody(sessUUID, targConclusion)", "disFrom, numberFrom, disTo, numberTo = res if res else (\"\",\"\",\"\",\"\") callTime = \"%s\"", "f not in ignoreLinesDict: ignoreLinesDict[f] = {} else: ignoreLinesDict[f][i] = line continue #", "# 例如 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:473 (sofia/external/6010@10.0.7.152:5080) Running State Change CS_INIT sessLen =", "self.__sessLogInfoDict = {} self.__ignoreLinesDict = {} return super(FsLogAnalyzer, self).clear() def getSessInfo(self, UUID =", "sessLogInfoDict self.__ignoreLinesDict = ignoreLinesDict for sessUUID in sessLogInfoDict.keys(): if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: print(sessUUID,", "ms: 20 -- RTP信息类 Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION] -- 挂断原因类 提取这些信息,并保存在会话字典的keyInfo中,其中以元祖的形式存放(文件索引,行号,匹配标志,提取的结果) 参数列表: 无", "context:keyInfoList中元组的‘信息’字段 返回值: 元组(FromModule, ToModule, Sign) 异常: 无 \"\"\" if flag in [self.SIGN_FLAG_CHAN]: if", "res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS) reason = res[1] if res else", "= self.MATCH_MOD_DETAILS) st, reason = res if res else (\"\", \"\") if reason:", "in [self.SIGN_CHAN_CALLING]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_INVITE elif context[0] in [self.SIGN_CHAN_PROCEDDING]: return self.MOD_OUTSIDE, self.MOD_FS,", "PRINT(s, end='') self.__analysis() time4 = time.clock() s = \"OK (耗时:%.2f秒)\" % (time4 -", "callNumber else True) and \\ self.__showDetailsBody(sessUUID, conclusion): count += 1 continueRet, flag =", "for l in sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f].keys(): # 取一行日志 sessLog = sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][l] # 进行正则匹配,以\"(sofia/external/\"作为开头关键字,以\")\"作为结尾,\"@\"作为分隔,提取其中的号码 # 默认按照此行日志取号码", "or sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]) if locIp and RmtIp: s += \"%-16s: %s:%s:%s -> %s:%s:%s (%s:%s", "\"\"\"获取关键信息列表 参数列表: UUID:会话的UUID 返回值: 关键信息 [(文件索引,行数,状态类型,(信息)),] 异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_KEYINFO_DK) def", "\"重复的号码:\", \"总数:%d\" % len(dupl)) # ----------------------------------------------显示详细分析结果---------------------------------------------- def __showDetailsHeader(self, sessUUID = \"\", callNumber =", "(.*) \\[(.*)\\] \\[(.*)\\]\", 3, [0], self.SIGN_FLAG_HANGUP), (\"Sending BYE to(.*)\", 1, [0], self.SIGN_FLAG_S_BYE), (\"Sending", "== x[3][1].strip()) if param2 != \"\" else True) and \\ (x[0] >= f", "for i, (sessUUID, context) in enumerate(sessList): # 若输入了callNumber或UUID则认为需要过滤 if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if", "self.__getOutputResultTail(warningCount, errorCount, okCount) if self.outputEx(outputPath, fileName, context): return 1, outputPath, 
[fileName] else: return", "c.upper() in ['WARNING']: warningCount += 1 elif s and c.upper() in ['OK']: okCount", "int(res[1]), int(res[2]), int(res[3]), int(res[4]), int(res[5])) if signTimePrev and (signTimeThis - signTimePrev).seconds > 4:", "else: from analyzer.analyzer import LogAnalyzer # FS日志分析器 class FsLogAnalyzer(LogAnalyzer): __sessLogInfoDict = {}# 按照会话归类的日志信息", "\"(S)\") # invite-> 200<- if self.caseMatch(detailsDict, case_answer_invite): note += \" -> TALKING\" #", "\"呼叫号码:%s\\nUUID:%s\\n\" % (callNumber, sessUUID) return s def __outputOriginLog(self, outputPath, sessUUID = \"\", callNumber", "2 and param2.strip() == x[3][1].strip()) if param2 != \"\" else True) and \\", "outputPath, [] else: return 0, outputPath, [] # 输出简单分析结果到文件 def outputReslut(self, outputPath, sessUUID", "newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName) # 不存在UUID(可能输出多个文件) else: # 确定新的目录,若指定了文件名,则以指定的为准,否则以源日志文件名作为目录名 if not", "%s\\n\" % (\"挂断原因\", res[1] if res else reason) else: res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE,", "reExpInfo = [ (\"State Change (.*) -> (.*)\", 2, [], self.SIGN_FLAG_CS), # 状态转移类的日志", "self.__matchCsStateChange(keyInfoList, \"CS_NEW\", \"CS_INIT\"), \"CS_INIT__CS_ROUTING\": self.__matchCsStateChange(keyInfoList, \"CS_INIT\", \"CS_ROUTING\"), \"CS_ROUTING__CS_CONSUME_MEDIA\": self.__matchCsStateChange(keyInfoList, \"CS_ROUTING\", \"CS_CONSUME_MEDIA\"), \"CS_CONSUME_MEDIA__CS_EXECUTE\": self.__matchCsStateChange(keyInfoList,", "\"completing_200\":True, \"ready_200\":True,} case_answerd_invite = {\"DOWN__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_180 = {\"RINGING__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_180", "getSessUUIDList(self): sessLogInfoDict = self.getSessLogInfoDict() return sessLogInfoDict.keys() # 获取呼叫号码列表 def getCallNumberList(self): sessLogInfoDict = self.getSessLogInfoDict()", "fileName = \"\", callNumber = \"\", sessUUID = \"\", targConclusion=\"\"): fileNameList = []", "# 创建新的目录 if not self.makeDir(newPath): return len(fileNameList), newPath, fileNameList for sessUUID in sessLogInfoDict.keys():", "无 返回值: 会话信息字典 例如: {UUID:{log:{文件索引:{行数:日志}}, callNumber:呼叫号码, result:分析结果, keyInfo:(文件索引,行数,状态类型,(状态迁移信息))}} 异常: 无 \"\"\" return self.__sessLogInfoDict", "to(.*)\", 1, [0], self.SIGN_FLAG_CANCEL), ] sessLen = len(sessLogInfoDict) process = 0 for sessUUID", "keyInfoList def __match(self, keyInfoList, flag, param1 = \"\", param2 = \"\", f =", "= \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0], {}).get(keyInfoList[0][1], \"\")) res = self.__match(keyInfoList, self.SIGN_FLAG_RTP, mod = self.MATCH_MOD_DETAILS)", "self.MATCH_MOD_DETAILS) s += \"%-16s: %s\\n\" % (\"挂断原因\", res[1] if res else reason) else:", "self.getSessLogInfoDict() # 确定新的目录,以源日志文件名作为目录名 if not fileName: #fileNames = \"_\".join([os.path.split(p)[-1] for p in self.getPath()])", "% (callNumber, sessUUID) return s def __outputOriginLog(self, outputPath, sessUUID = \"\", callNumber =", "MATCH_MOD_NORMAL = \"normal\" MATCH_MOD_EXTEND = \"extend\" MATCH_MOD_DETAILS = \"details\" # 输出文件 OUTPUT_POSTFIX_LOG =", "return UUID, sessDict[UUID].get(key, False) else: return UUID, None else: return [(UUID, sessDict[UUID].get(key, False))", "desc:描述 返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = desc)", "%-7s %-s\\n\" % (callTime, sessUUID, callNumber or getColor(\"{0:<20}\".format(\"null\"), color='gray', need=show), conclusion, note) return", "for sessUUID in 
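    # Illustration only: getDetails() renders roughly
    #   ******** basic info ********
    #   call start time : 2016-03-21 17:41:14.701532
    #   UUID            : 4541eb63-e5b0-49f0-8d2c-31e06078013f
    #   call number     : 6010
    #   result          : OK
    #   ******** message details ********
    #   No.  sign time                   source line  message type  details
    #   1    2016-03-21 17:41:14.701532  12           core sm       ('CS_INIT', 'CS_ROUTING')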
    # ---------------------------------------------- display analysis results ----------------------------------------------
    def __showAnalysisResultHeader(self, targConclusion=""):
        s = "%-30s %-36s %-30s %-7s %-s\n" % ("call start time", "UUID", "call number", "result", "note")
        PRINT(s)

    def __getAnalysisResultBody(self, sessUUID, targConclusion="", show=True):
        sessLogInfoDict = self.getSessLogInfoDict()
        s = ""
        conclusion = ""
        if sessLogInfoDict.get(sessUUID, False):
            logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
            keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
            if not keyInfoList or not logDict:
                return s, conclusion
            callTime = "%s" % self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1]))
            callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
            conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK]
            note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK]
            if targConclusion.upper() in conclusion.upper():
                color = conclusion.upper() in ['ERROR'] and 'red' or \
                        conclusion.upper() in ['WARNING'] and 'yellow' or 'green'
                # (color application reconstructed: the whole row is tinted)
                s = getColor("%-30s %-36s %-30s %-7s %-s\n" %
                             (callTime, sessUUID,
                              callNumber or getColor("{0:<20}".format("null"), color='gray', need=show),
                              conclusion, note),
                             color=color, need=show)
        return s, conclusion

    def __showAnalysisResultBody(self, sessUUID, targConclusion=""):
        s, c = self.__getAnalysisResultBody(sessUUID, targConclusion)
        if s:
            PRINT(s, end='')
        return s

    def __showAnalysisResultTail(self, count, conclusion=""):
        s = "-" * 160 + "\n"
        s += "\ntotal:%d" % count
        PRINT(s)
        return s

    def __showResult(self, sessUUID="", callNumber="", conclusion=""):
        sessLogInfoDict = self.getSessLogInfoDict()
        # header
        self.__showAnalysisResultHeader(conclusion)
        # body
        count = 0
        if sessUUID:
            # filter by callNumber or UUID when given
            if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \
               and self.__showAnalysisResultBody(sessUUID, conclusion):
                count += 1
        else:
            total = len(sessLogInfoDict)
            flag = False
            sessList = sorted(sessLogInfoDict.items(), key=lambda sessLogInfoDict: sessLogInfoDict[1][self.SESS_START_TIME_DK])
            for i, (sessUUID, context) in enumerate(sessList):
                if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \
                   and self.__showAnalysisResultBody(sessUUID, conclusion):
                    count += 1
                continueRet, flag = self.inputContinue(i, count, total, flag, self.__showAnalysisResultHeader, conclusion)
                if not continueRet:
                    break
        # tail
        self.__showAnalysisResultTail(count, conclusion)
        return count

    def showResult(self, sessUUID="", callNumber="", conclusion=""):
        return self.__showResult(sessUUID=sessUUID, callNumber=callNumber, conclusion=conclusion)
    # ---------------------------------------------- write the summary to a file ----------------------------------------------
    def __getOutputResultHeader(self):
        s = "%-30s %-36s %-30s %-6s %s\n" % ("call start time", "UUID", "call number", "result", "note")
        return s

    def __getOutputResultTail(self, warningCount, errorCount, okCount):
        s = "%s:%d\n%s:%d\n%s:%d\n%s:%d\n" % ("total", errorCount + warningCount + okCount,
                                              "ERROR", errorCount, "WARNING", warningCount, "OK", okCount)
        return s

    def __outputReslut(self, outputPath, sessUUID="", callNumber="", conclusion="", fileName=""):
        sessLogInfoDict = self.getSessLogInfoDict()
        # derive the file name when none was given
        if not fileName:
            #fileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
            fileNames = sessUUID + callNumber + conclusion + "_tmp"
            fileName = "Result" + fileNames + self.OUTPUT_POSTFIX_RESULT
        context = ""
        warningCount, errorCount, okCount = 0, 0, 0
        # write to file
        if sessUUID:
            if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True):
                s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False)
                context += s
                if s and c.upper() in ['ERROR']:
                    errorCount += 1
                elif s and c.upper() in ['WARNING']:
                    warningCount += 1
                elif s and c.upper() in ['OK']:
                    okCount += 1
        else:
            for sessUUID in sessLogInfoDict.keys():
                if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True):
                    s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False)
                    context += s
                    if s and c.upper() in ['ERROR']:
                        errorCount += 1
                    elif s and c.upper() in ['WARNING']:
                        warningCount += 1
                    elif s and c.upper() in ['OK']:
                        okCount += 1
        if context:
            context = self.__getOutputResultHeader() + context + \
                      self.__getOutputResultTail(warningCount, errorCount, okCount)
            if self.outputEx(outputPath, fileName, context):
                return 1, outputPath, [fileName]
            else:
                return 0, outputPath, []
        else:
            return 0, outputPath, []

    def outputReslut(self, outputPath, sessUUID="", callNumber="", conclusion="", fileName=""):
        return self.__outputReslut(outputPath, sessUUID=sessUUID, callNumber=callNumber,
                                   conclusion=conclusion, fileName=fileName)
    # ---------------------------------------------- write detailed results to files ----------------------------------------------
    def __outputDetails(self, outputPath, fileName="", callNumber="", sessUUID="", targConclusion=""):
        fileNameList = []
        sessLogInfoDict = self.getSessLogInfoDict()
        newPath = outputPath
        if sessUUID:
            sessDict = sessLogInfoDict.get(sessUUID, False)
            if not sessDict:
                return len(fileNameList), newPath, fileNameList
            if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True:
                newFileName = fileName or (sessDict[self.SESS_FS_CALLNUMBER_DK] + "__" + sessUUID +
                                           "__" + targConclusion + self.OUTPUT_POSTFIX_DETAILS)
                if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)):
                    fileNameList.append(newFileName)
        else:
            if not callNumber:
                # target directory: the given file name, else a temporary name
                if not fileName:
                    #fileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
                    fileNames = sessUUID + callNumber + targConclusion + "_tmp"
                    newPath = os.path.join(outputPath, fileNames)
                else:
                    newPath = os.path.join(outputPath, fileName)
                # create the new directory
                if not self.makeDir(newPath):
                    return len(fileNameList), newPath, fileNameList
            for sessUUID in sessLogInfoDict.keys():
                sessDict = sessLogInfoDict[sessUUID]
                if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True:
                    newFileName = sessDict[self.SESS_FS_CALLNUMBER_DK] + "__" + sessUUID + \
                                  "__" + targConclusion + self.OUTPUT_POSTFIX_DETAILS
                    if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)):
                        fileNameList.append(newFileName)
        return len(fileNameList), newPath, fileNameList

    def outputDetails(self, outputPath, fileName="", sessUUID="", callNumber="", targConclusion=""):
        return self.__outputDetails(outputPath, fileName=fileName, sessUUID=sessUUID,
                                    callNumber=callNumber, targConclusion=targConclusion)

    # ---------------------------------------------- write the original logs to files ----------------------------------------------
    def __getOutputHeader(self, logDict, callNumber, sessUUID):
        s = "call number:%s\nUUID:%s\n" % (callNumber, sessUUID)
        return s

    def __outputOriginLog(self, outputPath, sessUUID="", callNumber="", name=""):
        sessLogInfoDict = self.getSessLogInfoDict()
        newPath = outputPath
        fileNameList = []  # files written
        # a UUID was given: write a single file
        if sessUUID:
            if sessLogInfoDict.get(sessUUID, False):
                logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
                # filter by number when one is given
                c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
                if (callNumber == c if callNumber else True):
                    fileName = name or ((callNumber or c) + "__" + sessUUID + self.OUTPUT_POSTFIX_LOG)
                    if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)):
                        fileNameList.append(fileName)
        # no UUID: possibly many files
        else:
            # target directory: the given name, else a temporary name
            if not name:
                #orgLogFileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
                orgLogFileNames = callNumber + sessUUID + "_tmp"
                newPath = os.path.join(outputPath, orgLogFileNames)
            else:
                newPath = os.path.join(outputPath, name)
            # create the new directory (removed first if it already exists)
            self.makeDir(newPath)
            for sessUUID in sessLogInfoDict.keys():
                logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
                c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
                if (callNumber == c if callNumber else True):
                    fileName = (callNumber or c) + "__" + sessUUID + self.OUTPUT_POSTFIX_LOG
                    if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)):
                        fileNameList.append(fileName)
        return len(fileNameList), newPath, fileNameList

    def outputOriginLog(self, outputPath, sessUUID="", callNumber="", name=""):
        return self.__outputOriginLog(outputPath, sessUUID=sessUUID, callNumber=callNumber, name=name)
self.__showResult(sessUUID = sessUUID, callNumber", "异常: 无 \"\"\" ignoreLinesDict = {} sessLogInfoDict = {} fileLen = len(self.getLines()) process", "if detailsDict[\"terminated_list\"]: conclusion = \"ERROR\" note += \"(recv %s)\" % detailsDict[\"terminated_list\"][0][1] else: conclusion", "\"5xx\") + \\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"6xx\"), } # 标志性处理类的状态 case_calling_invite = {\"CS_INIT__CS_ROUTING\":True, \"CS_ROUTING__CS_CONSUME_MEDIA\":True, \"calling_0\":True,}", "else: res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS) reason = res[1] if res", "显示尾 self.__showDetailsTail(count) return count # 按照UUID搜索日志,并显示详细分析信息 def showDetails(self, sessUUID = \"\", callNumber =", "fileNames = sessUUID + callNumber + \"_tmp\" newPath = os.path.join(outputPath, fileNames) else: newPath", "self.__matchChannelStateCode(keyInfoList, \"183\"), \"completing_200\": self.__matchChannelStateDesc(keyInfoList, \"completing\"), \"completed_200\": self.__matchChannelStateDesc(keyInfoList, \"completed\"), \"ready_200\": self.__matchChannelStateDesc(keyInfoList, \"ready\"), \"terminated_list\": self.__fuzzyMatchChannelStateCode(keyInfoList,", "return s def __showDetailsBody(self, sessUUID = \"\", targConclusion = \"\"): s = self.getDetails(sessUUID,", "self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = self.SIGN_CHAN_TERMINATED, mod = self.MATCH_MOD_DETAILS) s += \"%-16s: %s\\n\" %", "if callNumber else True) and \\ self.__showDetailsBody(sessUUID, conclusion): count += 1 else: total", "conclusion # print \"\\n\", sessLogInfoDict[sessUUID][\"callNumber\"], sessLogInfoDict[sessUUID][\"result\"][\"conclusion\"], note,#, \"\\n\", keyInfoList, \"\\n\",detailsDict, \"\\n\" # 分析会话日志", "\"%-16s: %s\\n\" % (\"挂断原因\", res[1] if res else reason) else: res = self.__match(keyInfoList,", "\"send_bye\" SIGN_FLAG_CANCEL = 'cancel' SIGN_FLAG_R_INVITE = \"recv_invite\" # SIP信令 SIP_INVITE = 'INVITE' SIP_CANCEL", "res if res else (\"\",\"\",\"\",\"\",\"\",\"\") note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] s = \"\" if mode", "%-36s %-30s %-7s %-s\\n\" % (callTime, sessUUID, callNumber or getColor(\"{0:<20}\".format(\"null\"), color='gray', need=show), conclusion,", "sessUUID + \"__\" + targConclusion + self.OUTPUT_POSTFIX_DETAILS if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)): fileNameList.append(newFileName)", "\"completed\"), \"ready_200\": self.__matchChannelStateDesc(keyInfoList, \"ready\"), \"terminated_list\": self.__fuzzyMatchChannelStateCode(keyInfoList, \"4xx\") + \\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"5xx\") + \\", "'terminated' SIGN_FLAG_CALL = \"channel sm\" SIGN_CALL_HANGUP = 'HANGUP' SIGN_FLAG_CS = \"core sm\" SIGN_FLAG_RTP", "+ \"]\" + reason + \"}\" if reason not in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion", "= self.printProc(process, sessLen, widgetType = \"percent\") for f in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys(): flag = False", "(x[0] >= f if f != -1 else True) and \\ (x[1] >=", "False) else: return UUID, None else: return [(UUID, sessDict[UUID].get(key, False)) for UUID in", "\"\", targConclusion = \"\", mode = \"normal\"): sessLogInfoDict = self.getSessLogInfoDict() if not sessLogInfoDict.get(sessUUID,", "[self.SIGN_FLAG_R_BYE]: return self.MOD_OUTSIDE, self.MOD_FS, self.SIP_BYE elif flag in [self.SIGN_FLAG_CANCEL]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_CANCEL", "SESS_FS_CALLNUMBER_DK = \"callNumber\" # 抽取出的关键信息分类 SIGN_FLAG_CHAN = \"chan 
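    # Illustrative sketch (added; not in the original source). The accessors
    # above all funnel through getSessInfo(); given a loaded and analyzed
    # instance `fla`, usage looks roughly like this (the UUID is the one used
    # as an example elsewhere in this file):
    #
    #   uuid, number = fla.getCallNumber("4541eb63-e5b0-49f0-8d2c-31e06078013f")
    #   uuid, result = fla.getResultDict("4541eb63-e5b0-49f0-8d2c-31e06078013f")
    #   # result -> {'conclusion': 'OK', 'details': {...}, 'note': 'INVITE(R) -> ...'}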
proc\" SIGN_CHAN_CALLING = 'calling' SIGN_CHAN_PROCEDDING", "False else: return False def __matchCsStateChange(self, keyInfoList, fromState, toState): \"\"\"CS状态变迁匹配 参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态", "logFileDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] fileList = sorted(logFileDict.items(), key=lambda logFileDict:logFileDict[0]) for f, logDict in fileList:", "return self.__sessLogInfoDict def getIgnoreLinesDict(self): \"\"\"获取忽略的行字典 在解析过程中,有些无法满足正则条件的日志行,无法解析其数据,则会填入此字典中 参数列表: 无 返回值: 忽略的行字典 例如: {文件索引:{行数:日志}} 异常:", "if locIp and RmtIp: s += \"%-16s: %s:%s:%s -> %s:%s:%s (%s:%s %s:%s)\\n\" %", "最后包含一些关键信息,如呼叫号码、分析结果、关键信息供分析器内部逻辑使用 参数列表: 无 返回值: 成功解析的会话日志字典和无法解析的会话日志字典 dict,dict 异常: 无 \"\"\" ignoreLinesDict = {} sessLogInfoDict", "or self.caseMatch(detailsDict, case_ringing_183) or self.caseMatch(detailsDict, case_ringing_183_180) or \\ self.caseMatch(detailsDict, case_ringinged_180) or self.caseMatch(detailsDict, case_ringinged_183):", "None else: return [(UUID, sessDict[UUID].get(key, False)) for UUID in sessDict.keys()] def getLogDict(self, UUID", "for p in self.getPath()]) fileNames = sessUUID + callNumber + \"_tmp\" newPath =", "\"\", callNumber = \"\", conclusion = \"\"): return \"\" def getDetails(self, sessUUID =", "= \"\", sessUUID = \"\", callNumber = \"\", conclusion = \"\"): return self.__outputDetails(outputPath,", "time.clock() s = \"OK (耗时:%.2f秒)\" % (time3 - time2) PRINT(s, color='green') s =", "(time2 - time1) PRINT(s, color='green') s = \"正在提取号码...\" PRINT(s, end='') self.__getCallNumber() time3 =", "s = \"\\n总数:%d\" % count PRINT(s) def __showResult(self, sessUUID = \"\", callNumber =", "path, rl) else: return super(FsLogAnalyzer, self).load(path, rl) def clear(self): \"\"\"清理FS的日志 参数列表: 无 返回值:", "\"\", callNumber = \"\", name = \"\"): return self.__outputOriginLog(outputPath, sessUUID = sessUUID, callNumber", "self.showNote(note)) s += \"\\n\" + \"{0:*^160}\".format(\" 消息交互详情 \") + \"\\n\\n\" s += \"%-4s", "# invite-> 200<- bye<-> if self.caseMatch(detailsDict, case_hangup_acitve): note += \" -> HANGUP\" +", "self.caseMatch(detailsDict, case_r_183): note += \" -> RINGING(183)\" note += \" -> HANGUP\" +", "(\"entering state \\[(.*)\\]\\[(.*)\\]\", 2, [], self.SIGN_FLAG_CHAN), # 收到消息类的日志 (\"Callstate Change (.*) -> (.*)\",", "None if mode in ['normal']: s += \"%-16s: %-s\\n\" % (\"结果\", conclusion) s", "class FsLogAnalyzer(LogAnalyzer): __sessLogInfoDict = {}# 按照会话归类的日志信息 # {会话UUID:{log:{文件索引:{行数:日志}}, # callNumber:呼叫号码, # result:{分析结果}, #", "if param2 != \"\" else True) and \\ (x[0] >= f if f", "(.*) -> (.*)\", 2, [], self.SIGN_FLAG_CS), # 状态转移类的日志 (\"entering state \\[(.*)\\]\\[(.*)\\]\", 2, [],", "x[2] == flag and \\ ((len(x[3]) >= 1 and param1.strip() == x[3][0].strip()) if", "+= \"%-16s: %-s\\n\" % (\"UUID\", sessUUID) if numberFrom: s += \"%-16s: %-s\\n\" %", "conclusion = \"\"): return self.__outputDetails(outputPath, fileName = fileName, sessUUID = sessUUID, callNumber =", "ToModule, Sign) 异常: 无 \"\"\" if flag in [self.SIGN_FLAG_CHAN]: if context[0] in [self.SIGN_CHAN_CALLING]:", "% len(sessUUIDList)) # 显示呼叫号码列表 def showCallNumberList(self): # 呼叫号码 callNumberList = self.getCallNumberList() tmp =", "无 返回值: 成功解析的会话日志字典和无法解析的会话日志字典 dict,dict 异常: 无 \"\"\" ignoreLinesDict = {} sessLogInfoDict = {}", "[], self.SIGN_FLAG_CALL), # 呼叫状态类的日志 (\"receiving invite from (.*) version\", 1, [], self.SIGN_FLAG_R_INVITE), (\"AUDIO", "True) and \\ (x[0] >= f if f != -1 else True) and", "\"4xx\") + \\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"5xx\") + \\ 
self.__fuzzyMatchChannelStateCode(keyInfoList, \"6xx\"), } # 标志性处理类的状态 case_calling_invite", "= \"Normal\"): time1 = time.clock() s = \"正在收集会话信息...\" PRINT(s, end='') self.__sessCollect() time2 =", "conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] if targConclusion.upper() in conclusion.upper(): color = conclusion.upper()", "sessLogInfoDict = self.getSessLogInfoDict() return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] for sessUUID in sessLogInfoDict.keys() if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]] # 显示UUID列表", "Change (.*) -> (.*)\", 2, [], self.SIGN_FLAG_CS), # 状态转移类的日志 (\"entering state \\[(.*)\\]\\[(.*)\\]\", 2,", "case_answer_180) or self.caseMatch(detailsDict, case_answerd_180) or \\ self.caseMatch(detailsDict, case_answer_183) or self.caseMatch(detailsDict, case_answerd_183): note +=", "参数列表: UUID:会话的UUID 返回值: 呼叫号码 str 异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_FS_CALLNUMBER_DK) def getResultDict(self,", "+ 1:-1] # 按照UUID归类存放日志信息 if sessUUID in sessLogInfoDict: if f not in sessLogInfoDict[sessUUID][self.SESS_LOG_DK]:", "[] for i, k in enumerate(keyInfoList): signTime = \"%s\" % self.getLogTime(logDict.get(k[0], {}).get(k[1], \"\"))", "# 输出分段,提示是否继续显示内容 continueRet, flag = self.inputContinue(i, count, total, flag, self.__showAnalysisResultHeader, conclusion) if not", "sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1] flag = True break if flag: break # 没有找到号码,可能是日志文件的格式发生了变化 else:", "\"\\n\" + \"{0:*^160}\".format(\" 消息交互详情 \") + \"\\n\\n\" s += \"%-4s %-35s %-16s %-16s", "\"HANGUP\"), \"calling_0\": self.__matchChannelStateDesc(keyInfoList, \"calling\"), \"proceeding_180\": self.__matchChannelStateCode(keyInfoList, \"180\"), \"proceeding_183\": self.__matchChannelStateCode(keyInfoList, \"183\"), \"completing_200\": self.__matchChannelStateDesc(keyInfoList, \"completing\"),", "= {}# 按照会话归类的日志信息 # {会话UUID:{log:{文件索引:{行数:日志}}, # callNumber:呼叫号码, # result:{分析结果}, # keyInfo:[(文件索引,行数,状态类型,(状态迁移信息))]} # }", "\"\"\"获取忽略的行字典 在解析过程中,有些无法满足正则条件的日志行,无法解析其数据,则会填入此字典中 参数列表: 无 返回值: 忽略的行字典 例如: {文件索引:{行数:日志}} 异常: 无 \"\"\" return self.__ignoreLinesDict", "\"备注\") return s def __getOutputResultTail(self, warningCount, errorCount, okCount): s = \"%s:%d\\n%s:%d\\n%s:%d\\n%s:%d\\n\" % (\"总计\",", "line.find(' ') line_len = len(line) # 若没有找到空格,则不记录(UUID都是36长度的,若不是,则不记录) if pos is -1 or pos", "sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \\ self.__showDetailsBody(sessUUID, conclusion): count += 1 else:", "{\"RINGING__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_183 = {\"EARLY__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_183 = {\"EARLY__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,}", "Change (.*) -> (.*)\", 2, [], self.SIGN_FLAG_CALL), # 呼叫状态类的日志 (\"receiving invite from (.*)", "# -*- coding: utf-8 -*- import os import time import sys from datetime", "+= \"\\n\" + \"{0:*^160}\".format(\" 消息交互详情 \") + \"\\n\\n\" s += \"%-4s %-35s %-16s", "__matchCallStateChange(self, keyInfoList, fromState, toState): \"\"\"call状态变迁匹配 参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool 异常:", "keyInfoList, fromState, toState): \"\"\"call状态变迁匹配 参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool 异常: 无", "= [] for x in keyInfoList: if x[2] == self.SIGN_FLAG_CHAN: reExpr = \"(\"", "-> TALKING\" # invite-> 200<- 
bye<-> if self.caseMatch(detailsDict, case_hangup_acitve): note += \" ->", "+ \"_tmp\" newPath = os.path.join(outputPath, orgLogFileNames) else: newPath = os.path.join(outputPath, fileName) # 创建新的目录", "not sessLogInfoDict.get(sessUUID, False): return \"\" conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] if targConclusion.upper() not in conclusion.upper():", "for i, k in enumerate(keyInfoList): signTime = \"%s\" % self.getLogTime(logDict.get(k[0], {}).get(k[1], \"\")) res", "s += \"%-16s: %-s\\n\" % (\"消息流\", self.showNote(note)) s += \"\\n\" + \"{0:*^160}\".format(\" 消息交互详情", "pos = line.find(' ') line_len = len(line) # 若没有找到空格,则不记录(UUID都是36长度的,若不是,则不记录) if pos is -1", "self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName) return len(fileNameList), newPath, fileNameList def outputOriginLog(self, outputPath, sessUUID =", "0, 0, 0 # 输出到文件 if sessUUID: if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber", "\"\" if mode in ['normal']: s += \"-\" * 160 + \"\\n\" s", "= ignoreLinesDict for sessUUID in sessLogInfoDict.keys(): if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: print(sessUUID, \"\\nis not", "= sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 from base.base import PRINT,", "super(FsLogAnalyzer, self).load(path, rl) def clear(self): \"\"\"清理FS的日志 参数列表: 无 返回值: 成功标志和错误信息 元组(bool, str) 异常:", "无 \"\"\" sessDict = self.getSessLogInfoDict() if UUID: if sessDict.get(UUID, False): return UUID, sessDict[UUID].get(key,", "set(callNumberList) self.printList(tmp, 8, \"呼叫号码列表:\", \"总数:%d\" % len(tmp)) # 重复的呼叫号码 dupl = self.findDupl(callNumberList) len(dupl)", "\"EARLY\", \"RINGING\"), \"EARLY__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"EARLY\", \"ACTIVE\"), \"RINGING__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"RINGING\", \"ACTIVE\"), \"DOWN__HANGUP\": self.__matchCallStateChange(keyInfoList, \"DOWN\",", "= 'HANGUP' SIGN_FLAG_CS = \"core sm\" SIGN_FLAG_RTP = \"rtp\" SIGN_FLAG_CALLNUMBER = \"callnumber\" SIGN_FLAG_HANGUP", "logFileDict:logFileDict[0]) for f, logDict in fileList: logList = sorted(logDict.items(), key=lambda logDict:logDict[0]) for line,", "sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName = sessDict[self.SESS_FS_CALLNUMBER_DK]", "= self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS) reason = res[1] if res else \"\"", "elif mod in [self.MATCH_MOD_EXTEND]: return (l[0][1][0], l[0][1][1], l[0][0]) if any(l) else False elif", "(\"Flipping CID from \\\"(.*)\\\" \\<(.*)\\> to \\\"(.*)\\\" \\<(.*)\\>\", 4, [], self.SIGN_FLAG_CALLNUMBER), # 呼叫号码", "sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if (callNumber == c if callNumber else True): fileName = (callNumber or", "(sessUUID, context) in enumerate(sessList): # 若输入了callNumber if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else", "PRINT, INPUT, getColor if PY2: from analyzer import LogAnalyzer else: from analyzer.analyzer import", "return s def __showDetailsTail(self, count, sessUUID = \"\", callNumber = \"\", conclusion =", "self.reMatch(\"New Channel sofia\\/(.*)\\/(\\d*)\\@(.*?) 
\\[\", sessLog, 3) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1] flag =", "(\"呼叫开始时间\", \"UUID\", \"呼叫号码\", \"结果\", \"备注\") return s def __getOutputResultTail(self, warningCount, errorCount, okCount): s", "callNumber = callNumber, conclusion = conclusion) # ----------------------------------------------输出简单分析结果到文件---------------------------------------------- def __getOutputResultHeader(self): s = \"%-30s", "% (\"消息流\", self.showNote(note)) s += \"\\n\" + \"{0:*^160}\".format(\" 消息交互详情 \") + \"\\n\\n\" s", "s += \"%-16s: %-s\\n\" % (\"呼叫开始时间\", callTime) s += \"%-16s: %-s\\n\" % (\"UUID\",", "__showDetailsHeader(self, sessUUID = \"\", callNumber = \"\", conclusion = \"\"): return \"\" def", "\"]\" sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion # print \"\\n\",", "in context\", sessLog, 2) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1] flag = True break", "1, outputPath, [fileName] else: return 0, outputPath, [] else: return 0, outputPath, []", "self.caseMatch(detailsDict, case_hangup_180) or self.caseMatch(detailsDict, case_hangup_183): note += \" -> HANGUP\" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE)", "\"Not find the call number. UUID:%s\" % sessUUID pass else: pass # 会话关键信息收集", "sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \\ and self.__showAnalysisResultBody(sessUUID, conclusion): count += 1 else:", "[] else: return 0, outputPath, [] # 输出简单分析结果到文件 def outputReslut(self, outputPath, sessUUID =", "note += \" -> TALKING\" # invite-> (183<- or 180<-) 200<- bye<-> if", "mod = self.MATCH_MOD_DETAILS) s += \"%-16s: %s\\n\" % (\"挂断原因\", res[1] if res else", "if f not in ignoreLinesDict: ignoreLinesDict[f] = {} else: ignoreLinesDict[f][i] = line continue", "self.__match(keyInfoList, self.SIGN_FLAG_RTP, mod = self.MATCH_MOD_DETAILS) locIp, locPort, RmtIp, RmtPort, audioPayLoad, audioPTime = res", "= 'CANCEL' SIP_BYE = 'BYE' # 匹配模式 MATCH_MOD_NORMAL = \"normal\" MATCH_MOD_EXTEND = \"extend\"", "= self.getSessLogInfoDict() return sessLogInfoDict.keys() # 获取呼叫号码列表 def getCallNumberList(self): sessLogInfoDict = self.getSessLogInfoDict() return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]", "conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] if targConclusion.upper() not in conclusion.upper(): return \"\" logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]", "sessUUID, targConclusion = \"\"): s, c = self.__getAnalysisResultBody(sessUUID, targConclusion) if s: PRINT(s) return", "# 匹配模式 MATCH_MOD_NORMAL = \"normal\" MATCH_MOD_EXTEND = \"extend\" MATCH_MOD_DETAILS = \"details\" # 输出文件", "== \"4befcdab-a4cc-4d6a-979f-bbff65d729b0\": # print(\"\\n\") # for k in keyInfoList: # print(k) conclusion =", "if not fileName: #orgLogFileNames = \"_\".join([os.path.split(p)[-1] for p in self.getPath()]) orgLogFileNames = callNumber", "CS_EXECUTE 为核心层状态机迁移 -- CS类 Callstate Change ACTIVE -> HANGUP 为呼叫层状态机迁移 -- call类 entering", "{\"RINGING__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_180 = {\"RINGING__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_183 = {\"EARLY__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,}", "= res if res else (\"\",\"\",\"\",\"\",\"\",\"\") note = 
    # Generic matcher over the key-info list; `f` and `l` restrict matches to
    # entries at or after a given file index / line number.
    def __match(self, keyInfoList, flag, param1 = "", param2 = "", f = -1, l = -1, mod="normal"):
        l = [(i, x) for i, x in enumerate(keyInfoList)
             if x[2] == flag and
                ((len(x[3]) >= 1 and param1.strip() == x[3][0].strip()) if param1 != "" else True) and
                ((len(x[3]) >= 2 and param2.strip() == x[3][1].strip()) if param2 != "" else True) and
                (x[0] >= f if f != -1 else True) and
                (x[1] >= l if l != -1 else True)]
        if mod in [self.MATCH_MOD_NORMAL]:
            return any(l)
        elif mod in [self.MATCH_MOD_EXTEND]:
            return (l[0][1][0], l[0][1][1], l[0][0]) if any(l) else False
        elif mod in [self.MATCH_MOD_DETAILS]:
            return l[0][1][3] if any(l) else False
        else:
            return False

    def __matchCsStateChange(self, keyInfoList, fromState, toState):
        """Match a core (CS) state-machine transition. Returns bool."""
        return self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState)

    def __matchCallStateChange(self, keyInfoList, fromState, toState):
        """Match a call state-machine transition. Returns bool."""
        return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState)

    def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode):
        """Fuzzy-match channel response codes; an 'x' stands for any digit,
        so "4xx" matches every 4xx response. Returns the matching entries."""
        codeList = []
        for x in keyInfoList:
            if x[2] == self.SIGN_FLAG_CHAN:
                reExpr = "(" + fuzzyCode.lower().replace("x", "\\d") + ")"
                res = self.reMatch(reExpr, x[3][1], 1)
                res and codeList.append(x[3])
        return codeList

    def __matchChannelStateCode(self, keyInfoList, code):
        """Exactly match a channel response code. Returns bool."""
        return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param2 = code)

    def __matchChannelStateDesc(self, keyInfoList, desc):
        """Match a channel state description. Returns bool."""
        return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = desc)

    # Analyze the call flow of every session
    def __sessAnalysis(self):
        """Determine which state transitions occurred in each session, then
        match them against templates of the standard call flows."""
        sessLogInfoDict = self.getSessLogInfoDict()
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType = "percent", begin=50, end=100)
            keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
            conclusion = ""
            note = ""
            detailsDict = {
                "CS_NEW__CS_INIT": self.__matchCsStateChange(keyInfoList, "CS_NEW", "CS_INIT"),
                "CS_INIT__CS_ROUTING": self.__matchCsStateChange(keyInfoList, "CS_INIT", "CS_ROUTING"),
                "CS_ROUTING__CS_CONSUME_MEDIA": self.__matchCsStateChange(keyInfoList, "CS_ROUTING", "CS_CONSUME_MEDIA"),
                "CS_CONSUME_MEDIA__CS_EXECUTE": self.__matchCsStateChange(keyInfoList, "CS_CONSUME_MEDIA", "CS_EXECUTE"),
                "DOWN__RINGING": self.__matchCallStateChange(keyInfoList, "DOWN", "RINGING"),
                "DOWN__EARLY": self.__matchCallStateChange(keyInfoList, "DOWN", "EARLY"),
                "DOWN__ACTIVE": self.__matchCallStateChange(keyInfoList, "DOWN", "ACTIVE"),
                "EARLY__RINGING": self.__matchCallStateChange(keyInfoList, "EARLY", "RINGING"),
                "EARLY__ACTIVE": self.__matchCallStateChange(keyInfoList, "EARLY", "ACTIVE"),
                "RINGING__ACTIVE": self.__matchCallStateChange(keyInfoList, "RINGING", "ACTIVE"),
                "DOWN__HANGUP": self.__matchCallStateChange(keyInfoList, "DOWN", "HANGUP"),
                "EARLY__HANGUP": self.__matchCallStateChange(keyInfoList, "EARLY", "HANGUP"),
                "RINGING__HANGUP": self.__matchCallStateChange(keyInfoList, "RINGING", "HANGUP"),
                "ACTIVE__HANGUP": self.__matchCallStateChange(keyInfoList, "ACTIVE", "HANGUP"),
                "calling_0": self.__matchChannelStateDesc(keyInfoList, "calling"),
                "proceeding_180": self.__matchChannelStateCode(keyInfoList, "180"),
                "proceeding_183": self.__matchChannelStateCode(keyInfoList, "183"),
                "completing_200": self.__matchChannelStateDesc(keyInfoList, "completing"),
                "completed_200": self.__matchChannelStateDesc(keyInfoList, "completed"),
                "ready_200": self.__matchChannelStateDesc(keyInfoList, "ready"),
                "terminated_list": self.__fuzzyMatchChannelStateCode(keyInfoList, "4xx") +
                                   self.__fuzzyMatchChannelStateCode(keyInfoList, "5xx") +
                                   self.__fuzzyMatchChannelStateCode(keyInfoList, "6xx"),
            }
            # Templates of the standard flow milestones
            case_calling_invite = {"CS_INIT__CS_ROUTING": True, "CS_ROUTING__CS_CONSUME_MEDIA": True, "calling_0": True,}
            case_ringing_180 = {"proceeding_180": True,}
            case_ringing_183 = {"proceeding_183": True,}
            case_ringinged_180 = {"DOWN__RINGING": True,}
            case_ringinged_183 = {"DOWN__EARLY": True,}
            case_ringing_183_180 = {"DOWN__EARLY": True, "proceeding_183": True, "EARLY__RINGING": True, "proceeding_180": True,}
            case_answer_invite = {"DOWN__ACTIVE": True, "completed_200": True, "ready_200": True,}
            case_answer_180 = {"RINGING__ACTIVE": True, "completing_200": True, "ready_200": True,}
            case_answered_180 = {"RINGING__ACTIVE": True, "completed_200": True, "ready_200": True,}
            case_answer_183 = {"EARLY__ACTIVE": True, "completing_200": True, "ready_200": True,}
            case_answered_183 = {"EARLY__ACTIVE": True, "completed_200": True, "ready_200": True,}
            case_hangup_invite = {"DOWN__HANGUP": True,}
            case_hangup_180 = {"RINGING__HANGUP": True,}
            case_hangup_183 = {"EARLY__HANGUP": True,}
            case_hangup_active = {"ACTIVE__HANGUP": True,}
            case_r_183 = {"proceeding_183": True,}
            # invite->
            if self.caseMatch(detailsDict, case_calling_invite):
                note += "INVITE" + (self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE) and "(R)" or "(S)")
            # invite-> (183<- or 180<-)
            if self.caseMatch(detailsDict, case_ringing_180) or self.caseMatch(detailsDict, case_ringing_183) or \
               self.caseMatch(detailsDict, case_ringing_183_180) or \
               self.caseMatch(detailsDict, case_ringinged_180) or self.caseMatch(detailsDict, case_ringinged_183):
                note += " -> RINGING"
            # invite-> 200<-
            if self.caseMatch(detailsDict, case_answer_invite):
                note += " -> TALKING"
            # invite-> (183<- or 180<-) 200<-
            if self.caseMatch(detailsDict, case_answer_180) or self.caseMatch(detailsDict, case_answered_180) or \
               self.caseMatch(detailsDict, case_answer_183) or self.caseMatch(detailsDict, case_answered_183):
                note += " -> TALKING"
            # invite-> 200<- bye<->
            if self.caseMatch(detailsDict, case_hangup_active):
                note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
            # invite-> (bye-> or error response<-)
            elif self.caseMatch(detailsDict, case_hangup_invite):
                if self.caseMatch(detailsDict, case_r_183):
                    note += " -> RINGING(183)"
                note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
            # invite-> (183<- or 180<-) error response<-
            elif self.caseMatch(detailsDict, case_hangup_180) or self.caseMatch(detailsDict, case_hangup_183):
                note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
            # Hangup reason and final conclusion
            res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS)
            st, reason = res if res else ("", "")
            if reason:
                note += "{[" + st + "]" + reason + "}"
                if reason not in ["NORMAL_CLEARING", "MANAGER_REQUEST"]:
                    conclusion = "ERROR"
                else:
                    res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS)
                    st, reason = res if res else ("", "")
                    conclusion = "OK"
            else:
                # No hangup reason logged: either an error response terminated
                # the call, or the flow is simply incomplete in this log.
                if detailsDict["terminated_list"]:
                    conclusion = "ERROR"
                    note += "(recv %s)" % detailsDict["terminated_list"][0][1]
                else:
                    conclusion = "WARNING"
                    note += "[NOT COMPLETE"
                    note += "]"
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion
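    # Illustrative sketch (added; values made up). caseMatch() — provided by
    # the LogAnalyzer base class — checks that every key a template marks True
    # is also True in detailsDict. A call answered after a 180 ring yields,
    # among others:
    #
    #   detailsDict = {"RINGING__ACTIVE": True, "completing_200": True,
    #                  "ready_200": True, ...}
    #
    # which satisfies case_answer_180 above, so the note becomes something like
    # "INVITE(R) -> RINGING -> TALKING -> HANGUP(R){[CS_EXECUTE]NORMAL_CLEARING}".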
%-s\\n\" % (\"UUID\", sessUUID) if numberFrom: s +=", "count PRINT(s) return s def __showDetails(self, sessUUID = \"\", callNumber = \"\", conclusion", "code) def __matchChannelStateDesc(self, keyInfoList, desc): \"\"\"通道状态描述匹配 匹配状态描述 参数列表: keyInfoList:关键信息列表 desc:描述 返回值: 成功或失败 bool", "in enumerate(lines): # 例如:4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 sofia/external/6010@10.0.7.152:5080 Standard INIT # 找到第一个空格,左边就是会话ID,右边就是日志信息", "keyInfo:[(文件索引,行数,状态类型,(状态迁移信息))]} # } __ignoreLinesDict = {}# 忽略的行{文件索引:{行数:日志}} ANALYZER_TYPE_FS = 'fs' # 会话类字典key SESS_FS_CALLNUMBER_DK", "def getSessInfo(self, UUID = \"\", key = \"\"): \"\"\"清理FS的日志 参数列表: UUID:会话的UUID key:内部的字典名 返回值:", "(time3 - time2) PRINT(s, color='green') s = \"正在分析会话过程...\" PRINT(s, end='') self.__analysis() time4 =", "in [self.SIGN_FLAG_CHAN]: if context[0] in [self.SIGN_CHAN_CALLING]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_INVITE elif context[0] in", "((len(x[3]) >= 2 and param2.strip() == x[3][1].strip()) if param2 != \"\" else True)", "if s: PRINT(s) return s def __showAnalysisResultTail(self, count, targConclusion=\"\"): s = \"\\n总数:%d\" %", "\"CS_INIT__CS_ROUTING\": self.__matchCsStateChange(keyInfoList, \"CS_INIT\", \"CS_ROUTING\"), \"CS_ROUTING__CS_CONSUME_MEDIA\": self.__matchCsStateChange(keyInfoList, \"CS_ROUTING\", \"CS_CONSUME_MEDIA\"), \"CS_CONSUME_MEDIA__CS_EXECUTE\": self.__matchCsStateChange(keyInfoList, \"CS_CONSUME_MEDIA\", \"CS_EXECUTE\"), \"DOWN__RINGING\":", "= 'calling' SIGN_CHAN_PROCEDDING = 'proceeding' SIGN_CHAN_COMPLETE = 'completing' SIGN_CHAN_TERMINATED = 'terminated' SIGN_FLAG_CALL =", "count PRINT(s) def __showResult(self, sessUUID = \"\", callNumber = \"\", conclusion = \"\"):", "\"chan proc\" SIGN_CHAN_CALLING = 'calling' SIGN_CHAN_PROCEDDING = 'proceeding' SIGN_CHAN_COMPLETE = 'completing' SIGN_CHAN_TERMINATED =", "logDict:logDict[0]) for line, log in logList: for reExpr, expLen, dropPos, flag in reExpInfo:", "sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f] = {i:sessLog} if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i] =", "callNumber, conclusion = conclusion) # ----------------------------------------------输出简单分析结果到文件---------------------------------------------- def __getOutputResultHeader(self): s = \"%-30s %-36s %-30s", "['normal']: s += \"%-16s: %-s\\n\" % (\"结果\", conclusion) s += \"%-16s: %-s\\n\" %", "True): s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False) context += s if s and", "s if s and c.upper() in ['ERROR']: errorCount += 1 elif s and", "len(fileNameList), newPath, fileNameList def outputOriginLog(self, outputPath, sessUUID = \"\", callNumber = \"\", name", "begin=50, end=100) keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] #if sessUUID == \"4befcdab-a4cc-4d6a-979f-bbff65d729b0\": # print(\"\\n\") # for", "self.SIP_INVITE elif context[0] in [self.SIGN_CHAN_PROCEDDING]: return self.MOD_OUTSIDE, self.MOD_FS, context[1] elif context[0] in [self.SIGN_CHAN_COMPLETE]:", "as Err: s = str(Err, reExpInfo[i], res) PRINT(s) raise res = tuple(l) keyInfoList.append((f,", "= \"send_bye\" SIGN_FLAG_CANCEL = 'cancel' SIGN_FLAG_R_INVITE = \"recv_invite\" # SIP信令 SIP_INVITE = 'INVITE'", "(callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True): s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False)", "#if 
sessUUID == \"4befcdab-a4cc-4d6a-979f-bbff65d729b0\": # print(\"\\n\") # for k in keyInfoList: # print(k)", "reason = res[1] if res else \"\" if reason: res = self.__match(keyInfoList, self.SIGN_FLAG_CHAN,", "__showAnalysisResultBody(self, sessUUID, targConclusion = \"\"): s, c = self.__getAnalysisResultBody(sessUUID, targConclusion) if s: PRINT(s)", "sessUUID in sessLogInfoDict.keys(): process = self.printProc(process, sessLen, widgetType = \"percent\", begin=0, end=50) keyInfoList", "count, targConclusion=\"\"): s = \"\\n总数:%d\" % count PRINT(s) def __showResult(self, sessUUID = \"\",", "def __getAnalysisResultBody(self, sessUUID, targConclusion = \"\", show = True): sessLogInfoDict = self.getSessLogInfoDict() s", "or 错误应答<-) elif self.caseMatch(detailsDict, case_hangup_invite): if self.caseMatch(detailsDict, case_r_183): note += \" -> RINGING(183)\"", "self.SIGN_FLAG_R_INVITE) and \"(R)\" or \"(S)\") # invite-> 200<- if self.caseMatch(detailsDict, case_answer_invite): note +=", "CANCEL to(.*)\", 1, [0], self.SIGN_FLAG_CANCEL), ] sessLen = len(sessLogInfoDict) process = 0 for", "getCallNumber(self, UUID = \"\"): \"\"\"获取呼叫号码 参数列表: UUID:会话的UUID 返回值: 呼叫号码 str 异常: 无 \"\"\"", "c) + \"__\" + sessUUID + self.OUTPUT_POSTFIX_LOG if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c,", "\\ self.caseMatch(detailsDict, case_answer_183) or self.caseMatch(detailsDict, case_answerd_183): note += \" -> TALKING\" # invite->", "len(sessLogInfoDict) process = 0 for sessUUID in sessLogInfoDict.keys(): process = self.printProc(process, sessLen, widgetType", "精确匹配状态码 参数列表: keyInfoList:关键信息列表 code:状态码 返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CHAN,", "= sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] if targConclusion.upper() not in conclusion.upper(): return \"\" logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] keyInfoList", "= \"ERROR\" else: res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS) st, reason =", "!= -1 else True) and \\ (x[1] >= l if l != -1", "= self.getSessLogInfoDict() if UUID: if sessDict.get(UUID, False): return UUID, sessDict[UUID].get(key, False) else: return", "in [self.MATCH_MOD_EXTEND]: return (l[0][1][0], l[0][1][1], l[0][0]) if any(l) else False elif mod in", "+ (self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE) and \"(R)\" or \"(S)\") # invite-> 200<- if self.caseMatch(detailsDict, case_answer_invite):", "else: return 0, outputPath, [] # 输出简单分析结果到文件 def outputReslut(self, outputPath, sessUUID = \"\",", "self.__ignoreLinesDict = {} return super(FsLogAnalyzer, self).clear() def getSessInfo(self, UUID = \"\", key =", "\"\\n总数:%d\" % count PRINT(s) def __showResult(self, sessUUID = \"\", callNumber = \"\", conclusion", "note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] s = \"\" if mode in ['normal']: s += \"-\"", "flag = False sessList = sorted(sessLogInfoDict.items(), key=lambda sessLogInfoDict:sessLogInfoDict[1][self.SESS_START_TIME_DK]) for i, (sessUUID, context) in", "= -1, l = -1, mod=\"normal\"): l = [(i, x) for i, x", "else: self.__sessLogInfoDict = sessLogInfoDict self.__ignoreLinesDict = ignoreLinesDict for sessUUID in sessLogInfoDict.keys(): if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK]", "RINGING(183)\" note += \" -> HANGUP\" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and \"(S)\" or \"(R)\")", "\"ptime\", audioPTime) res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS) 
reason = res[1] if", "[] # 输出的文件列表 # 如果存在UUID(只输出一个文件) if sessUUID: if sessLogInfoDict.get(sessUUID, False): logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]", "(callNumber or c) + \"__\" + sessUUID + self.OUTPUT_POSTFIX_LOG if self.output(logDict, newPath, fileName,", "signTime = \"%s\" % self.getLogTime(logDict.get(k[0], {}).get(k[1], \"\")) res = self.reMatch(\"(\\\\d{4})-(\\\\d{1,2})-(\\\\d{1,2}) (\\\\d{2}):(\\\\d{2}):(\\\\d{2}).(\\\\d{6})\", signTime, 7)", "\\ self.SESS_RESULT_DK:{self.SESS_RESULT_CONCLUSION_DK:\"\", self.SESS_RESULT_DETAILS_DK:{}, self.SESS_RESULT_NOTE_DK:\"\"}, \\ self.SESS_KEYINFO_DK:[], self.SESS_START_TIME_DK:self.getLogTime(sessLog)} else: self.__sessLogInfoDict = sessLogInfoDict self.__ignoreLinesDict =", "process = self.printProc(process, sessLen, widgetType = \"percent\") for f in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys(): flag =", "not fileName: #orgLogFileNames = \"_\".join([os.path.split(p)[-1] for p in self.getPath()]) orgLogFileNames = callNumber +", "case_answerd_183): note += \" -> TALKING\" # invite-> (183<- or 180<-) 200<- bye<->", "\\ conclusion.upper() in ['OK'] and 'green' conclusion = getColor(\"{0:<7}\".format(conclusion), color = color) s", "[sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] for sessUUID in sessLogInfoDict.keys() if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]] # 显示UUID列表 def showSessUUIDList(self): sessUUIDList =", "= sessUUID, callNumber = callNumber, conclusion = conclusion, fileName = fileName) # ----------------------------------------------输出原始日志到文件----------------------------------------------", "按照UUID归类存放日志信息 if sessUUID in sessLogInfoDict: if f not in sessLogInfoDict[sessUUID][self.SESS_LOG_DK]: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f] = {i:sessLog}", "conclusion = \"ERROR\" if detailsDict[\"terminated_list\"]: conclusion = \"ERROR\" note += \"(recv %s)\" %", "time1) PRINT(s, color='green') s = \"正在提取号码...\" PRINT(s, end='') self.__getCallNumber() time3 = time.clock() s", "context = \"\" warningCount, errorCount, okCount = 0, 0, 0 # 输出到文件 if", "= self.getSessLogInfoDict() newPath = outputPath if sessUUID: sessDict = sessLogInfoDict.get(sessUUID, False) if not", "== context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \\ and self.__showAnalysisResultBody(sessUUID, conclusion): count += 1", "无 \"\"\" return self.getSessInfo(UUID, self.SESS_LOG_DK) def getCallNumber(self, UUID = \"\"): \"\"\"获取呼叫号码 参数列表: UUID:会话的UUID", "%-35s %-16s %-16s %s\\n\\n\" % (\"序号\",\"信令时间\", \"源日志行号\", \"消息类型\", \"详情\") l = [] for", "self.SIGN_CHAN_TERMINATED, mod = self.MATCH_MOD_DETAILS) s += \"%-16s: %s\\n\" % (\"挂断原因\", res[1] if res", "sessUUID in sessLogInfoDict.keys(): sessDict = sessLogInfoDict[sessUUID] if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else", "= [] for i, k in enumerate(keyInfoList): signTime = \"%s\" % self.getLogTime(logDict.get(k[0], {}).get(k[1],", "= sorted(logDict.items(), key=lambda logDict:logDict[0]) for line, log in logList: for reExpr, expLen, dropPos,", "\"\", conclusion = \"\"): return self.__outputDetails(outputPath, fileName = fileName, sessUUID = sessUUID, callNumber", "= datetime(int(res[0]), int(res[1]), int(res[2]), int(res[3]), int(res[4]), int(res[5])) if signTimePrev and (signTimeThis - signTimePrev).seconds", "UUID = \"\", key = \"\"): \"\"\"清理FS的日志 参数列表: UUID:会话的UUID key:内部的字典名 返回值: 成功标志和错误信息 元组(bool,", "reason not in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion = \"ERROR\" else: res = self.__match(keyInfoList, 
self.SIGN_FLAG_R_BYE,", "\"UUID列表:\", \"总数:%d\" % len(sessUUIDList)) # 显示呼叫号码列表 def showCallNumberList(self): # 呼叫号码 callNumberList = self.getCallNumberList()", "self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName) # 不存在UUID(可能输出多个文件) else: # 确定新的目录,若指定了文件名,则以指定的为准,否则以源日志文件名作为目录名 if", "抽取出的关键信息分类 SIGN_FLAG_CHAN = \"chan proc\" SIGN_CHAN_CALLING = 'calling' SIGN_CHAN_PROCEDDING = 'proceeding' SIGN_CHAN_COMPLETE =", "self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)): fileNameList.append(newFileName) else: if not callNumber: # 确定新的目录,以源日志文件名作为目录名 if not", "return \"\" def getDetails(self, sessUUID = \"\", targConclusion = \"\", mode = \"normal\"):", "= \"\", conclusion = \"\", fileName = \"\"): sessLogInfoDict = self.getSessLogInfoDict() # 确定新的目录,以源日志文件名作为目录名", "= sorted(sessLogInfoDict.items(), key=lambda sessLogInfoDict:sessLogInfoDict[1][self.SESS_START_TIME_DK]) for i, (sessUUID, context) in enumerate(sessList): # 若输入了callNumber if", "# 收到消息类的日志 (\"Callstate Change (.*) -> (.*)\", 2, [], self.SIGN_FLAG_CALL), # 呼叫状态类的日志 (\"receiving", "(\\d+)\", 7, [0], self.SIGN_FLAG_RTP), # RTP通道信息 (\"Flipping CID from \\\"(.*)\\\" \\<(.*)\\> to \\\"(.*)\\\"", "if any(l) else False else: return False def __matchCsStateChange(self, keyInfoList, fromState, toState): \"\"\"CS状态变迁匹配", "rl) else: return super(FsLogAnalyzer, self).load(path, rl) def clear(self): \"\"\"清理FS的日志 参数列表: 无 返回值: 成功标志和错误信息", "is None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i] = sessLog if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None:", "self.SIGN_FLAG_R_INVITE), (\"AUDIO RTP \\[(.*)\\] (.*) port (\\d+) -> (.*) port (\\d+) codec: (\\d+)", "RTP通道信息 (\"Flipping CID from \\\"(.*)\\\" \\<(.*)\\> to \\\"(.*)\\\" \\<(.*)\\>\", 4, [], self.SIGN_FLAG_CALLNUMBER), #", "(.*) version\", 1, [], self.SIGN_FLAG_R_INVITE), (\"AUDIO RTP \\[(.*)\\] (.*) port (\\d+) -> (.*)", "= sessUUID, callNumber = callNumber, conclusion = conclusion) # ----------------------------------------------显示分析结果---------------------------------------------- def __showAnalysisResultHeader(self, targConclusion=\"\"):", "reExpInfo[i], res) PRINT(s) raise res = tuple(l) keyInfoList.append((f, line, flag, res)) break else:", "%-16s %-16s %s\\n\" % (i + 1, signTime, str(k[1]), str(k[2]), str(k[3])) else: s", "self.__showDetailsHeader) if not continueRet: break # 显示尾 self.__showDetailsTail(count) return count # 按照UUID搜索日志,并显示详细分析信息 def", "super(FsLogAnalyzer, self).clear() def getSessInfo(self, UUID = \"\", key = \"\"): \"\"\"清理FS的日志 参数列表: UUID:会话的UUID", "widgetType = \"percent\", begin=0, end=50) keyInfoList = [] logFileDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] fileList =", "显示尾 self.__showAnalysisResultTail(count, conclusion) return count def showResult(self, sessUUID = \"\", callNumber = \"\",", "% len(dupl)) # ----------------------------------------------显示详细分析结果---------------------------------------------- def __showDetailsHeader(self, sessUUID = \"\", callNumber = \"\", conclusion", "%-s\\n\" % (\"显示号码\", numberFrom) s += \"%-16s: %-s\\n\" % (\"呼叫号码\", numberTo or sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK])", "s = \"%s:%d\\n%s:%d\\n%s:%d\\n%s:%d\\n\" % (\"总计\", errorCount + okCount + warningCount, \"告警\", warningCount, \"失败\",", "+ targConclusion + self.OUTPUT_POSTFIX_DETAILS) if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, 
    # ---------------------------------------- write summary results to file ----------------------------------------
    def __getOutputResultHeader(self):
        s = "%-30s %-36s %-30s %-7s %-s\n" % ("呼叫开始时间", "UUID", "呼叫号码", "结果", "备注")
        return s

    def __getOutputResultTail(self, warningCount, errorCount, okCount):
        s = "%s:%d\n%s:%d\n%s:%d\n%s:%d\n" % ("总计", errorCount + okCount + warningCount,
                                              "告警", warningCount, "失败", errorCount, "成功", okCount)
        return s

    def __outputResult(self, outputPath, sessUUID = "", callNumber = "", conclusion = "", fileName = ""):
        sessLogInfoDict = self.getSessLogInfoDict()
        context = ""
        warningCount, errorCount, okCount = 0, 0, 0
        if sessUUID:
            if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True):
                s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False)
                context += s
                if s and c.upper() in ['ERROR']:
                    errorCount += 1
                elif s and c.upper() in ['WARNING']:
                    warningCount += 1
                elif s and c.upper() in ['OK']:
                    okCount += 1
        else:
            for sessUUID in sessLogInfoDict.keys():
                if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True):
                    s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False)
                    context += s
                    if s and c.upper() in ['ERROR']:
                        errorCount += 1
                    elif s and c.upper() in ['WARNING']:
                        warningCount += 1
                    elif s and c.upper() in ['OK']:
                        okCount += 1
        if context:
            context = self.__getOutputResultHeader() + context
            context += self.__getOutputResultTail(warningCount, errorCount, okCount)
            if self.outputEx(outputPath, fileName, context):
                return 1, outputPath, [fileName]
            else:
                return 0, outputPath, []
        else:
            return 0, outputPath, []

    # Write the summary analysis result to a file
    def outputReslut(self, outputPath, sessUUID = "", callNumber = "", conclusion = "", fileName = ""):
        return self.__outputResult(outputPath, sessUUID = sessUUID, callNumber = callNumber,
                                   conclusion = conclusion, fileName = fileName)

    # ---------------------------------------- write original logs to file ----------------------------------------
    def __getOutputHeader(self, logDict, callNumber, sessUUID):
        s = "呼叫号码:%s\nUUID:%s\n" % (callNumber, sessUUID)
        return s

    def __outputOriginLog(self, outputPath, sessUUID = "", callNumber = "", name = ""):
        sessLogInfoDict = self.getSessLogInfoDict()
        newPath = outputPath
        fileNameList = []  # files actually written
        # A UUID was given: at most one file
        if sessUUID:
            if sessLogInfoDict.get(sessUUID, False):
                logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
                # Filter by call number when one was given
                c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
                if (callNumber == c if callNumber else True):
                    fileName = (callNumber or c) + "__" + sessUUID + self.OUTPUT_POSTFIX_LOG
                    if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)):
                        fileNameList.append(fileName)
        # No UUID: possibly many files, written into a subdirectory
        else:
            # Use the given name as subdirectory, otherwise derive one
            if not name:
                #orgLogFileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
                orgLogFileNames = callNumber + sessUUID + "_tmp"
                newPath = os.path.join(outputPath, orgLogFileNames)
            else:
                newPath = os.path.join(outputPath, name)
            # Create the new directory
            if not self.makeDir(newPath):
                return 0, newPath, []
            for sessUUID in sessLogInfoDict.keys():
                logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
                c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
                if (callNumber == c if callNumber else True):
                    fileName = name or ((callNumber or c) + "__" + sessUUID + self.OUTPUT_POSTFIX_LOG)
                    if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)):
                        fileNameList.append(fileName)
        return len(fileNameList), newPath, fileNameList

    def outputOriginLog(self, outputPath, sessUUID = "", callNumber = "", name = ""):
        return self.__outputOriginLog(outputPath, sessUUID = sessUUID, callNumber = callNumber, name = name)

    # ---------------------------------------- write detailed analysis to file ----------------------------------------
    def __outputDetails(self, outputPath, fileName = "", sessUUID = "", callNumber = "", conclusion = ""):
        sessLogInfoDict = self.getSessLogInfoDict()
        newPath = outputPath
        fileNameList = []
        if sessUUID:
            sessDict = sessLogInfoDict.get(sessUUID, False)
            if not sessDict:
                return len(fileNameList), newPath, fileNameList
            if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True:
                newFileName = fileName or (sessDict[self.SESS_FS_CALLNUMBER_DK] + "__" + sessUUID + "__" + conclusion + self.OUTPUT_POSTFIX_DETAILS)
                if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, conclusion)):
                    fileNameList.append(newFileName)
        else:
            # Derive a subdirectory name unless a file name was given
            if not fileName:
                #fileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
                fileNames = sessUUID + callNumber + conclusion + "_tmp"
                newPath = os.path.join(outputPath, fileNames)
            else:
                newPath = os.path.join(outputPath, fileName)
            if not self.makeDir(newPath):
                return 0, newPath, []
            for sessUUID in sessLogInfoDict.keys():
                sessDict = sessLogInfoDict[sessUUID]
                if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True:
                    newFileName = sessDict[self.SESS_FS_CALLNUMBER_DK] + "__" + sessUUID + "__" + conclusion + self.OUTPUT_POSTFIX_DETAILS
                    if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, conclusion)):
                        fileNameList.append(newFileName)
        return len(fileNameList), newPath, fileNameList

    def outputDetails(self, outputPath, fileName = "", sessUUID = "", callNumber = "", conclusion = ""):
        return self.__outputDetails(outputPath, fileName = fileName, sessUUID = sessUUID,
                                    callNumber = callNumber, conclusion = conclusion)
"if (callNumber == c if callNumber else True): fileName = (callNumber or c)", "异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_RESULT_DK) def getkeyInfoList(self, UUID = \"\"): \"\"\"获取关键信息列表 参数列表:", "{} else: ignoreLinesDict[f][i] = line continue # 拆分出UUID和日志信息 sessUUID, sessLog = line[0:pos], line[pos", "-- CS类 Callstate Change ACTIVE -> HANGUP 为呼叫层状态机迁移 -- call类 entering state [proceeding][180]", "sessUUID)): fileNameList.append(fileName) return len(fileNameList), newPath, fileNameList def outputOriginLog(self, outputPath, sessUUID = \"\", callNumber", "\"告警\", warningCount, \"失败\", errorCount, \"成功\", okCount) return s def __outputReslut(self, outputPath, sessUUID =", "RTP \\[(.*)\\] (.*) port (\\d+) -> (.*) port (\\d+) codec: (\\d+) ms: (\\d+)\",", "if mode in ['normal']: s += \"%-16s: %-s\\n\" % (\"结果\", conclusion) s +=", "= \"%s:%d\\n%s:%d\\n%s:%d\\n%s:%d\\n\" % (\"总计\", errorCount + okCount + warningCount, \"告警\", warningCount, \"失败\", errorCount,", "若输入了callNumber则认为需要过滤 if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \\ and self.__showAnalysisResultBody(sessUUID, conclusion):", "1 elif s and c.upper() in ['WARNING']: warningCount += 1 elif s and", "conclusion) if not continueRet: break # 显示尾 self.__showAnalysisResultTail(count, conclusion) return count def showResult(self,", "outputPath fileNameList = [] # 输出的文件列表 # 如果存在UUID(只输出一个文件) if sessUUID: if sessLogInfoDict.get(sessUUID, False):", "SIP信令 SIP_INVITE = 'INVITE' SIP_CANCEL = 'CANCEL' SIP_BYE = 'BYE' # 匹配模式 MATCH_MOD_NORMAL", "reason) signTimePrev = None signTimeThis = None if mode in ['normal']: s +=", "self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName) # 不存在UUID(可能输出多个文件) else: # 确定新的目录,若指定了文件名,则以指定的为准,否则以源日志文件名作为目录名 if not name: #fileNames", "self.caseMatch(detailsDict, case_hangup_invite): if self.caseMatch(detailsDict, case_r_183): note += \" -> RINGING(183)\" note += \"", "if not continueRet: break # 显示尾 self.__showDetailsTail(count) return count # 按照UUID搜索日志,并显示详细分析信息 def showDetails(self,", "len(self.getLines()) process = 0 for f, lines in enumerate(self.getLines()): process = self.printProc(process, fileLen)", "and self.__showAnalysisResultBody(sessUUID, conclusion): count += 1 # 输出分段,提示是否继续显示内容 continueRet, flag = self.inputContinue(i, count,", "8, \"重复的号码:\", \"总数:%d\" % len(dupl)) # ----------------------------------------------显示详细分析结果---------------------------------------------- def __showDetailsHeader(self, sessUUID = \"\", callNumber", "\"ready\"), \"terminated_list\": self.__fuzzyMatchChannelStateCode(keyInfoList, \"4xx\") + \\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"5xx\") + \\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"6xx\"), }", "20 -- RTP信息类 Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION] -- 挂断原因类 提取这些信息,并保存在会话字典的keyInfo中,其中以元祖的形式存放(文件索引,行号,匹配标志,提取的结果) 参数列表: 无 返回值:", "self.getSessLogInfoDict() # 需要匹配的正则表达式 reExpInfo = [ (\"State Change (.*) -> (.*)\", 2, [],", "try: del l[dPos] except Exception as Err: s = str(Err, reExpInfo[i], res) PRINT(s)", "x[3][1].strip()) if param2 != \"\" else True) and \\ (x[0] >= f if", "note += \" -> TALKING\" # invite-> 200<- bye<-> if self.caseMatch(detailsDict, case_hangup_acitve): note", "\"\", name = \"\"): sessLogInfoDict = self.getSessLogInfoDict() newPath = outputPath fileNameList = []", "self.SIGN_FLAG_S_BYE) and \"(S)\" or \"(R)\") # invite-> (bye-> or 错误应答<-) elif self.caseMatch(detailsDict, case_hangup_invite):", 
"__outputReslut(self, outputPath, sessUUID = \"\", callNumber = \"\", conclusion = \"\", fileName =", "\"(\" + fuzzyCode.replace(\"X\",\"\\\\d\").replace(\"x\", \"\\\\d\") + \")\" res = self.reMatch(reExpr, x[3][1], 1) res and", "= \"\"): \"\"\"获取日志字典 参数列表: UUID:会话的UUID 返回值: 日志字典 参照__sessLogInfoDict定义 异常: 无 \"\"\" return self.getSessInfo(UUID,", "+= 1 else: total = len(sessLogInfoDict) flag = False sessList = sorted(sessLogInfoDict.items(), key=lambda", "\"{0:^40}\".format(\" ↓ \") + \"\\n\" if k[0] not in l: s += self.getPathEx(k[0])", "targConclusion = \"\", show = True): sessLogInfoDict = self.getSessLogInfoDict() s = \"\" conclusion", "匹配状态描述 参数列表: keyInfoList:关键信息列表 desc:描述 返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CHAN,", "sessLogInfoDict.keys(): logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] # 若输入了号码,则需要过滤号码 c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if (callNumber == c", "note sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion # print \"\\n\", sessLogInfoDict[sessUUID][\"callNumber\"], sessLogInfoDict[sessUUID][\"result\"][\"conclusion\"], note,#,", "\"远端地址\", RmtIp, RmtPort, \"Payload\", audioPayLoad, \"ptime\", audioPTime) res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod =", "-> (.*)\", 2, [], self.SIGN_FLAG_CS), # 状态转移类的日志 (\"entering state \\[(.*)\\]\\[(.*)\\]\", 2, [], self.SIGN_FLAG_CHAN),", "(\"媒体信息\", \"本端地址\", locIp, locPort, \"远端地址\", RmtIp, RmtPort, \"Payload\", audioPayLoad, \"ptime\", audioPTime) res =", "__showDetailsTail(self, count, sessUUID = \"\", callNumber = \"\", conclusion = \"\"): s =", "\"(S)\" or \"(R)\") # invite-> (183<- or 180<-) 错误应答<- elif self.caseMatch(detailsDict, case_hangup_180) or", "\"DOWN__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"ACTIVE\"), \"EARLY__RINGING\": self.__matchCallStateChange(keyInfoList, \"EARLY\", \"RINGING\"), \"EARLY__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"EARLY\", \"ACTIVE\"), \"RINGING__ACTIVE\":", "10.0.7.176 port 24776 -> 192.168.0.178 port 7076 codec: 18 ms: 20 -- RTP信息类", "def outputDetails(self, outputPath, fileName = \"\", sessUUID = \"\", callNumber = \"\", conclusion", "newFileName, self.getDetails(sessUUID, targConclusion)): fileNameList.append(newFileName) else: if not callNumber: # 确定新的目录,以源日志文件名作为目录名 if not fileName:", "self).__init__(self.ANALYZER_TYPE_FS) def getSessLogInfoDict(self): \"\"\"获取会话信息字典 参数列表: 无 返回值: 会话信息字典 例如: {UUID:{log:{文件索引:{行数:日志}}, callNumber:呼叫号码, result:分析结果, keyInfo:(文件索引,行数,状态类型,(状态迁移信息))}}", "\\\"(.*)\\\" \\<(.*)\\>\", 4, [], self.SIGN_FLAG_CALLNUMBER), # 呼叫号码 (\"952 Hangup (.*) \\[(.*)\\] \\[(.*)\\]\", 3,", "\"\"\"清理FS的日志 参数列表: UUID:会话的UUID key:内部的字典名 返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\" sessDict =", "= \"\", conclusion = \"\", fileName = \"\"): return self.__outputReslut(outputPath, sessUUID = sessUUID,", "self.__matchCallStateChange(keyInfoList, \"DOWN\", \"RINGING\"), \"DOWN__EARLY\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"EARLY\"), \"DOWN__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"ACTIVE\"), \"EARLY__RINGING\": self.__matchCallStateChange(keyInfoList,", "{\"EARLY__HANGUP\":True,} case_hangup_acitve = {\"ACTIVE__HANGUP\":True,} case_r_183 = {\"proceeding_183\":True,} # invite-> if self.caseMatch(detailsDict, case_calling_invite) or", "s += \"%-16s: %-s\\n\" % (\"呼叫号码\", numberTo or 
sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]) if locIp and RmtIp:", "sorted(logFileDict.items(), key=lambda logFileDict:logFileDict[0]) for f, logDict in fileList: logList = sorted(logDict.items(), key=lambda logDict:logDict[0])", "keyInfoList:关键信息列表 fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState,", "False sessList = sorted(sessLogInfoDict.items(), key=lambda sessLogInfoDict:sessLogInfoDict[1][self.SESS_START_TIME_DK]) for i, (sessUUID, context) in enumerate(sessList): #", "self.printProc(process, sessLen, widgetType = \"percent\") for f in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys(): flag = False for", "\"\", callNumber = \"\", conclusion = \"\"): sessLogInfoDict = self.getSessLogInfoDict() # 显示头 self.__showDetailsHeader()", "参照__sessLogInfoDict定义 异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_LOG_DK) def getCallNumber(self, UUID = \"\"): \"\"\"获取呼叫号码", "self.getSessLogInfoDict() sessLen = len(sessLogInfoDict) process = 0 for sessUUID in sessLogInfoDict.keys(): process =", "callNumber = callNumber, conclusion = conclusion) # ----------------------------------------------显示分析结果---------------------------------------------- def __showAnalysisResultHeader(self, targConclusion=\"\"): s =", "list(res) for dPos in [x for x in sorted(dropPos, reverse=True) if dropPos and", "keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] #if sessUUID == \"4befcdab-a4cc-4d6a-979f-bbff65d729b0\": # print(\"\\n\") # for k in", "'yellow' or \\ conclusion.upper() in ['OK'] and 'green' conclusion = getColor(\"{0:<7}\".format(conclusion), color =", "= \"recv_invite\" # SIP信令 SIP_INVITE = 'INVITE' SIP_CANCEL = 'CANCEL' SIP_BYE = 'BYE'", "in sorted(dropPos, reverse=True) if dropPos and x < len(res)]: try: del l[dPos] except", "\") + \"\\n\" s += \"%s \\n\" % getColor(\"{0:^40}\".format(\"时差:\" + str((signTimeThis - signTimePrev).seconds)", "and 'green' conclusion = getColor(\"{0:<7}\".format(conclusion), color = color) s += \"%-30s %-36s %-30s", "sm\" SIGN_CALL_HANGUP = 'HANGUP' SIGN_FLAG_CS = \"core sm\" SIGN_FLAG_RTP = \"rtp\" SIGN_FLAG_CALLNUMBER =", "enumerate(lines): # 例如:4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 sofia/external/6010@10.0.7.152:5080 Standard INIT # 找到第一个空格,左边就是会话ID,右边就是日志信息 pos", "flag in reExpInfo: res = self.reMatch(reExpr, log, expLen) if res: l = list(res)", "self.__matchCsStateChange(keyInfoList, \"CS_ROUTING\", \"CS_CONSUME_MEDIA\"), \"CS_CONSUME_MEDIA__CS_EXECUTE\": self.__matchCsStateChange(keyInfoList, \"CS_CONSUME_MEDIA\", \"CS_EXECUTE\"), \"DOWN__RINGING\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"RINGING\"), \"DOWN__EARLY\": self.__matchCallStateChange(keyInfoList,", "(\\d+) ms: (\\d+)\", 7, [0], self.SIGN_FLAG_RTP), # RTP通道信息 (\"Flipping CID from \\\"(.*)\\\" \\<(.*)\\>", "toState) def __matchCallStateChange(self, keyInfoList, fromState, toState): \"\"\"call状态变迁匹配 参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败", "s: PRINT(s) return s def __showDetailsTail(self, count, sessUUID = \"\", callNumber = \"\",", "context) in enumerate(sessList): # 若输入了callNumber或UUID则认为需要过滤 if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True)", "return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param2 = code) def __matchChannelStateDesc(self, keyInfoList, desc): \"\"\"通道状态描述匹配 匹配状态描述 参数列表:", "# 按照UUID搜索日志,并显示详细分析信息 def showDetails(self, sessUUID = \"\", callNumber = \"\", 
conclusion = \"\"):", "codec: 18 ms: 20 -- RTP信息类 Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION] -- 挂断原因类 提取这些信息,并保存在会话字典的keyInfo中,其中以元祖的形式存放(文件索引,行号,匹配标志,提取的结果)", "False): logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] # 若输入了号码,则需要过滤号码 c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if (callNumber == c", "sessUUID + self.OUTPUT_POSTFIX_LOG) if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName) # 不存在UUID(可能输出多个文件)", "= {} self.__ignoreLinesDict = {} if PY2: return LogAnalyzer.__init__(self, self.ANALYZER_TYPE_FS) else: return super(FsLogAnalyzer,", "['WARNING'] and 'yellow' or \\ conclusion.upper() in ['OK'] and 'green' conclusion = getColor(\"{0:<7}\".format(conclusion),", "int(res[3]), int(res[4]), int(res[5])) if signTimePrev and (signTimeThis - signTimePrev).seconds > 4: s +=", "def __matchChannelStateCode(self, keyInfoList, code): \"\"\"通道状态码匹配 精确匹配状态码 参数列表: keyInfoList:关键信息列表 code:状态码 返回值: 成功或失败 bool 异常:", "if self.caseMatch(detailsDict, case_hangup_acitve): note += \" -> HANGUP\" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and \"(S)\"", "\"\", fileName = \"\"): sessLogInfoDict = self.getSessLogInfoDict() # 确定新的目录,以源日志文件名作为目录名 if not fileName: #fileNames", "{i:sessLog} if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i] = sessLog if", "\"\"\" return self.__sessLogInfoDict def getIgnoreLinesDict(self): \"\"\"获取忽略的行字典 在解析过程中,有些无法满足正则条件的日志行,无法解析其数据,则会填入此字典中 参数列表: 无 返回值: 忽略的行字典 例如: {文件索引:{行数:日志}}", "sessUUID pass else: pass # 会话关键信息收集 def __sessKeyInfoCollect(self): \"\"\"会话关键信息收集 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配其中的状态转移和收取消息日志。 例如: State Change", "sessLogInfoDict[sessUUID] = {self.SESS_LOG_DK:{f:{i:sessLog}}, self.SESS_FS_CALLNUMBER_DK:\"\", \\ self.SESS_RESULT_DK:{self.SESS_RESULT_CONCLUSION_DK:\"\", self.SESS_RESULT_DETAILS_DK:{}, self.SESS_RESULT_NOTE_DK:\"\"}, \\ self.SESS_KEYINFO_DK:[], self.SESS_START_TIME_DK:self.getLogTime(sessLog)} else: self.__sessLogInfoDict", "self.caseMatch(detailsDict, case_answer_183) or self.caseMatch(detailsDict, case_answerd_183): note += \" -> TALKING\" # invite-> (183<-", "= \"percent\", begin=50, end=100) keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] #if sessUUID == \"4befcdab-a4cc-4d6a-979f-bbff65d729b0\": # print(\"\\n\")", "= \"\", callNumber = \"\", conclusion = \"\"): return self.__outputDetails(outputPath, fileName = fileName,", "if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID] = {self.SESS_LOG_DK:{f:{i:sessLog}}, self.SESS_FS_CALLNUMBER_DK:\"\", \\", "= \"hangup_reason\" SIGN_FLAG_R_BYE = 'recv_bye' SIGN_FLAG_S_BYE = \"send_bye\" SIGN_FLAG_CANCEL = 'cancel' SIGN_FLAG_R_INVITE =", "%s\\n\" % (\"挂断原因\", res[1] if res else reason) signTimePrev = None signTimeThis =", "and \\ ((len(x[3]) >= 1 and param1.strip() == x[3][0].strip()) if param1 != \"\"", "or self.caseMatch(detailsDict, case_answerd_183): note += \" -> TALKING\" # invite-> (183<- or 180<-)", "= {} sessLogInfoDict = {} fileLen = len(self.getLines()) process = 0 for f,", "if callNumber else True): fileName = (callNumber or c) + \"__\" + sessUUID", "\"OK (耗时:%.2f秒)\" % (time4 - time3) PRINT(s, color='green') return True, \"\" # 获取UUID列表", "entering state [proceeding][180] 为收响应消息的处理 
-- channel类 AUDIO RTP [sofia/external/6797@10.0.7.152:5080] 10.0.7.176 port 24776 ->", "FsLogAnalyzer(LogAnalyzer): __sessLogInfoDict = {}# 按照会话归类的日志信息 # {会话UUID:{log:{文件索引:{行数:日志}}, # callNumber:呼叫号码, # result:{分析结果}, # keyInfo:[(文件索引,行数,状态类型,(状态迁移信息))]}", "\\ ((len(x[3]) >= 2 and param2.strip() == x[3][1].strip()) if param2 != \"\" else", "else True) and \\ (x[1] >= l if l != -1 else True)]", "key=lambda sessLogInfoDict:sessLogInfoDict[1][self.SESS_START_TIME_DK]) for i, (sessUUID, context) in enumerate(sessList): # 若输入了callNumber或UUID则认为需要过滤 if (callNumber ==", "path, rl=False): \"\"\"加载FS的日志 参数列表: path:日志路径 rl:是否重新加载 返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\"", "self.caseMatch(detailsDict, case_ringing_183_180) or \\ self.caseMatch(detailsDict, case_ringinged_180) or self.caseMatch(detailsDict, case_ringinged_183): note += \" ->", "errorCount += 1 elif s and c.upper() in ['WARNING']: warningCount += 1 elif", "str(Err, reExpInfo[i], res) PRINT(s) raise res = tuple(l) keyInfoList.append((f, line, flag, res)) break", "return sessLogInfoDict.keys() # 获取呼叫号码列表 def getCallNumberList(self): sessLogInfoDict = self.getSessLogInfoDict() return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] for sessUUID", "% getColor(\"{0:^40}\".format(\"时差:\" + str((signTimeThis - signTimePrev).seconds) + \"s\", color=\"red\", need=True)) s += \"{0:^40}\".format(\"", "sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion # print \"\\n\", sessLogInfoDict[sessUUID][\"callNumber\"], sessLogInfoDict[sessUUID][\"result\"][\"conclusion\"], note,#, \"\\n\", keyInfoList, \"\\n\",detailsDict, \"\\n\"", "self.MOD_FS, self.MOD_OUTSIDE, self.SIP_BYE else: pass return '', '', '' # 按照会话,收集日志信息 def __sessCollect(self):", "\"Payload\", audioPayLoad, \"ptime\", audioPTime) res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS) reason =", "return s, conclusion callTime = \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1])) callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] conclusion =", "\"_\".join([os.path.split(p)[-1] for p in self.getPath()]) orgLogFileNames = callNumber + sessUUID + targConclusion +", "not callNumber: # 确定新的目录,以源日志文件名作为目录名 if not fileName: #orgLogFileNames = \"_\".join([os.path.split(p)[-1] for p in", "from datetime import datetime PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] ==", "= {} fileLen = len(self.getLines()) process = 0 for f, lines in enumerate(self.getLines()):", "fileName: #fileNames = \"_\".join([os.path.split(p)[-1] for p in self.getPath()]) fileNames = sessUUID + callNumber", "l = -1, mod=\"normal\"): l = [(i, x) for i, x in enumerate(keyInfoList)", "in [self.MATCH_MOD_DETAILS]: return l[0][1][3] if any(l) else False else: return False def __matchCsStateChange(self,", "self.__getCallNumber() time3 = time.clock() s = \"OK (耗时:%.2f秒)\" % (time3 - time2) PRINT(s,", "错误应答<-) elif self.caseMatch(detailsDict, case_hangup_invite): if self.caseMatch(detailsDict, case_r_183): note += \" -> RINGING(183)\" note", "0 # 输出到文件 if sessUUID: if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True):", "process = 0 for sessUUID in sessLogInfoDict.keys(): process = self.printProc(process, sessLen, widgetType =", "s += \"\\n\" + \"{0:*^160}\".format(\" 消息交互详情 \") + \"\\n\\n\" s += \"%-4s %-35s", "\"\"\" sessLogInfoDict = self.getSessLogInfoDict() # 需要匹配的正则表达式 reExpInfo = [ (\"State Change (.*) ->", "self.reMatch(\"Dialplan: sofia\\/(.*)\\/(.*) 
Action transfer\\((\\d*) XML default\\)\", sessLog, 3) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[2]", "or \"(R)\") # 判断挂断原因 res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS) st, reason", "= 'terminated' SIGN_FLAG_CALL = \"channel sm\" SIGN_CALL_HANGUP = 'HANGUP' SIGN_FLAG_CS = \"core sm\"", "callTime = \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1])) callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] note =", "呼叫状态类的日志 (\"receiving invite from (.*) version\", 1, [], self.SIGN_FLAG_R_INVITE), (\"AUDIO RTP \\[(.*)\\] (.*)", "sessUUID, callNumber = callNumber, conclusion = conclusion) # ----------------------------------------------输出简单分析结果到文件---------------------------------------------- def __getOutputResultHeader(self): s =", "= sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][l] # 进行正则匹配,以\"(sofia/external/\"作为开头关键字,以\")\"作为结尾,\"@\"作为分隔,提取其中的号码 # 默认按照此行日志取号码 res = self.reMatch(\"New Channel sofia\\/(.*)\\/(\\d*)\\@(.*?) \\[\", sessLog,", "XML default\\)\", sessLog, 3) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[2] flag = True break", "if self.caseMatch(detailsDict, case_calling_invite) or self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE): conclusion = \"OK\" note = \"[CALLING\" +", "s += self.getPathEx(k[0]) + \"\\n\" l.append(k[0]) s += \"%02d. %-35s %-16s %-16s %s\\n\"", "note += \" -> RINGING(183)\" note += \" -> HANGUP\" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE)", "(sessDict[self.SESS_FS_CALLNUMBER_DK] + \"__\" + sessUUID + \"__\" + targConclusion + self.OUTPUT_POSTFIX_DETAILS) if self.outputEx(newPath,", "= \"\", show = True): sessLogInfoDict = self.getSessLogInfoDict() s = \"\" conclusion =", "context[0] in [self.SIGN_CHAN_CALLING]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_INVITE elif context[0] in [self.SIGN_CHAN_PROCEDDING]: return self.MOD_OUTSIDE,", "# print \"\\n\", sessLogInfoDict[sessUUID][\"callNumber\"], sessLogInfoDict[sessUUID][\"result\"][\"conclusion\"], note,#, \"\\n\", keyInfoList, \"\\n\",detailsDict, \"\\n\" # 分析会话日志 def", "len(res)]: try: del l[dPos] except Exception as Err: s = str(Err, reExpInfo[i], res)", "# 若输入了callNumber if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \\ self.__showDetailsBody(sessUUID,", "case_answerd_180 = {\"RINGING__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_183 = {\"EARLY__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_183 = {\"EARLY__ACTIVE\":True,", "参数列表: 无 返回值: 忽略的行字典 例如: {文件索引:{行数:日志}} 异常: 无 \"\"\" return self.__ignoreLinesDict def load(self,", "= 'INVITE' SIP_CANCEL = 'CANCEL' SIP_BYE = 'BYE' # 匹配模式 MATCH_MOD_NORMAL = \"normal\"", "= color) s += \"%-30s %-36s %-30s %-7s %-s\\n\" % (callTime, sessUUID, callNumber", "\"正在提取号码...\" PRINT(s, end='') self.__getCallNumber() time3 = time.clock() s = \"OK (耗时:%.2f秒)\" % (time3", "SIGN_CALL_HANGUP = 'HANGUP' SIGN_FLAG_CS = \"core sm\" SIGN_FLAG_RTP = \"rtp\" SIGN_FLAG_CALLNUMBER = \"callnumber\"", "sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] #if sessUUID == \"4befcdab-a4cc-4d6a-979f-bbff65d729b0\": # print(\"\\n\") # for k in keyInfoList: #", "% (time4 - time3) PRINT(s, color='green') return True, \"\" # 获取UUID列表 def getSessUUIDList(self):", "= \"\", callNumber = \"\", conclusion = \"\", fileName = \"\"): return self.__outputReslut(outputPath,", 
"not get time\") return sessLogInfoDict, ignoreLinesDict # 获取会话中的呼叫号码 def __getCallNumber(self): \"\"\"获取呼叫号码 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配提取其中的号码段,最后写入此路会话的字典信息callNumber中。 号码的提取样例为(sofia/external/6010@10.0.7.152:5080),其中的6010为号码", "keyInfoList, fuzzyCode): \"\"\"通道状态码模糊匹配 模糊码以X代表一个任意数字位,例如4XX,则为匹配4开头应答码 参数列表: keyInfoList:关键信息列表 fuzzyCode:模糊状态码 返回值: 匹配到的值 异常: 无 \"\"\" codeList", "# 显示UUID列表 def showSessUUIDList(self): sessUUIDList = self.getSessUUIDList() self.printList(sessUUIDList, 4, \"UUID列表:\", \"总数:%d\" % len(sessUUIDList))", "if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]] # 显示UUID列表 def showSessUUIDList(self): sessUUIDList = self.getSessUUIDList() self.printList(sessUUIDList, 4, \"UUID列表:\", \"总数:%d\"", "fileNameList for sessUUID in sessLogInfoDict.keys(): sessDict = sessLogInfoDict[sessUUID] if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if", "无 \"\"\" return self.getSessInfo(UUID, self.SESS_RESULT_DK) def getkeyInfoList(self, UUID = \"\"): \"\"\"获取关键信息列表 参数列表: UUID:会话的UUID", "%-16s %-16s %s\\n\\n\" % (\"序号\",\"信令时间\", \"源日志行号\", \"消息类型\", \"详情\") l = [] for i,", "if not keyInfoList or not logDict: return s, conclusion callTime = \"%s\" %", "color) s += \"%-30s %-36s %-30s %-7s %-s\\n\" % (callTime, sessUUID, callNumber or", "dupl = self.findDupl(callNumberList) len(dupl) and self.printList(dupl, 8, \"重复的号码:\", \"总数:%d\" % len(dupl)) # ----------------------------------------------显示详细分析结果----------------------------------------------", "if reason: note += \"{[\" + st + \"]\" + reason + \"}\"", "% (i + 1, signTime, str(k[1]), str(k[2]), str(k[3])) else: s += \"\\n\" return", "\"\", name = \"\"): return self.__outputOriginLog(outputPath, sessUUID = sessUUID, callNumber = callNumber, name", "\"\", callNumber = \"\", name = \"\"): sessLogInfoDict = self.getSessLogInfoDict() newPath = outputPath", "-1 else True) and \\ (x[1] >= l if l != -1 else", "\"%-16s: %s:%s:%s -> %s:%s:%s (%s:%s %s:%s)\\n\" % (\"媒体信息\", \"本端地址\", locIp, locPort, \"远端地址\", RmtIp,", "self.printProc(process, fileLen) for i, line in enumerate(lines): # 例如:4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:40", "True): sessLogInfoDict = self.getSessLogInfoDict() s = \"\" conclusion = \"\" if sessLogInfoDict.get(sessUUID, False):", "# print(k) conclusion = \"\" note = \"\" detailsDict = { \"CS_NEW__CS_INIT\": self.__matchCsStateChange(keyInfoList,", "% self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1])) callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] if targConclusion.upper()", "locIp, locPort, RmtIp, RmtPort, audioPayLoad, audioPTime = res if res else (\"\",\"\",\"\",\"\",\"\",\"\") note", "异常: 无 \"\"\" if PY2: return LogAnalyzer.load(self, path, rl) else: return super(FsLogAnalyzer, self).load(path,", "= {} self.__ignoreLinesDict = {} return super(FsLogAnalyzer, self).clear() def getSessInfo(self, UUID = \"\",", "\"RINGING\", \"HANGUP\"), \"ACTIVE__HANGUP\": self.__matchCallStateChange(keyInfoList, \"ACTIVE\", \"HANGUP\"), \"calling_0\": self.__matchChannelStateDesc(keyInfoList, \"calling\"), \"proceeding_180\": self.__matchChannelStateCode(keyInfoList, \"180\"), \"proceeding_183\":", "\\ and self.__showAnalysisResultBody(sessUUID, conclusion): count += 1 # 输出分段,提示是否继续显示内容 continueRet, flag = self.inputContinue(i,", "LogAnalyzer # FS日志分析器 class 
FsLogAnalyzer(LogAnalyzer): __sessLogInfoDict = {}# 按照会话归类的日志信息 # {会话UUID:{log:{文件索引:{行数:日志}}, # callNumber:呼叫号码,", "\"%-30s %-36s %-30s %-7s %-s\\n\" % (callTime, sessUUID, callNumber or getColor(\"{0:<20}\".format(\"null\"), color='gray', need=show),", "sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] = keyInfoList def __match(self, keyInfoList, flag, param1 = \"\", param2 = \"\",", "else True) \\ and self.__showAnalysisResultBody(sessUUID, conclusion): count += 1 else: total = len(sessLogInfoDict)", "else (\"\", \"\") if reason: note += \"{[\" + st + \"]\" +", "+ conclusion + \"_tmp\" fileName = \"Result\" + fileNames + self.OUTPUT_POSTFIX_RESULT context =", "== x[3][0].strip()) if param1 != \"\" else True) and \\ ((len(x[3]) >= 2", "(耗时:%.2f秒)\" % (time4 - time3) PRINT(s, color='green') return True, \"\" # 获取UUID列表 def", "-> (.*) port (\\d+) codec: (\\d+) ms: (\\d+)\", 7, [0], self.SIGN_FLAG_RTP), # RTP通道信息", "res) PRINT(s) raise res = tuple(l) keyInfoList.append((f, line, flag, res)) break else: sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]", "= {\"DOWN__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_invite = {\"DOWN__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_180 = {\"RINGING__ACTIVE\":True, \"completing_200\":True,", "context: context = self.__getOutputResultHeader() + context context += self.__getOutputResultTail(warningCount, errorCount, okCount) if self.outputEx(outputPath,", "line in enumerate(lines): # 例如:4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 sofia/external/6010@10.0.7.152:5080 Standard INIT #", "\" -> HANGUP\" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and \"(S)\" or \"(R)\") # invite-> (bye->", "self.__getAnalysisResultBody(sessUUID, targConclusion) if s: PRINT(s) return s def __showAnalysisResultTail(self, count, targConclusion=\"\"): s =", "= list(res) for dPos in [x for x in sorted(dropPos, reverse=True) if dropPos", "\"DOWN__EARLY\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"EARLY\"), \"DOWN__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"ACTIVE\"), \"EARLY__RINGING\": self.__matchCallStateChange(keyInfoList, \"EARLY\", \"RINGING\"), \"EARLY__ACTIVE\":", "sessList = sorted(sessLogInfoDict.items(), key=lambda sessLogInfoDict:sessLogInfoDict[1][self.SESS_START_TIME_DK]) for i, (sessUUID, context) in enumerate(sessList): # 若输入了callNumber", "在建立了会话日志字典之后,分析每路会话,以正则的方式匹配提取其中的号码段,最后写入此路会话的字典信息callNumber中。 号码的提取样例为(sofia/external/6010@10.0.7.152:5080),其中的6010为号码 参数列表: 无 返回值: 无 异常: 无 \"\"\" sessLogInfoDict = self.getSessLogInfoDict() #", ">= l if l != -1 else True)] if mod in [self.MATCH_MOD_NORMAL]: return", "按照会话,收集日志信息 def __sessCollect(self): \"\"\"按照UUID收集会话日志 FS的日志,左边打印的就是会话UUID信息(36位数字或字母以‘-’连接的字符串,形如4541eb63-e5b0-49f0-8d2c-31e06078013f) 函数读取日志的每一行,按照UUID进行会话归类,建立本地UUID为key的字典,再以文件索引和行数作为key为字典,value为日志内容。 最后包含一些关键信息,如呼叫号码、分析结果、关键信息供分析器内部逻辑使用 参数列表: 无 返回值: 成功解析的会话日志字典和无法解析的会话日志字典 dict,dict 异常:", "bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState) def __matchCallStateChange(self, keyInfoList, fromState,", "str(k[3])) else: s += \"\\n\" return s def __showDetailsBody(self, sessUUID = \"\", targConclusion", "s += \"%-16s: %-s\\n\" % (\"UUID\", sessUUID) if numberFrom: s += \"%-16s: %-s\\n\"", "logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] if not keyInfoList or not logDict: return", "- signTimePrev).seconds) + \"s\", 
color=\"red\", need=True)) s += \"{0:^40}\".format(\" ↓ \") + \"\\n\"", "= \"\", conclusion = \"\"): return self.__showDetails(sessUUID = sessUUID, callNumber = callNumber, conclusion", "= (callNumber or c) + \"__\" + sessUUID + self.OUTPUT_POSTFIX_LOG if self.output(logDict, newPath,", "else True) and \\ (x[0] >= f if f != -1 else True)", "\"\" conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] if targConclusion.upper() not in conclusion.upper(): return \"\" logDict =", "count = 0 if sessUUID: # 若输入了callNumber则认为需要过滤 if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber", "\"%-4s %-35s %-16s %-16s %s\\n\\n\" % (\"序号\",\"信令时间\", \"源日志行号\", \"消息类型\", \"详情\") l = []", "self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1])) callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] if targConclusion.upper() in", ">= 1 and param1.strip() == x[3][0].strip()) if param1 != \"\" else True) and", "= \"\"): sessLogInfoDict = self.getSessLogInfoDict() # 显示头 self.__showAnalysisResultHeader(conclusion) # 显示Body count = 0", "\"\", fileName = \"\"): return self.__outputReslut(outputPath, sessUUID = sessUUID, callNumber = callNumber, conclusion", "若有号码变换,需要取变换的号码 res = self.reMatch(\"Dialplan: sofia\\/(.*)\\/(.*) Action transfer\\((\\d*) XML default\\)\", sessLog, 3) if res:", "\"\"): \"\"\"获取结果字典 参数列表: UUID:会话的UUID 返回值: 结果字典 {'conclusion':\"\", 'details':{}, 'note':\"\"} 异常: 无 \"\"\" return", "reExpInfo: res = self.reMatch(reExpr, log, expLen) if res: l = list(res) for dPos", "getSessLogInfoDict(self): \"\"\"获取会话信息字典 参数列表: 无 返回值: 会话信息字典 例如: {UUID:{log:{文件索引:{行数:日志}}, callNumber:呼叫号码, result:分析结果, keyInfo:(文件索引,行数,状态类型,(状态迁移信息))}} 异常: 无", "\"ACTIVE\"), \"EARLY__RINGING\": self.__matchCallStateChange(keyInfoList, \"EARLY\", \"RINGING\"), \"EARLY__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"EARLY\", \"ACTIVE\"), \"RINGING__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"RINGING\", \"ACTIVE\"),", "case_hangup_acitve = {\"ACTIVE__HANGUP\":True,} case_r_183 = {\"proceeding_183\":True,} # invite-> if self.caseMatch(detailsDict, case_calling_invite) or self.__match(keyInfoList,", "%s\\n\" % (i + 1, signTime, str(k[1]), str(k[2]), str(k[3])) else: s += \"\\n\"", "\"{[\" + st + \"]\" + reason + \"}\" if reason not in", "8, \"呼叫号码列表:\", \"总数:%d\" % len(tmp)) # 重复的呼叫号码 dupl = self.findDupl(callNumberList) len(dupl) and self.printList(dupl,", "self.__showDetailsHeader() # 显示Body count = 0 if sessUUID: # 若输入了callNumber if (callNumber ==", "\"\", targConclusion = \"\"): s = self.getDetails(sessUUID, targConclusion) if s: PRINT(s) return s", "结果字典 {'conclusion':\"\", 'details':{}, 'note':\"\"} 异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_RESULT_DK) def getkeyInfoList(self, UUID", "ignoreLinesDict: ignoreLinesDict[f] = {} else: ignoreLinesDict[f][i] = line continue # 拆分出UUID和日志信息 sessUUID, sessLog", "in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion = \"ERROR\" else: res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod =", "or pos < 36 or line[0:pos].count('-') != 4: if f not in ignoreLinesDict:", "= \"OK (耗时:%.2f秒)\" % (time2 - time1) PRINT(s, color='green') s = \"正在提取号码...\" PRINT(s,", "True) and \\ self.__showDetailsBody(sessUUID, conclusion): count += 1 else: total = len(sessLogInfoDict) flag", "case_answer_invite = 
{\"DOWN__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_invite = {\"DOWN__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_180 = {\"RINGING__ACTIVE\":True,", "case_ringinged_183 = {\"DOWN__EARLY\":True,} case_ringing_183_180 = {\"DOWN__EARLY\":True, \"proceeding_183\":True, \"EARLY__RINGING\":True, \"proceeding_180\":True,} case_answer_invite = {\"DOWN__ACTIVE\":True, \"completing_200\":True,", "(.*) -> (.*)\", 2, [], self.SIGN_FLAG_CALL), # 呼叫状态类的日志 (\"receiving invite from (.*) version\",", "= \"\"): sessLogInfoDict = self.getSessLogInfoDict() newPath = outputPath fileNameList = [] # 输出的文件列表", "HANGUP\" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and \"(S)\" or \"(R)\") else: # invite-> (183<- or", "if sessDict.get(UUID, False): return UUID, sessDict[UUID].get(key, False) else: return UUID, None else: return", "warningCount, errorCount, okCount): s = \"%s:%d\\n%s:%d\\n%s:%d\\n%s:%d\\n\" % (\"总计\", errorCount + okCount + warningCount,", "\"_\".join([os.path.split(p)[-1] for p in self.getPath()]) fileNames = sessUUID + callNumber + \"_tmp\" newPath", "channel类 AUDIO RTP [sofia/external/6797@10.0.7.152:5080] 10.0.7.176 port 24776 -> 192.168.0.178 port 7076 codec: 18", "def __getOutputResultTail(self, warningCount, errorCount, okCount): s = \"%s:%d\\n%s:%d\\n%s:%d\\n%s:%d\\n\" % (\"总计\", errorCount + okCount", "+ sessUUID + targConclusion + \"_tmp\" newPath = os.path.join(outputPath, orgLogFileNames) else: newPath =", "参数列表: UUID:会话的UUID 返回值: 日志字典 参照__sessLogInfoDict定义 异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_LOG_DK) def getCallNumber(self,", "else True): fileName = name or ((callNumber or c) + \"__\" + sessUUID", "\"\\n\" l.append(k[0]) s += \"%02d. %-35s %-16s %-16s %s\\n\" % (i + 1,", "% count PRINT(s) def __showResult(self, sessUUID = \"\", callNumber = \"\", conclusion =", "\"_\".join([os.path.split(p)[-1] for p in self.getPath()]) fileNames = sessUUID + callNumber + conclusion +", "def showResult(self, sessUUID = \"\", callNumber = \"\", conclusion = \"\"): return self.__showResult(sessUUID", "参数列表: UUID:会话的UUID key:内部的字典名 返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\" sessDict = self.getSessLogInfoDict()", "c.upper() in ['OK']: okCount += 1 else: for sessUUID in sessLogInfoDict.keys(): if (callNumber", "else: pass return '', '', '' # 按照会话,收集日志信息 def __sessCollect(self): \"\"\"按照UUID收集会话日志 FS的日志,左边打印的就是会话UUID信息(36位数字或字母以‘-’连接的字符串,形如4541eb63-e5b0-49f0-8d2c-31e06078013f) 函数读取日志的每一行,按照UUID进行会话归类,建立本地UUID为key的字典,再以文件索引和行数作为key为字典,value为日志内容。", "%-s\\n\" % (\"呼叫开始时间\", \"UUID\", \"呼叫号码\", \"结果\", \"备注\") PRINT(s) def __getAnalysisResultBody(self, sessUUID, targConclusion =", "if s and c.upper() in ['ERROR']: errorCount += 1 elif s and c.upper()", "s += \"%-16s: %s\\n\" % (\"挂断原因\", res[1] if res else reason) signTimePrev =", "getColor(\"{0:<7}\".format(conclusion), color = color) s += \"%-30s %-36s %-30s %-7s %-s\\n\" % (callTime,", "= None signTimeThis = None if mode in ['normal']: s += \"%-16s: %-s\\n\"", "if s: PRINT(s) return s def __showDetailsTail(self, count, sessUUID = \"\", callNumber =", "or \"(R)\") # invite-> (bye-> or 错误应答<-) elif self.caseMatch(detailsDict, case_hangup_invite): if self.caseMatch(detailsDict, case_r_183):", "keyInfo:(文件索引,行数,状态类型,(状态迁移信息))}} 异常: 无 \"\"\" return self.__sessLogInfoDict def getIgnoreLinesDict(self): \"\"\"获取忽略的行字典 在解析过程中,有些无法满足正则条件的日志行,无法解析其数据,则会填入此字典中 参数列表: 无 返回值:", "+ fuzzyCode.replace(\"X\",\"\\\\d\").replace(\"x\", \"\\\\d\") + \")\" res = self.reMatch(reExpr, x[3][1], 1) res and 
codeList.append(x[3])", "= self.MATCH_MOD_DETAILS) disFrom, numberFrom, disTo, numberTo = res if res else (\"\",\"\",\"\",\"\") callTime", "if UUID: if sessDict.get(UUID, False): return UUID, sessDict[UUID].get(key, False) else: return UUID, None", "line, flag, res)) break else: sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] = keyInfoList def __match(self, keyInfoList, flag, param1", "\"(R)\") else: # invite-> (183<- or 180<-) if self.caseMatch(detailsDict, case_ringing_180) or self.caseMatch(detailsDict, case_ringing_183)", "= \"%s\" % self.getLogTime(logDict.get(k[0], {}).get(k[1], \"\")) res = self.reMatch(\"(\\\\d{4})-(\\\\d{1,2})-(\\\\d{1,2}) (\\\\d{2}):(\\\\d{2}):(\\\\d{2}).(\\\\d{6})\", signTime, 7) if", "\"\"\"按照UUID收集会话日志 FS的日志,左边打印的就是会话UUID信息(36位数字或字母以‘-’连接的字符串,形如4541eb63-e5b0-49f0-8d2c-31e06078013f) 函数读取日志的每一行,按照UUID进行会话归类,建立本地UUID为key的字典,再以文件索引和行数作为key为字典,value为日志内容。 最后包含一些关键信息,如呼叫号码、分析结果、关键信息供分析器内部逻辑使用 参数列表: 无 返回值: 成功解析的会话日志字典和无法解析的会话日志字典 dict,dict 异常: 无 \"\"\" ignoreLinesDict", "state \\[(.*)\\]\\[(.*)\\]\", 2, [], self.SIGN_FLAG_CHAN), # 收到消息类的日志 (\"Callstate Change (.*) -> (.*)\", 2,", "%-7s %-s\\n\" % (\"呼叫开始时间\", \"UUID\", \"呼叫号码\", \"结果\", \"备注\") PRINT(s) def __getAnalysisResultBody(self, sessUUID, targConclusion", "\"\"\" self.__sessLogInfoDict = {} self.__ignoreLinesDict = {} return super(FsLogAnalyzer, self).clear() def getSessInfo(self, UUID", "sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName = sessDict[self.SESS_FS_CALLNUMBER_DK] + \"__\" + sessUUID + \"__\" + targConclusion +", "s = \"OK (耗时:%.2f秒)\" % (time3 - time2) PRINT(s, color='green') s = \"正在分析会话过程...\"", "(\\\\d{2}):(\\\\d{2}):(\\\\d{2}).(\\\\d{6})\", signTime, 7) if res: signTimePrev = signTimeThis signTimeThis = datetime(int(res[0]), int(res[1]), int(res[2]),", "\"正在分析会话过程...\" PRINT(s, end='') self.__analysis() time4 = time.clock() s = \"OK (耗时:%.2f秒)\" % (time4", "s def __outputOriginLog(self, outputPath, sessUUID = \"\", callNumber = \"\", name = \"\"):", "sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] if targConclusion.upper() in conclusion.upper(): color =", "def __getCallNumber(self): \"\"\"获取呼叫号码 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配提取其中的号码段,最后写入此路会话的字典信息callNumber中。 号码的提取样例为(sofia/external/6010@10.0.7.152:5080),其中的6010为号码 参数列表: 无 返回值: 无 异常: 无 \"\"\" sessLogInfoDict", "str((signTimeThis - signTimePrev).seconds) + \"s\", color=\"red\", need=True)) s += \"{0:^40}\".format(\" ↓ \") +", "sessUUIDList = self.getSessUUIDList() self.printList(sessUUIDList, 4, \"UUID列表:\", \"总数:%d\" % len(sessUUIDList)) # 显示呼叫号码列表 def showCallNumberList(self):", "进行正则匹配,以\"(sofia/external/\"作为开头关键字,以\")\"作为结尾,\"@\"作为分隔,提取其中的号码 # 默认按照此行日志取号码 res = self.reMatch(\"New Channel sofia\\/(.*)\\/(\\d*)\\@(.*?) \\[\", sessLog, 3) if res:", "#print \"Not find the call number. 
UUID:%s\" % sessUUID pass else: pass #", "18 ms: 20 -- RTP信息类 Hangup sofia/external/1920@10.0.7.152:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION] -- 挂断原因类 提取这些信息,并保存在会话字典的keyInfo中,其中以元祖的形式存放(文件索引,行号,匹配标志,提取的结果) 参数列表:", "获取UUID列表 def getSessUUIDList(self): sessLogInfoDict = self.getSessLogInfoDict() return sessLogInfoDict.keys() # 获取呼叫号码列表 def getCallNumberList(self): sessLogInfoDict", "(signTimeThis - signTimePrev).seconds > 4: s += \"{0:^40}\".format(\" ↑ \") + \"\\n\" s", "keyInfoList or not logDict: return s, conclusion callTime = \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1])) callNumber", "__matchChannelStateDesc(self, keyInfoList, desc): \"\"\"通道状态描述匹配 匹配状态描述 参数列表: keyInfoList:关键信息列表 desc:描述 返回值: 成功或失败 bool 异常: 无", "invite-> 200<- if self.caseMatch(detailsDict, case_answer_invite): note += \" -> TALKING\" # invite-> 200<-", "self.printProc(process, sessLen, widgetType = \"percent\", begin=50, end=100) keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] #if sessUUID ==", "res else reason) signTimePrev = None signTimeThis = None if mode in ['normal']:", "False for l in sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f].keys(): # 取一行日志 sessLog = sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][l] # 进行正则匹配,以\"(sofia/external/\"作为开头关键字,以\")\"作为结尾,\"@\"作为分隔,提取其中的号码 #", "signTimePrev).seconds > 4: s += \"{0:^40}\".format(\" ↑ \") + \"\\n\" s += \"%s", "True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName = sessDict[self.SESS_FS_CALLNUMBER_DK] + \"__\" + sessUUID + \"__\"", "[self.SIGN_CHAN_CALLING]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_INVITE elif context[0] in [self.SIGN_CHAN_PROCEDDING]: return self.MOD_OUTSIDE, self.MOD_FS, context[1]", "signTimePrev and (signTimeThis - signTimePrev).seconds > 4: s += \"{0:^40}\".format(\" ↑ \") +", "in conclusion.upper(): return \"\" logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] if not logDict", "else True) and \\ self.__showDetailsBody(sessUUID, conclusion): count += 1 else: total = len(sessLogInfoDict)", "targConclusion + self.OUTPUT_POSTFIX_DETAILS if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)): fileNameList.append(newFileName) return len(fileNameList), newPath, fileNameList", "# 若输入了号码,则需要过滤号码 c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if (callNumber == c if callNumber else True):", "end='') self.__getCallNumber() time3 = time.clock() s = \"OK (耗时:%.2f秒)\" % (time3 - time2)", "targConclusion) if s: PRINT(s) return s def __showAnalysisResultTail(self, count, targConclusion=\"\"): s = \"\\n总数:%d\"", "= os.path.join(outputPath, fileName) # 创建新的目录 if not self.makeDir(newPath): return len(fileNameList), newPath, fileNameList for", "= \"\", fileName = \"\"): sessLogInfoDict = self.getSessLogInfoDict() # 确定新的目录,以源日志文件名作为目录名 if not fileName:", "res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1] flag = True # 若有号码变换,需要取变换的号码 res = self.reMatch(\"Dialplan: sofia\\/(.*)\\/(.*)", "newPath = os.path.join(outputPath, fileNames) else: newPath = os.path.join(outputPath, name) # 创建新的目录,若存在则删除 self.makeDir(newPath) for", "invite-> (bye-> or 错误应答<-) elif self.caseMatch(detailsDict, case_hangup_invite): if self.caseMatch(detailsDict, case_r_183): note += \"", "show=False) context += s if s and c.upper() in ['ERROR']: errorCount += 1", "ignoreLinesDict = {} sessLogInfoDict = {} fileLen = len(self.getLines()) process = 0 for", "if targConclusion.upper() in 
conclusion.upper(): color = conclusion.upper() in ['ERROR'] and 'red' or \\", "str) 异常: 无 \"\"\" self.__sessLogInfoDict = {} self.__ignoreLinesDict = {} return super(FsLogAnalyzer, self).clear()", "process = self.printProc(process, sessLen, widgetType = \"percent\", begin=0, end=50) keyInfoList = [] logFileDict", "\"ACTIVE\", \"HANGUP\"), \"calling_0\": self.__matchChannelStateDesc(keyInfoList, \"calling\"), \"proceeding_180\": self.__matchChannelStateCode(keyInfoList, \"180\"), \"proceeding_183\": self.__matchChannelStateCode(keyInfoList, \"183\"), \"completing_200\": self.__matchChannelStateDesc(keyInfoList,", "case_ringing_180 = {\"proceeding_180\":True,} case_ringing_183 = {\"proceeding_183\":True,} case_ringinged_180 = {\"DOWN__RINGING\":True,} case_ringinged_183 = {\"DOWN__EARLY\":True,} case_ringing_183_180", "case_calling_invite) or self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE): conclusion = \"OK\" note = \"[CALLING\" + (self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE)", "return count def showResult(self, sessUUID = \"\", callNumber = \"\", conclusion = \"\"):", "\"\" warningCount, errorCount, okCount = 0, 0, 0 # 输出到文件 if sessUUID: if", "无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState) def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode): \"\"\"通道状态码模糊匹配 模糊码以X代表一个任意数字位,例如4XX,则为匹配4开头应答码", "def __getOutputResultHeader(self): s = \"%-30s %-36s %-30s %-6s %s\\n\" % (\"呼叫开始时间\", \"UUID\", \"呼叫号码\",", "= sys.version_info[0] == 3 from base.base import PRINT, INPUT, getColor if PY2: from", "参数列表: 无 返回值: 无 异常: 无 \"\"\" sessLogInfoDict = self.getSessLogInfoDict() sessLen = len(sessLogInfoDict)", "{\"proceeding_183\":True,} case_ringinged_180 = {\"DOWN__RINGING\":True,} case_ringinged_183 = {\"DOWN__EARLY\":True,} case_ringing_183_180 = {\"DOWN__EARLY\":True, \"proceeding_183\":True, \"EARLY__RINGING\":True, \"proceeding_180\":True,}", "例如: {UUID:{log:{文件索引:{行数:日志}}, callNumber:呼叫号码, result:分析结果, keyInfo:(文件索引,行数,状态类型,(状态迁移信息))}} 异常: 无 \"\"\" return self.__sessLogInfoDict def getIgnoreLinesDict(self): \"\"\"获取忽略的行字典", "fileLen) for i, line in enumerate(lines): # 例如:4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 sofia/external/6010@10.0.7.152:5080", "elif self.caseMatch(detailsDict, case_hangup_180) or self.caseMatch(detailsDict, case_hangup_183): note += \" -> HANGUP\" + (self.__match(keyInfoList,", "reason + \"}\" if reason not in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion = \"ERROR\" if", "= self.inputContinue(i, count, total, flag, self.__showAnalysisResultHeader, conclusion) if not continueRet: break # 显示尾", "36 or line[0:pos].count('-') != 4: if f not in ignoreLinesDict: ignoreLinesDict[f] = {}", "= callNumber, conclusion = conclusion, fileName = fileName) # ----------------------------------------------输出原始日志到文件---------------------------------------------- def __getOutputHeader(self, logDict,", "fileNameList if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName", "# 显示Body count = 0 if sessUUID: # 若输入了callNumber if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]", "\\[(.*)\\] \\[(.*)\\]\", 3, [0], self.SIGN_FLAG_HANGUP), (\"Sending BYE to(.*)\", 1, [0], self.SIGN_FLAG_S_BYE), (\"Sending CANCEL", "= None if mode in ['normal']: s += \"%-16s: %-s\\n\" % (\"结果\", conclusion)", "True) and \\ ((len(x[3]) >= 2 and param2.strip() == x[3][1].strip()) if param2 !=", 
"\"ACTIVE\"), \"DOWN__HANGUP\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"HANGUP\"), \"EARLY__HANGUP\": self.__matchCallStateChange(keyInfoList, \"EARLY\", \"HANGUP\"), \"RINGING__HANGUP\": self.__matchCallStateChange(keyInfoList, \"RINGING\", \"HANGUP\"),", "+ \"}\" if reason not in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion = \"ERROR\" else: res", "sessLogInfoDict, ignoreLinesDict # 获取会话中的呼叫号码 def __getCallNumber(self): \"\"\"获取呼叫号码 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配提取其中的号码段,最后写入此路会话的字典信息callNumber中。 号码的提取样例为(sofia/external/6010@10.0.7.152:5080),其中的6010为号码 参数列表: 无 返回值: 无", "\"\", show = True): sessLogInfoDict = self.getSessLogInfoDict() s = \"\" conclusion = \"\"", "port 24776 -> 192.168.0.178 port 7076 codec: 18 ms: 20 -- RTP信息类 Hangup", "ignoreLinesDict # 获取会话中的呼叫号码 def __getCallNumber(self): \"\"\"获取呼叫号码 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配提取其中的号码段,最后写入此路会话的字典信息callNumber中。 号码的提取样例为(sofia/external/6010@10.0.7.152:5080),其中的6010为号码 参数列表: 无 返回值: 无 异常:", "c.upper() in ['ERROR']: errorCount += 1 elif s and c.upper() in ['WARNING']: warningCount", "+ \"s\", color=\"red\", need=True)) s += \"{0:^40}\".format(\" ↓ \") + \"\\n\" if k[0]", "reason: res = self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = self.SIGN_CHAN_TERMINATED, mod = self.MATCH_MOD_DETAILS) s +=", "[DEBUG] switch_core_state_machine.c:473 (sofia/external/6010@10.0.7.152:5080) Running State Change CS_INIT sessLen = len(sessLogInfoDict) process = 0", "conclusion.upper() in ['ERROR'] and 'red' or \\ conclusion.upper() in ['WARNING'] and 'yellow' or", "in logList: for reExpr, expLen, dropPos, flag in reExpInfo: res = self.reMatch(reExpr, log,", "okCount += 1 if context: context = self.__getOutputResultHeader() + context context += self.__getOutputResultTail(warningCount,", "sessUUID = \"\", callNumber = \"\", name = \"\"): return self.__outputOriginLog(outputPath, sessUUID =", "{\"DOWN__EARLY\":True, \"proceeding_183\":True, \"EARLY__RINGING\":True, \"proceeding_180\":True,} case_answer_invite = {\"DOWN__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_invite = {\"DOWN__ACTIVE\":True, \"completed_200\":True,", "fileNameList.append(newFileName) else: if not callNumber: # 确定新的目录,以源日志文件名作为目录名 if not fileName: #orgLogFileNames = \"_\".join([os.path.split(p)[-1]", "0, outputPath, [] else: return 0, outputPath, [] # 输出简单分析结果到文件 def outputReslut(self, outputPath,", "True): fileName = name or ((callNumber or c) + \"__\" + sessUUID +", "conclusion.upper(): color = conclusion.upper() in ['ERROR'] and 'red' or \\ conclusion.upper() in ['WARNING']", "line, log in logList: for reExpr, expLen, dropPos, flag in reExpInfo: res =", "case_calling_invite = {\"CS_INIT__CS_ROUTING\":True, \"CS_ROUTING__CS_CONSUME_MEDIA\":True, \"calling_0\":True,} case_ringing_180 = {\"proceeding_180\":True,} case_ringing_183 = {\"proceeding_183\":True,} case_ringinged_180 =", "RINGING\" # invite-> (183<- or 180<-) 200<- if self.caseMatch(detailsDict, case_answer_180) or self.caseMatch(detailsDict, case_answerd_180)", "self.SIGN_FLAG_RTP, mod = self.MATCH_MOD_DETAILS) locIp, locPort, RmtIp, RmtPort, audioPayLoad, audioPTime = res if", "[self.SIGN_CHAN_TERMINATED]: return self.MOD_OUTSIDE, self.MOD_FS, context[1] elif flag in [self.SIGN_FLAG_R_BYE]: return self.MOD_OUTSIDE, self.MOD_FS, self.SIP_BYE", "[(UUID, sessDict[UUID].get(key, False)) for UUID in sessDict.keys()] def getLogDict(self, UUID = \"\"): \"\"\"获取日志字典", "keyInfoList, \"\\n\",detailsDict, \"\\n\" # 分析会话日志 def __analysis(self): self.__sessKeyInfoCollect() 
    def __init__(self):
        self.__sessLogInfoDict = {}
        self.__ignoreLinesDict = {}
        if PY2:
            return LogAnalyzer.__init__(self, self.ANALYZER_TYPE_FS)
        else:
            return super(FsLogAnalyzer, self).__init__(self.ANALYZER_TYPE_FS)

    def getSessLogInfoDict(self):
        """Get the session info dictionary.

        Returns:
            dict keyed by session UUID, e.g.
            {UUID: {log: {fileIndex: {lineNo: log}},
                    callNumber: the call number,
                    result: the analysis result,
                    keyInfo: [(fileIndex, lineNo, signType, (transition info))]}}
        """
        return self.__sessLogInfoDict

    def getSessInfo(self, UUID = "", key = ""):
        """Get one field of a session.

        Args:
            UUID: session UUID (all sessions when empty)
            key: internal dictionary key
        Returns:
            (UUID, value), or a list of (UUID, value) tuples
        """
        sessDict = self.getSessLogInfoDict()
        if UUID:
            if sessDict.get(UUID, False):
                return UUID, sessDict[UUID].get(key, False)
            else:
                return UUID, None
        else:
            return [(UUID, sessDict[UUID].get(key, False)) for UUID in sessDict.keys()]

    # Get the UUID list
    def getSessUUIDList(self):
        sessLogInfoDict = self.getSessLogInfoDict()
        return sessLogInfoDict.keys()

    # Get the call-number list
    def getCallNumberList(self):
        sessLogInfoDict = self.getSessLogInfoDict()
        return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
                for sessUUID in sessLogInfoDict.keys()
                if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]]

    # Show the UUID list
    def showSessUUIDList(self):
        sessUUIDList = self.getSessUUIDList()
        self.printList(sessUUIDList, 4, "UUID list:", "Total: %d" % len(sessUUIDList))

    # Show the call-number list
    def showCallNumberList(self):
        # Distinct call numbers
        callNumberList = self.getCallNumberList()
        tmp = set(callNumberList)
        self.printList(tmp, 8, "Call number list:", "Total: %d" % len(tmp))
        # Duplicated call numbers
        dupl = self.findDupl(callNumberList)
        len(dupl) and self.printList(dupl, 8, "Duplicated numbers:", "Total: %d" % len(dupl))
UUID = \"\", key = \"\"):", "若输入了callNumber if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \\ self.__showDetailsBody(sessUUID, conclusion):", "== c if callNumber else True): fileName = name or ((callNumber or c)", "= sessLogInfoDict[sessUUID][self.SESS_LOG_DK] fileList = sorted(logFileDict.items(), key=lambda logFileDict:logFileDict[0]) for f, logDict in fileList: logList", "\" -> HANGUP\" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and \"(S)\" or \"(R)\") # 判断挂断原因 res", "if not name: #fileNames = \"_\".join([os.path.split(p)[-1] for p in self.getPath()]) fileNames = sessUUID", "for i, (sessUUID, context) in enumerate(sessList): # 若输入了callNumber if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if", "{} self.__ignoreLinesDict = {} if PY2: return LogAnalyzer.__init__(self, self.ANALYZER_TYPE_FS) else: return super(FsLogAnalyzer, self).__init__(self.ANALYZER_TYPE_FS)", "\"\", callNumber = \"\", conclusion = \"\"): s = \"-\" * 160 +", "mod=\"normal\"): l = [(i, x) for i, x in enumerate(keyInfoList) if x[2] ==", "okCount) if self.outputEx(outputPath, fileName, context): return 1, outputPath, [fileName] else: return 0, outputPath,", "key=lambda sessLogInfoDict:sessLogInfoDict[1][self.SESS_START_TIME_DK]) for i, (sessUUID, context) in enumerate(sessList): # 若输入了callNumber if (callNumber ==", "else (\"\",\"\",\"\",\"\",\"\",\"\") note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] s = \"\" if mode in ['normal']: s", "fileNames + self.OUTPUT_POSTFIX_RESULT context = \"\" warningCount, errorCount, okCount = 0, 0, 0", "if not callNumber: # 确定新的目录,以源日志文件名作为目录名 if not fileName: #orgLogFileNames = \"_\".join([os.path.split(p)[-1] for p", "3, [0], self.SIGN_FLAG_R_BYE), (\"Hangup (.*) \\[(.*)\\] \\[(.*)\\]\", 3, [0], self.SIGN_FLAG_HANGUP), (\"Sending BYE to(.*)\",", "% (\"结果\", conclusion) s += \"%-16s: %-s\\n\" % (\"消息流\", self.showNote(note)) s += \"\\n\"", "s and c.upper() in ['OK']: okCount += 1 else: for sessUUID in sessLogInfoDict.keys():", "(\"\", \"\") if reason: note += \"{[\" + st + \"]\" + reason", "= outputPath fileNameList = [] # 输出的文件列表 # 如果存在UUID(只输出一个文件) if sessUUID: if sessLogInfoDict.get(sessUUID,", "sessLen, widgetType = \"percent\", begin=50, end=100) keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] #if sessUUID == \"4befcdab-a4cc-4d6a-979f-bbff65d729b0\":", "+ okCount + warningCount, \"告警\", warningCount, \"失败\", errorCount, \"成功\", okCount) return s def", "SIGN_FLAG_CHAN = \"chan proc\" SIGN_CHAN_CALLING = 'calling' SIGN_CHAN_PROCEDDING = 'proceeding' SIGN_CHAN_COMPLETE = 'completing'", "# 会话关键信息收集 def __sessKeyInfoCollect(self): \"\"\"会话关键信息收集 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配其中的状态转移和收取消息日志。 例如: State Change CS_CONSUME_MEDIA -> CS_EXECUTE 为核心层状态机迁移", "invite-> (183<- or 180<-) 错误应答<- elif self.caseMatch(detailsDict, case_hangup_180) or self.caseMatch(detailsDict, case_hangup_183): note +=", "return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = desc) # 分析会话过程 def __sessAnalysis(self): \"\"\"会话分析 分析每路会话的状态变迁过程。首先确定有哪些状态在变迁,然后建立状态迁移标准模板,去匹配其中的过程 参数列表:", "在建立了会话日志字典之后,分析每路会话,以正则的方式匹配其中的状态转移和收取消息日志。 例如: State Change CS_CONSUME_MEDIA -> CS_EXECUTE 为核心层状态机迁移 -- CS类 Callstate Change ACTIVE", "例如:4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 sofia/external/6010@10.0.7.152:5080 Standard INIT # 找到第一个空格,左边就是会话ID,右边就是日志信息 pos = line.find('", "res[1] flag = True # 若有号码变换,需要取变换的号码 res = self.reMatch(\"Dialplan: sofia\\/(.*)\\/(.*) Action 
transfer\\((\\d*) XML", "= self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS) st, reason = res if res else", "1 if context: context = self.__getOutputResultHeader() + context context += self.__getOutputResultTail(warningCount, errorCount, okCount)", "'cancel' SIGN_FLAG_R_INVITE = \"recv_invite\" # SIP信令 SIP_INVITE = 'INVITE' SIP_CANCEL = 'CANCEL' SIP_BYE", "= time.clock() s = \"正在收集会话信息...\" PRINT(s, end='') self.__sessCollect() time2 = time.clock() s =", "or getColor(\"{0:<20}\".format(\"null\"), color='gray', need=show), conclusion, note) return s, conclusion def __showAnalysisResultBody(self, sessUUID, targConclusion", "self.__sessAnalysis() # 运行 def run(self, mode = \"Normal\"): time1 = time.clock() s =", "color='green') s = \"正在提取号码...\" PRINT(s, end='') self.__getCallNumber() time3 = time.clock() s = \"OK", "INPUT, getColor if PY2: from analyzer import LogAnalyzer else: from analyzer.analyzer import LogAnalyzer", "= 'fs' # 会话类字典key SESS_FS_CALLNUMBER_DK = \"callNumber\" # 抽取出的关键信息分类 SIGN_FLAG_CHAN = \"chan proc\"", "异常: 无 \"\"\" if flag in [self.SIGN_FLAG_CHAN]: if context[0] in [self.SIGN_CHAN_CALLING]: return self.MOD_FS,", "newPath = os.path.join(outputPath, name) # 创建新的目录,若存在则删除 self.makeDir(newPath) for sessUUID in sessLogInfoDict.keys(): logDict =", "invite-> (183<- or 180<-) 200<- bye<-> if self.caseMatch(detailsDict, case_hangup_acitve): note += \" ->", "result:分析结果, keyInfo:(文件索引,行数,状态类型,(状态迁移信息))}} 异常: 无 \"\"\" return self.__sessLogInfoDict def getIgnoreLinesDict(self): \"\"\"获取忽略的行字典 在解析过程中,有些无法满足正则条件的日志行,无法解析其数据,则会填入此字典中 参数列表: 无", "{\"EARLY__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_hangup_invite = {\"DOWN__HANGUP\":True,} case_hangup_180 = {\"RINGING__HANGUP\":True,} case_hangup_183 = {\"EARLY__HANGUP\":True,} case_hangup_acitve", "{}).get(k[1], \"\")) res = self.reMatch(\"(\\\\d{4})-(\\\\d{1,2})-(\\\\d{1,2}) (\\\\d{2}):(\\\\d{2}):(\\\\d{2}).(\\\\d{6})\", signTime, 7) if res: signTimePrev = signTimeThis", "无 返回值: 忽略的行字典 例如: {文件索引:{行数:日志}} 异常: 无 \"\"\" return self.__ignoreLinesDict def load(self, path,", "if flag: break # 没有找到号码,可能是日志文件的格式发生了变化 else: #print \"Not find the call number. 
UUID:%s\"", "self.MOD_OUTSIDE, self.SIP_BYE else: pass return '', '', '' # 按照会话,收集日志信息 def __sessCollect(self): \"\"\"按照UUID收集会话日志", "self.__sessCollect() time2 = time.clock() s = \"OK (耗时:%.2f秒)\" % (time2 - time1) PRINT(s,", "Change ACTIVE -> HANGUP 为呼叫层状态机迁移 -- call类 entering state [proceeding][180] 为收响应消息的处理 -- channel类", "note += \"[NOT COMPLETE\" note += \"]\" sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict", "audioPayLoad, \"ptime\", audioPTime) res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS) reason = res[1]", "(.*) \\[(.*)\\] \\[(.*)\\]\", 3, [0], self.SIGN_FLAG_R_BYE), (\"Hangup (.*) \\[(.*)\\] \\[(.*)\\]\", 3, [0], self.SIGN_FLAG_HANGUP),", "else True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName = sessDict[self.SESS_FS_CALLNUMBER_DK] + \"__\" + sessUUID +", "\"\"): \"\"\"清理FS的日志 参数列表: UUID:会话的UUID key:内部的字典名 返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\" sessDict", "fileNames = sessUUID + callNumber + conclusion + \"_tmp\" fileName = \"Result\" +", "True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName = fileName or (sessDict[self.SESS_FS_CALLNUMBER_DK] + \"__\" + sessUUID", "return super(FsLogAnalyzer, self).load(path, rl) def clear(self): \"\"\"清理FS的日志 参数列表: 无 返回值: 成功标志和错误信息 元组(bool, str)", "s = \"OK (耗时:%.2f秒)\" % (time4 - time3) PRINT(s, color='green') return True, \"\"", "showResult(self, sessUUID = \"\", callNumber = \"\", conclusion = \"\"): return self.__showResult(sessUUID =", "param1 != \"\" else True) and \\ ((len(x[3]) >= 2 and param2.strip() ==", "not fileName: #fileNames = \"_\".join([os.path.split(p)[-1] for p in self.getPath()]) fileNames = sessUUID +", "= sorted(logFileDict.items(), key=lambda logFileDict:logFileDict[0]) for f, logDict in fileList: logList = sorted(logDict.items(), key=lambda", "\"\"\"信令的收发方向(用于上层显示输出) 参数列表: flag:keyInfoList中元组的‘状态类型’字段 context:keyInfoList中元组的‘信息’字段 返回值: 元组(FromModule, ToModule, Sign) 异常: 无 \"\"\" if flag", "= self.__getAnalysisResultBody(sessUUID, conclusion, show=False) context += s if s and c.upper() in ['ERROR']:", "return self.MOD_OUTSIDE, self.MOD_FS, context[1] elif context[0] in [self.SIGN_CHAN_TERMINATED]: return self.MOD_OUTSIDE, self.MOD_FS, context[1] elif", "\"callNumber\" # 抽取出的关键信息分类 SIGN_FLAG_CHAN = \"chan proc\" SIGN_CHAN_CALLING = 'calling' SIGN_CHAN_PROCEDDING = 'proceeding'", "for sessUUID in sessLogInfoDict.keys(): logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] # 若输入了号码,则需要过滤号码 c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if", "s def __showDetailsBody(self, sessUUID = \"\", targConclusion = \"\"): s = self.getDetails(sessUUID, targConclusion)", "in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys(): flag = False for l in sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f].keys(): # 取一行日志 sessLog =", "detailsDict sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion # print \"\\n\", sessLogInfoDict[sessUUID][\"callNumber\"], sessLogInfoDict[sessUUID][\"result\"][\"conclusion\"], note,#, \"\\n\", keyInfoList, \"\\n\",detailsDict,", "+ sessUUID + self.OUTPUT_POSTFIX_LOG) if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName) #", "res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS) reason = res[1] if res else", "int(res[2]), int(res[3]), int(res[4]), int(res[5])) if 
signTimePrev and (signTimeThis - signTimePrev).seconds > 4: s", "7) if res: signTimePrev = signTimeThis signTimeThis = datetime(int(res[0]), int(res[1]), int(res[2]), int(res[3]), int(res[4]),", "= self.inputContinue(i, count, total, flag, self.__showDetailsHeader) if not continueRet: break # 显示尾 self.__showDetailsTail(count)", "sessUUID = \"\", callNumber = \"\", name = \"\"): sessLogInfoDict = self.getSessLogInfoDict() newPath", "c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if (callNumber == c if callNumber else True): fileName =", "\"ERROR\" note += \"(recv %s)\" % detailsDict[\"terminated_list\"][0][1] else: conclusion = \"WARNING\" note +=", "or line[0:pos].count('-') != 4: if f not in ignoreLinesDict: ignoreLinesDict[f] = {} else:", "CID from \\\"(.*)\\\" \\<(.*)\\> to \\\"(.*)\\\" \\<(.*)\\>\", 4, [], self.SIGN_FLAG_CALLNUMBER), # 呼叫号码 (\"952", "(\"挂断原因\", res[1] if res else reason) signTimePrev = None signTimeThis = None if", "Exception as Err: s = str(Err, reExpInfo[i], res) PRINT(s) raise res = tuple(l)", "LogAnalyzer.load(self, path, rl) else: return super(FsLogAnalyzer, self).load(path, rl) def clear(self): \"\"\"清理FS的日志 参数列表: 无", "\"OK\" note = \"[CALLING\" + (self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE) and \"(R)\" or \"(S)\") # invite->", "\"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState) def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode): \"\"\"通道状态码模糊匹配 模糊码以X代表一个任意数字位,例如4XX,则为匹配4开头应答码 参数列表:", "= {\"RINGING__HANGUP\":True,} case_hangup_183 = {\"EARLY__HANGUP\":True,} case_hangup_acitve = {\"ACTIVE__HANGUP\":True,} case_r_183 = {\"proceeding_183\":True,} # invite->", "PRINT(s) def __showResult(self, sessUUID = \"\", callNumber = \"\", conclusion = \"\"): sessLogInfoDict", "# 需要匹配的正则表达式 reExpInfo = [ (\"State Change (.*) -> (.*)\", 2, [], self.SIGN_FLAG_CS),", "# 会话类字典key SESS_FS_CALLNUMBER_DK = \"callNumber\" # 抽取出的关键信息分类 SIGN_FLAG_CHAN = \"chan proc\" SIGN_CHAN_CALLING =", "无 返回值: 无 异常: 无 \"\"\" sessLogInfoDict = self.getSessLogInfoDict() # 需要匹配的正则表达式 reExpInfo =", "for x in keyInfoList: if x[2] == self.SIGN_FLAG_CHAN: reExpr = \"(\" + fuzzyCode.replace(\"X\",\"\\\\d\").replace(\"x\",", "c, sessUUID)): fileNameList.append(fileName) # 不存在UUID(可能输出多个文件) else: # 确定新的目录,若指定了文件名,则以指定的为准,否则以源日志文件名作为目录名 if not name: #fileNames =", "\"HANGUP\"), \"RINGING__HANGUP\": self.__matchCallStateChange(keyInfoList, \"RINGING\", \"HANGUP\"), \"ACTIVE__HANGUP\": self.__matchCallStateChange(keyInfoList, \"ACTIVE\", \"HANGUP\"), \"calling_0\": self.__matchChannelStateDesc(keyInfoList, \"calling\"), \"proceeding_180\":", "count, sessUUID = \"\", callNumber = \"\", conclusion = \"\"): s = \"-\"", "False)) for UUID in sessDict.keys()] def getLogDict(self, UUID = \"\"): \"\"\"获取日志字典 参数列表: UUID:会话的UUID", "else True) and \\ ((len(x[3]) >= 2 and param2.strip() == x[3][1].strip()) if param2", "+= \"%-4s %-35s %-16s %-16s %s\\n\\n\" % (\"序号\",\"信令时间\", \"源日志行号\", \"消息类型\", \"详情\") l =", "\"(S)\" or \"(R)\") # invite-> (bye-> or 错误应答<-) elif self.caseMatch(detailsDict, case_hangup_invite): if self.caseMatch(detailsDict,", "= self.getSessLogInfoDict() # 显示头 self.__showDetailsHeader() # 显示Body count = 0 if sessUUID: #", "self.MOD_FS, self.SIP_BYE elif flag in [self.SIGN_FLAG_CANCEL]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_CANCEL elif flag in", "= \"(\" + fuzzyCode.replace(\"X\",\"\\\\d\").replace(\"x\", \"\\\\d\") + \")\" res = self.reMatch(reExpr, x[3][1], 1) res", "else: #print \"Not find the call number. 
UUID:%s\" % sessUUID pass else: pass", "else: for sessUUID in sessLogInfoDict.keys(): if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True):", "\"(recv %s)\" % detailsDict[\"terminated_list\"][0][1] else: conclusion = \"WARNING\" note += \"[NOT COMPLETE\" note", "in [self.SIGN_CHAN_COMPLETE]: return self.MOD_OUTSIDE, self.MOD_FS, context[1] elif context[0] in [self.SIGN_CHAN_TERMINATED]: return self.MOD_OUTSIDE, self.MOD_FS,", "'' # 按照会话,收集日志信息 def __sessCollect(self): \"\"\"按照UUID收集会话日志 FS的日志,左边打印的就是会话UUID信息(36位数字或字母以‘-’连接的字符串,形如4541eb63-e5b0-49f0-8d2c-31e06078013f) 函数读取日志的每一行,按照UUID进行会话归类,建立本地UUID为key的字典,再以文件索引和行数作为key为字典,value为日志内容。 最后包含一些关键信息,如呼叫号码、分析结果、关键信息供分析器内部逻辑使用 参数列表: 无 返回值: 成功解析的会话日志字典和无法解析的会话日志字典", "\\[(.*)\\] \\[(.*)\\]\", 3, [0], self.SIGN_FLAG_R_BYE), (\"Hangup (.*) \\[(.*)\\] \\[(.*)\\]\", 3, [0], self.SIGN_FLAG_HANGUP), (\"Sending", "x in enumerate(keyInfoList) if x[2] == flag and \\ ((len(x[3]) >= 1 and", "参数列表: flag:keyInfoList中元组的‘状态类型’字段 context:keyInfoList中元组的‘信息’字段 返回值: 元组(FromModule, ToModule, Sign) 异常: 无 \"\"\" if flag in", "self.caseMatch(detailsDict, case_hangup_183): note += \" -> HANGUP\" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and \"(S)\" or", "'CANCEL' SIP_BYE = 'BYE' # 匹配模式 MATCH_MOD_NORMAL = \"normal\" MATCH_MOD_EXTEND = \"extend\" MATCH_MOD_DETAILS", "fromState, toState): \"\"\"CS状态变迁匹配 参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool 异常: 无 \"\"\"", "logDict: return s, conclusion callTime = \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1])) callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] conclusion", "'', '' # 按照会话,收集日志信息 def __sessCollect(self): \"\"\"按照UUID收集会话日志 FS的日志,左边打印的就是会话UUID信息(36位数字或字母以‘-’连接的字符串,形如4541eb63-e5b0-49f0-8d2c-31e06078013f) 函数读取日志的每一行,按照UUID进行会话归类,建立本地UUID为key的字典,再以文件索引和行数作为key为字典,value为日志内容。 最后包含一些关键信息,如呼叫号码、分析结果、关键信息供分析器内部逻辑使用 参数列表: 无 返回值:", "分析会话日志 def __analysis(self): self.__sessKeyInfoCollect() self.__sessAnalysis() # 运行 def run(self, mode = \"Normal\"): time1", "\"\", param2 = \"\", f = -1, l = -1, mod=\"normal\"): l =", "返回值: 呼叫号码 str 异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_FS_CALLNUMBER_DK) def getResultDict(self, UUID =", "\"core sm\" SIGN_FLAG_RTP = \"rtp\" SIGN_FLAG_CALLNUMBER = \"callnumber\" SIGN_FLAG_HANGUP = \"hangup_reason\" SIGN_FLAG_R_BYE =", "7, [0], self.SIGN_FLAG_RTP), # RTP通道信息 (\"Flipping CID from \\\"(.*)\\\" \\<(.*)\\> to \\\"(.*)\\\" \\<(.*)\\>\",", "\"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState) def __matchCallStateChange(self, keyInfoList, fromState, toState): \"\"\"call状态变迁匹配 参数列表:", "== self.SIGN_FLAG_CHAN: reExpr = \"(\" + fuzzyCode.replace(\"X\",\"\\\\d\").replace(\"x\", \"\\\\d\") + \")\" res = self.reMatch(reExpr,", "conclusion = \"ERROR\" note += \"(recv %s)\" % detailsDict[\"terminated_list\"][0][1] else: conclusion = \"WARNING\"", "= \"\", targConclusion = \"\", mode = \"normal\"): sessLogInfoDict = self.getSessLogInfoDict() if not", "s = \"OK (耗时:%.2f秒)\" % (time2 - time1) PRINT(s, color='green') s = \"正在提取号码...\"", "(%s:%s %s:%s)\\n\" % (\"媒体信息\", \"本端地址\", locIp, locPort, \"远端地址\", RmtIp, RmtPort, \"Payload\", audioPayLoad, \"ptime\",", "\"%s:%d\\n%s:%d\\n%s:%d\\n%s:%d\\n\" % (\"总计\", errorCount + okCount + warningCount, \"告警\", warningCount, \"失败\", errorCount, \"成功\",", "logDict, callNumber, sessUUID): s = \"呼叫号码:%s\\nUUID:%s\\n\" % (callNumber, sessUUID) return s def __outputOriginLog(self,", "keyInfoList:关键信息列表 
    def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode):
        """Fuzzy channel status-code match.

        'X' stands for any single digit, e.g. 4XX matches any response code
        starting with 4.

        Args:
            keyInfoList: key info list
            fuzzyCode: fuzzy status code
        Returns:
            list of matched values
        """
        codeList = []
        for x in keyInfoList:
            if x[2] == self.SIGN_FLAG_CHAN:
                reExpr = "(" + fuzzyCode.replace("X", "\\d").replace("x", "\\d") + ")"
                res = self.reMatch(reExpr, x[3][1], 1)
                res and codeList.append(x[3])
        return codeList

    # Analyze the call flow of every session
    def __sessAnalysis(self):
        """Session analysis.

        Works out which state transitions occurred in each session, then
        matches them against standard transition templates to reconstruct
        the call flow.
        """
        sessLogInfoDict = self.getSessLogInfoDict()
        # e.g. 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:473 (sofia/external/6010@10.0.7.152:5080) Running State Change CS_INIT
        sessLen = len(sessLogInfoDict)
        process = 0
        for sessUUID in sessLogInfoDict.keys():
            process = self.printProc(process, sessLen, widgetType = "percent", begin=50, end=100)
            keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
            #if sessUUID == "4befcdab-a4cc-4d6a-979f-bbff65d729b0":
            #    print("\n")
            #    for k in keyInfoList:
            #        print(k)
            conclusion = ""
            note = ""
            detailsDict = {
                "CS_INIT__CS_ROUTING": self.__matchCsStateChange(keyInfoList, "CS_INIT", "CS_ROUTING"),
                "CS_ROUTING__CS_CONSUME_MEDIA": self.__matchCsStateChange(keyInfoList, "CS_ROUTING", "CS_CONSUME_MEDIA"),
                "CS_CONSUME_MEDIA__CS_EXECUTE": self.__matchCsStateChange(keyInfoList, "CS_CONSUME_MEDIA", "CS_EXECUTE"),
                "DOWN__RINGING": self.__matchCallStateChange(keyInfoList, "DOWN", "RINGING"),
                "DOWN__EARLY": self.__matchCallStateChange(keyInfoList, "DOWN", "EARLY"),
                "DOWN__ACTIVE": self.__matchCallStateChange(keyInfoList, "DOWN", "ACTIVE"),
                "DOWN__HANGUP": self.__matchCallStateChange(keyInfoList, "DOWN", "HANGUP"),
                "EARLY__RINGING": self.__matchCallStateChange(keyInfoList, "EARLY", "RINGING"),
                "EARLY__ACTIVE": self.__matchCallStateChange(keyInfoList, "EARLY", "ACTIVE"),
                "EARLY__HANGUP": self.__matchCallStateChange(keyInfoList, "EARLY", "HANGUP"),
                "RINGING__ACTIVE": self.__matchCallStateChange(keyInfoList, "RINGING", "ACTIVE"),
                "RINGING__HANGUP": self.__matchCallStateChange(keyInfoList, "RINGING", "HANGUP"),
                "ACTIVE__HANGUP": self.__matchCallStateChange(keyInfoList, "ACTIVE", "HANGUP"),
                "calling_0": self.__matchChannelStateDesc(keyInfoList, "calling"),
                "proceeding_180": self.__matchChannelStateCode(keyInfoList, "180"),
                "proceeding_183": self.__matchChannelStateCode(keyInfoList, "183"),
                "completing_200": self.__matchChannelStateDesc(keyInfoList, "completing"),
                "completed_200": self.__matchChannelStateDesc(keyInfoList, "completed"),
                "ready_200": self.__matchChannelStateDesc(keyInfoList, "ready"),
                "terminated_list": self.__fuzzyMatchChannelStateCode(keyInfoList, "4xx") + \
                                   self.__fuzzyMatchChannelStateCode(keyInfoList, "5xx") + \
                                   self.__fuzzyMatchChannelStateCode(keyInfoList, "6xx"),
            }
            # Signature call states
            case_calling_invite = {"CS_INIT__CS_ROUTING": True, "CS_ROUTING__CS_CONSUME_MEDIA": True, "calling_0": True,}
            case_ringing_180 = {"proceeding_180": True,}
            case_ringing_183 = {"proceeding_183": True,}
            case_ringinged_180 = {"DOWN__RINGING": True,}
            case_ringinged_183 = {"DOWN__EARLY": True,}
            case_ringing_183_180 = {"DOWN__EARLY": True, "proceeding_183": True, "EARLY__RINGING": True,}
            case_answer_invite = {"DOWN__ACTIVE": True, "completing_200": True, "ready_200": True,}
            case_answer_180 = {"RINGING__ACTIVE": True, "completing_200": True, "ready_200": True,}
            case_answerd_180 = {"RINGING__ACTIVE": True, "completed_200": True, "ready_200": True,}
            case_answer_183 = {"EARLY__ACTIVE": True, "completing_200": True, "ready_200": True,}
            case_answerd_183 = {"EARLY__ACTIVE": True, "completed_200": True, "ready_200": True,}
            case_hangup_invite = {"DOWN__HANGUP": True,}
            case_hangup_180 = {"RINGING__HANGUP": True,}
            case_hangup_183 = {"EARLY__HANGUP": True,}
            case_hangup_acitve = {"ACTIVE__HANGUP": True,}
            case_r_183 = {"proceeding_183": True,}
            # invite->
            if self.caseMatch(detailsDict, case_calling_invite) or self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE):
                conclusion = "OK"
                note = "[CALLING" + (self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE) and "(R)" or "(S)")
                # invite-> (183<- or 180<-)
                if self.caseMatch(detailsDict, case_ringing_180) or self.caseMatch(detailsDict, case_ringing_183) or self.caseMatch(detailsDict, case_ringing_183_180) or \
                   self.caseMatch(detailsDict, case_ringinged_180) or self.caseMatch(detailsDict, case_ringinged_183):
                    note += " -> RINGING"
                # invite-> 200<-, or invite-> (183<- or 180<-) 200<-
                if self.caseMatch(detailsDict, case_answer_invite) or \
                   self.caseMatch(detailsDict, case_answer_180) or self.caseMatch(detailsDict, case_answerd_180) or \
                   self.caseMatch(detailsDict, case_answer_183) or self.caseMatch(detailsDict, case_answerd_183):
                    note += " -> TALKING"
                # invite-> (183<- or 180<-) 200<- bye<->
                if self.caseMatch(detailsDict, case_hangup_acitve):
                    note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
                # invite-> (bye-> or error response<-)
                elif self.caseMatch(detailsDict, case_hangup_invite):
                    if self.caseMatch(detailsDict, case_r_183):
                        note += " -> RINGING(183)"
                    note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
                # invite-> (183<- or 180<-) error response<-
                elif self.caseMatch(detailsDict, case_hangup_180) or self.caseMatch(detailsDict, case_hangup_183):
                    note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
                # Determine the hangup reason
                res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS)
                st, reason = res if res else ("", "")
                if not reason:
                    res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS)
                    st, reason = res if res else ("", "")
                if reason:
                    note += "{[" + st + "]" + reason + "}"
                if detailsDict["terminated_list"]:
                    conclusion = "ERROR"
                    note += "(recv %s)" % detailsDict["terminated_list"][0][1]
            else:
                conclusion = "WARNING"
                note += "[NOT COMPLETE"
            note += "]"
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict
            sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion
            # print "\n", sessLogInfoDict[sessUUID]["callNumber"], sessLogInfoDict[sessUUID]["result"]["conclusion"], note
    # Analyze the session logs
    def __analysis(self):
        self.__sessKeyInfoCollect()
        self.__sessAnalysis()

    # Run the whole pipeline: collect, extract numbers, analyze
    def run(self, mode = "Normal"):
        time1 = time.clock()
        s = "Collecting session info..."
        PRINT(s, end='')
        self.__sessCollect()
        time2 = time.clock()
        s = "OK (took %.2fs)" % (time2 - time1)
        PRINT(s, color='green')
        s = "Extracting call numbers..."
        PRINT(s, end='')
        self.__getCallNumber()
        time3 = time.clock()
        s = "OK (took %.2fs)" % (time3 - time2)
        PRINT(s, color='green')
        s = "Analyzing sessions..."
        PRINT(s, end='')
        self.__analysis()
        time4 = time.clock()
        s = "OK (took %.2fs)" % (time4 - time3)
        PRINT(s, color='green')
        return True, ""

    # Get the detailed analysis result of one session
    def getDetails(self, sessUUID = "", targConclusion = "", mode = "normal"):
        sessLogInfoDict = self.getSessLogInfoDict()
        if not sessLogInfoDict.get(sessUUID, False):
            return ""
        conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK]
        if targConclusion.upper() not in conclusion.upper():
            return ""
        logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
        keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
        if not logDict or not keyInfoList:
            return ""
        res = self.__match(keyInfoList, self.SIGN_FLAG_CALLNUMBER, mod = self.MATCH_MOD_DETAILS)
        disFrom, numberFrom, disTo, numberTo = res if res else ("", "", "", "")
        callTime = "%s" % self.getLogTime(logDict.get(keyInfoList[0][0], {}).get(keyInfoList[0][1], ""))
        res = self.__match(keyInfoList, self.SIGN_FLAG_RTP, mod = self.MATCH_MOD_DETAILS)
        locIp, locPort, RmtIp, RmtPort, audioPayLoad, audioPTime = res if res else ("", "", "", "", "", "")
        note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK]
        s = ""
        if mode in ['normal']:
            s += "-" * 160 + "\n"
            s += "\n" + "{0:*^160}".format(" Basic Info ") + "\n\n"
            s += "%-16s: %-s\n" % ("Call start time", callTime)
            s += "%-16s: %-s\n" % ("UUID", sessUUID)
            if numberFrom:
                s += "%-16s: %-s\n" % ("Display number", numberFrom)
            s += "%-16s: %-s\n" % ("Call number", numberTo or sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK])
            if locIp and RmtIp:
                s += "%-16s: %s:%s:%s -> %s:%s:%s (%s:%s %s:%s)\n" % ("Media", "local", locIp, locPort, "remote", RmtIp, RmtPort, "Payload", audioPayLoad, "ptime", audioPTime)
            s += "%-16s: %-s\n" % ("Result", conclusion)
            s += "%-16s: %-s\n" % ("Message flow", self.showNote(note))
            s += "\n"
            # Hangup reason
            res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS)
            reason = res[1] if res else ""
            if reason:
                res = self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = self.SIGN_CHAN_TERMINATED, mod = self.MATCH_MOD_DETAILS)
                s += "%-16s: %s\n" % ("Hangup reason", res[1] if res else reason)
            else:
                res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS)
                reason = res[1] if res else ""
                s += "%-16s: %s\n" % ("Hangup reason", res[1] if res else reason)
            signTimePrev = None
            signTimeThis = None
            s += "%-4s %-35s %-16s %-16s %s\n\n" % ("No.", "Time", "Source line", "Type", "Details")
            l = []
            for i, k in enumerate(keyInfoList):
                signTime = self.getLogTime(logDict.get(k[0], {}).get(k[1], ""))
                res = self.reMatch("(\\d{4})-(\\d{1,2})-(\\d{1,2}) (\\d{2}):(\\d{2}):(\\d{2}).(\\d{6})", signTime, 7)
                if res:
                    signTimePrev = signTimeThis
                    signTimeThis = datetime(int(res[0]), int(res[1]), int(res[2]), int(res[3]), int(res[4]), int(res[5]))
                    # Flag a gap of more than 4 seconds between two signals
                    if signTimePrev and (signTimeThis - signTimePrev).seconds > 4:
                        s += "{0:^40}".format(getColor("gap: " + str((signTimeThis - signTimePrev).seconds) + "s", color="red", need=True))
                        s += "{0:^40}".format(" ↓ ") + "\n"
                if k[0] not in l:
                    s += self.getPathEx(k[0]) + "\n"
                    l.append(k[0])
                s += "%02d. %-35s %-16s %-16s %s\n" % (i + 1, signTime, str(k[1]), str(k[2]), str(k[3]))
            s += "\n"
        return s

    # ----------------------------------------------output original logs to files----------------------------------------------
    def __getOutputHeader(self, logDict, callNumber, sessUUID):
        s = "Call number:%s\nUUID:%s\n" % (callNumber, sessUUID)
        return s

    def __outputOriginLog(self, outputPath, sessUUID = "", callNumber = "", name = ""):
        sessLogInfoDict = self.getSessLogInfoDict()
        newPath = outputPath
        fileNameList = []  # names of the files written
        # A UUID was given: write a single file
        if sessUUID:
            if sessLogInfoDict.get(sessUUID, False):
                logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
                # A number was given as well: filter by it
                c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
                if (callNumber == c if callNumber else True):
                    fileName = name or ((callNumber or c) + "__" + sessUUID + self.OUTPUT_POSTFIX_LOG)
                    if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)):
                        fileNameList.append(fileName)
        # No UUID: possibly write several files
        else:
            # Pick the new directory: the given name if any, else a temporary name
            if not name:
                #fileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
                fileNames = sessUUID + callNumber + "_tmp"
                newPath = os.path.join(outputPath, fileNames)
            else:
                newPath = os.path.join(outputPath, name)
            # Create the new directory (removed first when it already exists)
            self.makeDir(newPath)
            for sessUUID in sessLogInfoDict.keys():
                logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
                # A number was given: filter by it
                c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
                if (callNumber == c if callNumber else True):
                    fileName = (callNumber or c) + "__" + sessUUID + self.OUTPUT_POSTFIX_LOG
                    if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)):
                        fileNameList.append(fileName)
        return len(fileNameList), newPath, fileNameList

    # ----------------------------------------------output detailed analysis results to files----------------------------------------------
    def __outputDetails(self, outputPath, fileName = "", callNumber = "", sessUUID = "", targConclusion = ""):
        fileNameList = []
        sessLogInfoDict = self.getSessLogInfoDict()
        newPath = outputPath
        if sessUUID:
            sessDict = sessLogInfoDict.get(sessUUID, False)
            if not sessDict:
                return len(fileNameList), newPath, fileNameList
            if (sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True):
                newFileName = fileName or (sessDict[self.SESS_FS_CALLNUMBER_DK] + "__" + sessUUID + "__" + targConclusion + self.OUTPUT_POSTFIX_DETAILS)
                if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)):
                    fileNameList.append(newFileName)
        else:
            if not callNumber:
                # Pick the new directory, named after the source log files
                if not fileName:
                    #orgLogFileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
                    orgLogFileNames = callNumber + sessUUID + targConclusion + "_tmp"
                    newPath = os.path.join(outputPath, orgLogFileNames)
                else:
                    newPath = os.path.join(outputPath, fileName)
                # Create the new directory
                if not self.makeDir(newPath):
                    return len(fileNameList), newPath, fileNameList
            for sessUUID in sessLogInfoDict.keys():
                sessDict = sessLogInfoDict[sessUUID]
                if (sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True):
                    newFileName = sessDict[self.SESS_FS_CALLNUMBER_DK] + "__" + sessUUID + "__" + targConclusion + self.OUTPUT_POSTFIX_DETAILS
                    if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)):
                        fileNameList.append(newFileName)
        return len(fileNameList), newPath, fileNameList
sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] if targConclusion.upper() in conclusion.upper():", "or 180<-) if self.caseMatch(detailsDict, case_ringing_180) or self.caseMatch(detailsDict, case_ringing_183) or self.caseMatch(detailsDict, case_ringing_183_180) or \\", "== c if callNumber else True): fileName = (callNumber or c) + \"__\"", "- signTimePrev).seconds > 4: s += \"{0:^40}\".format(\" ↑ \") + \"\\n\" s +=", "= self.getSessLogInfoDict() # 确定新的目录,以源日志文件名作为目录名 if not fileName: #fileNames = \"_\".join([os.path.split(p)[-1] for p in", "in sessLogInfoDict.keys(): sessDict = sessLogInfoDict[sessUUID] if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True:", "self.MOD_OUTSIDE, self.SIP_CANCEL elif flag in [self.SIGN_FLAG_S_BYE]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_BYE else: pass return", "# RTP通道信息 (\"Flipping CID from \\\"(.*)\\\" \\<(.*)\\> to \\\"(.*)\\\" \\<(.*)\\>\", 4, [], self.SIGN_FLAG_CALLNUMBER),", "case_answerd_180) or \\ self.caseMatch(detailsDict, case_answer_183) or self.caseMatch(detailsDict, case_answerd_183): note += \" -> TALKING\"", "warningCount, \"告警\", warningCount, \"失败\", errorCount, \"成功\", okCount) return s def __outputReslut(self, outputPath, sessUUID", "if res else \"\" if reason: res = self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = self.SIGN_CHAN_TERMINATED,", "= {}# 忽略的行{文件索引:{行数:日志}} ANALYZER_TYPE_FS = 'fs' # 会话类字典key SESS_FS_CALLNUMBER_DK = \"callNumber\" # 抽取出的关键信息分类", "= \"\" warningCount, errorCount, okCount = 0, 0, 0 # 输出到文件 if sessUUID:", "= len(line) # 若没有找到空格,则不记录(UUID都是36长度的,若不是,则不记录) if pos is -1 or pos < 36 or", "newPath = outputPath if sessUUID: sessDict = sessLogInfoDict.get(sessUUID, False) if not sessDict: return", "if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: print(sessUUID, \"\\nis not get time\") return sessLogInfoDict, ignoreLinesDict #", "(callNumber == c if callNumber else True): fileName = name or ((callNumber or", "# 显示尾 self.__showDetailsTail(count) return count # 按照UUID搜索日志,并显示详细分析信息 def showDetails(self, sessUUID = \"\", callNumber", "conclusion = \"\"): s = \"-\" * 160 + \"\\n\" s += \"\\n总数:%d\"", "count, total, flag, self.__showDetailsHeader) if not continueRet: break # 显示尾 self.__showDetailsTail(count) return count", "17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 sofia/external/6010@10.0.7.152:5080 Standard INIT # 找到第一个空格,左边就是会话ID,右边就是日志信息 pos = line.find(' ') line_len", "if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName) # 不存在UUID(可能输出多个文件) else: # 确定新的目录,若指定了文件名,则以指定的为准,否则以源日志文件名作为目录名", "+ \"__\" + sessUUID + \"__\" + targConclusion + self.OUTPUT_POSTFIX_DETAILS) if self.outputEx(newPath, newFileName,", "== sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True): s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False) context", "Channel sofia\\/(.*)\\/(\\d*)\\@(.*?) 
\\[\", sessLog, 3) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1] flag = True", "sorted(logDict.items(), key=lambda logDict:logDict[0]) for line, log in logList: for reExpr, expLen, dropPos, flag", "sessUUID == \"4befcdab-a4cc-4d6a-979f-bbff65d729b0\": # print(\"\\n\") # for k in keyInfoList: # print(k) conclusion", "signTime, str(k[1]), str(k[2]), str(k[3])) else: s += \"\\n\" return s def __showDetailsBody(self, sessUUID", "i, k in enumerate(keyInfoList): signTime = \"%s\" % self.getLogTime(logDict.get(k[0], {}).get(k[1], \"\")) res =", "= sessLogInfoDict[sessUUID][self.SESS_LOG_DK] keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] if not logDict or not keyInfoList: return \"\"", "sessUUID in sessLogInfoDict.keys(): if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True): s, c", "\"\" note = \"\" detailsDict = { \"CS_NEW__CS_INIT\": self.__matchCsStateChange(keyInfoList, \"CS_NEW\", \"CS_INIT\"), \"CS_INIT__CS_ROUTING\": self.__matchCsStateChange(keyInfoList,", "datetime PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 from base.base", "self.__matchChannelStateDesc(keyInfoList, \"completed\"), \"ready_200\": self.__matchChannelStateDesc(keyInfoList, \"ready\"), \"terminated_list\": self.__fuzzyMatchChannelStateCode(keyInfoList, \"4xx\") + \\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"5xx\") +", "if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \\ and self.__showAnalysisResultBody(sessUUID, conclusion): count", "[self.SIGN_FLAG_CHAN]: if context[0] in [self.SIGN_CHAN_CALLING]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_INVITE elif context[0] in [self.SIGN_CHAN_PROCEDDING]:", "\"completing_200\":True, \"ready_200\":True,} case_answerd_180 = {\"RINGING__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_183 = {\"EARLY__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_183", "[], self.SIGN_FLAG_CS), # 状态转移类的日志 (\"entering state \\[(.*)\\]\\[(.*)\\]\", 2, [], self.SIGN_FLAG_CHAN), # 收到消息类的日志 (\"Callstate", "if self.caseMatch(detailsDict, case_answer_invite): note += \" -> TALKING\" # invite-> 200<- bye<-> if", "self.__outputReslut(outputPath, sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion, fileName = fileName)", "any(l) else False elif mod in [self.MATCH_MOD_DETAILS]: return l[0][1][3] if any(l) else False", "or \"(S)\") # invite-> 200<- if self.caseMatch(detailsDict, case_answer_invite): note += \" -> TALKING\"", "RmtIp, RmtPort, audioPayLoad, audioPTime = res if res else (\"\",\"\",\"\",\"\",\"\",\"\") note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK]", "if sessUUID: if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True): s, c =", "return self.__outputOriginLog(outputPath, sessUUID = sessUUID, callNumber = callNumber, name = name) # ----------------------------------------------输出详细分析结果到文件----------------------------------------------", "key=lambda logFileDict:logFileDict[0]) for f, logDict in fileList: logList = sorted(logDict.items(), key=lambda logDict:logDict[0]) for", "fileList = sorted(logFileDict.items(), key=lambda logFileDict:logFileDict[0]) for f, logDict in fileList: logList = sorted(logDict.items(),", "case_r_183 = {\"proceeding_183\":True,} # invite-> if self.caseMatch(detailsDict, case_calling_invite) or self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE): conclusion =", "PRINT(s) return s def 
__showDetailsTail(self, count, sessUUID = \"\", callNumber = \"\", conclusion", "self.__ignoreLinesDict def load(self, path, rl=False): \"\"\"加载FS的日志 参数列表: path:日志路径 rl:是否重新加载 返回值: 成功标志和错误信息 元组(bool, str)", "__showDetailsBody(self, sessUUID = \"\", targConclusion = \"\"): s = self.getDetails(sessUUID, targConclusion) if s:", "elif flag in [self.SIGN_FLAG_S_BYE]: return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_BYE else: pass return '', '',", "= desc) # 分析会话过程 def __sessAnalysis(self): \"\"\"会话分析 分析每路会话的状态变迁过程。首先确定有哪些状态在变迁,然后建立状态迁移标准模板,去匹配其中的过程 参数列表: 无 返回值: 无 异常:", "targConclusion = \"\"): s = self.getDetails(sessUUID, targConclusion) if s: PRINT(s) return s def", "logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] newFileName = fileName or (sessDict[self.SESS_FS_CALLNUMBER_DK] + \"__\" + sessUUID +", "%-s\\n\" % (\"消息流\", self.showNote(note)) s += \"\\n\" + \"{0:*^160}\".format(\" 消息交互详情 \") + \"\\n\\n\"", "# invite-> (183<- or 180<-) if self.caseMatch(detailsDict, case_ringing_180) or self.caseMatch(detailsDict, case_ringing_183) or self.caseMatch(detailsDict,", "+ \"\\n\" s += \"\\n总数:%d\" % count PRINT(s) return s def __showDetails(self, sessUUID", "+ self.OUTPUT_POSTFIX_LOG if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)): fileNameList.append(fileName) return len(fileNameList), newPath,", "mod in [self.MATCH_MOD_DETAILS]: return l[0][1][3] if any(l) else False else: return False def", "\"\"\"call状态变迁匹配 参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList,", "res = self.reMatch(\"Dialplan: sofia\\/(.*)\\/(.*) Action transfer\\((\\d*) XML default\\)\", sessLog, 3) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]", "例如 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:473 (sofia/external/6010@10.0.7.152:5080) Running State Change CS_INIT sessLen = len(sessLogInfoDict)", "path:日志路径 rl:是否重新加载 返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\" if PY2: return LogAnalyzer.load(self,", "sessLog, 2) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1] flag = True break if flag:", "+ sessUUID + \"__\" + targConclusion + self.OUTPUT_POSTFIX_DETAILS if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)):", "codec: (\\d+) ms: (\\d+)\", 7, [0], self.SIGN_FLAG_RTP), # RTP通道信息 (\"Flipping CID from \\\"(.*)\\\"", "sessDict = self.getSessLogInfoDict() if UUID: if sessDict.get(UUID, False): return UUID, sessDict[UUID].get(key, False) else:", "key=lambda logDict:logDict[0]) for line, log in logList: for reExpr, expLen, dropPos, flag in", "s = str(Err, reExpInfo[i], res) PRINT(s) raise res = tuple(l) keyInfoList.append((f, line, flag,", "if reason not in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion = \"ERROR\" if detailsDict[\"terminated_list\"]: conclusion =", "count # 按照UUID搜索日志,并显示详细分析信息 def showDetails(self, sessUUID = \"\", callNumber = \"\", conclusion =", "flag = self.inputContinue(i, count, total, flag, self.__showDetailsHeader) if not continueRet: break # 显示尾", "= \"\"): sessLogInfoDict = self.getSessLogInfoDict() # 确定新的目录,以源日志文件名作为目录名 if not fileName: #fileNames = \"_\".join([os.path.split(p)[-1]", "= {\"DOWN__RINGING\":True,} case_ringinged_183 = {\"DOWN__EARLY\":True,} case_ringing_183_180 = {\"DOWN__EARLY\":True, \"proceeding_183\":True, \"EARLY__RINGING\":True, \"proceeding_180\":True,} case_answer_invite =", "def __matchCallStateChange(self, keyInfoList, fromState, toState): \"\"\"call状态变迁匹配 参数列表: keyInfoList:关键信息列表 
fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool", "\"\"\" sessDict = self.getSessLogInfoDict() if UUID: if sessDict.get(UUID, False): return UUID, sessDict[UUID].get(key, False)", "-- 挂断原因类 提取这些信息,并保存在会话字典的keyInfo中,其中以元祖的形式存放(文件索引,行号,匹配标志,提取的结果) 参数列表: 无 返回值: 无 异常: 无 \"\"\" sessLogInfoDict = self.getSessLogInfoDict()", "= \"%-30s %-36s %-30s %-7s %-s\\n\" % (\"呼叫开始时间\", \"UUID\", \"呼叫号码\", \"结果\", \"备注\") PRINT(s)", "若没有找到空格,则不记录(UUID都是36长度的,若不是,则不记录) if pos is -1 or pos < 36 or line[0:pos].count('-') != 4:", "count = 0 if sessUUID: # 若输入了callNumber if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber", "sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i] = sessLog if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is", "f not in sessLogInfoDict[sessUUID][self.SESS_LOG_DK]: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f] = {i:sessLog} if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] =", "\"\\n\" s += \"\\n\" + \"{0:*^160}\".format(\" 基本信息 \") + \"\\n\\n\" s += \"%-16s:", "元组(bool, str) 异常: 无 \"\"\" if PY2: return LogAnalyzer.load(self, path, rl) else: return", "无 \"\"\" sessLogInfoDict = self.getSessLogInfoDict() sessLen = len(sessLogInfoDict) process = 0 for sessUUID", "+ \"\\n\" l.append(k[0]) s += \"%02d. %-35s %-16s %-16s %s\\n\" % (i +", "errorCount, \"成功\", okCount) return s def __outputReslut(self, outputPath, sessUUID = \"\", callNumber =", "os.path.join(outputPath, name) # 创建新的目录,若存在则删除 self.makeDir(newPath) for sessUUID in sessLogInfoDict.keys(): logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] #", "返回值: 日志字典 参照__sessLogInfoDict定义 异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_LOG_DK) def getCallNumber(self, UUID =", "\"\"): sessLogInfoDict = self.getSessLogInfoDict() # 显示头 self.__showAnalysisResultHeader(conclusion) # 显示Body count = 0 if", "getLogDict(self, UUID = \"\"): \"\"\"获取日志字典 参数列表: UUID:会话的UUID 返回值: 日志字典 参照__sessLogInfoDict定义 异常: 无 \"\"\"", "self.getSessLogInfoDict() newPath = outputPath if sessUUID: sessDict = sessLogInfoDict.get(sessUUID, False) if not sessDict:", "= \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1])) callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK]", "if reason not in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion = \"ERROR\" else: res = self.__match(keyInfoList,", "keyInfoList: return \"\" res = self.__match(keyInfoList, self.SIGN_FLAG_CALLNUMBER, mod = self.MATCH_MOD_DETAILS) disFrom, numberFrom, disTo,", "无 \"\"\" if flag in [self.SIGN_FLAG_CHAN]: if context[0] in [self.SIGN_CHAN_CALLING]: return self.MOD_FS, self.MOD_OUTSIDE,", "\"\"\"会话分析 分析每路会话的状态变迁过程。首先确定有哪些状态在变迁,然后建立状态迁移标准模板,去匹配其中的过程 参数列表: 无 返回值: 无 异常: 无 \"\"\" sessLogInfoDict = self.getSessLogInfoDict() sessLen", "sessUUID + callNumber + conclusion + \"_tmp\" fileName = \"Result\" + fileNames +", "fileNameList def outputOriginLog(self, outputPath, sessUUID = \"\", callNumber = \"\", name = \"\"):", "= self.printProc(process, sessLen, widgetType = \"percent\", begin=50, end=100) keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] #if sessUUID", "= {\"EARLY__HANGUP\":True,} case_hangup_acitve = 
{\"ACTIVE__HANGUP\":True,} case_r_183 = {\"proceeding_183\":True,} # invite-> if self.caseMatch(detailsDict, case_calling_invite)", "= sessLogInfoDict[sessUUID] if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True: logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]", "desc): \"\"\"通道状态描述匹配 匹配状态描述 参数列表: keyInfoList:关键信息列表 desc:描述 返回值: 成功或失败 bool 异常: 无 \"\"\" return", "无 返回值: 无 异常: 无 \"\"\" sessLogInfoDict = self.getSessLogInfoDict() sessLen = len(sessLogInfoDict) process", "process = self.printProc(process, sessLen, widgetType = \"percent\", begin=50, end=100) keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] #if", "sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion, fileName = fileName) #", "[DEBUG] switch_core_state_machine.c:40 sofia/external/6010@10.0.7.152:5080 Standard INIT # 找到第一个空格,左边就是会话ID,右边就是日志信息 pos = line.find(' ') line_len =", "----------------------------------------------输出简单分析结果到文件---------------------------------------------- def __getOutputResultHeader(self): s = \"%-30s %-36s %-30s %-6s %s\\n\" % (\"呼叫开始时间\", \"UUID\",", "= \"\", mode = \"normal\"): sessLogInfoDict = self.getSessLogInfoDict() if not sessLogInfoDict.get(sessUUID, False): return", "\"%s \\n\" % getColor(\"{0:^40}\".format(\"时差:\" + str((signTimeThis - signTimePrev).seconds) + \"s\", color=\"red\", need=True)) s", "def __sessCollect(self): \"\"\"按照UUID收集会话日志 FS的日志,左边打印的就是会话UUID信息(36位数字或字母以‘-’连接的字符串,形如4541eb63-e5b0-49f0-8d2c-31e06078013f) 函数读取日志的每一行,按照UUID进行会话归类,建立本地UUID为key的字典,再以文件索引和行数作为key为字典,value为日志内容。 最后包含一些关键信息,如呼叫号码、分析结果、关键信息供分析器内部逻辑使用 参数列表: 无 返回值: 成功解析的会话日志字典和无法解析的会话日志字典 dict,dict 异常: 无", "= self.MATCH_MOD_DETAILS) reason = res[1] if res else \"\" if reason: res =", "异常: 无 \"\"\" self.__sessLogInfoDict = {} self.__ignoreLinesDict = {} return super(FsLogAnalyzer, self).clear() def", "case_hangup_183 = {\"EARLY__HANGUP\":True,} case_hangup_acitve = {\"ACTIVE__HANGUP\":True,} case_r_183 = {\"proceeding_183\":True,} # invite-> if self.caseMatch(detailsDict,", "\"CS_ROUTING\", \"CS_CONSUME_MEDIA\"), \"CS_CONSUME_MEDIA__CS_EXECUTE\": self.__matchCsStateChange(keyInfoList, \"CS_CONSUME_MEDIA\", \"CS_EXECUTE\"), \"DOWN__RINGING\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"RINGING\"), \"DOWN__EARLY\": self.__matchCallStateChange(keyInfoList, \"DOWN\",", "res[1] flag = True break if flag: break # 没有找到号码,可能是日志文件的格式发生了变化 else: #print \"Not", "targConclusion + \"_tmp\" newPath = os.path.join(outputPath, orgLogFileNames) else: newPath = os.path.join(outputPath, fileName) #", "\\[(.*)\\]\\[(.*)\\]\", 2, [], self.SIGN_FLAG_CHAN), # 收到消息类的日志 (\"Callstate Change (.*) -> (.*)\", 2, [],", "\"\"\"清理FS的日志 参数列表: 无 返回值: 成功标志和错误信息 元组(bool, str) 异常: 无 \"\"\" self.__sessLogInfoDict = {}", "color='green') s = \"正在分析会话过程...\" PRINT(s, end='') self.__analysis() time4 = time.clock() s = \"OK", "self.__showResult(sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion) # ----------------------------------------------输出简单分析结果到文件---------------------------------------------- def __getOutputResultHeader(self):", "k[0] not in l: s += self.getPathEx(k[0]) + \"\\n\" l.append(k[0]) s += \"%02d.", "# 若输入了callNumber if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \\ self.__showDetailsBody(sessUUID,", "or not keyInfoList: return \"\" res = self.__match(keyInfoList, self.SIGN_FLAG_CALLNUMBER, mod = self.MATCH_MOD_DETAILS) disFrom,", "+= 1 elif s and c.upper() in ['OK']: okCount += 1 if context:", "invite from (.*) version\", 
1, [], self.SIGN_FLAG_R_INVITE), (\"AUDIO RTP \\[(.*)\\] (.*) port (\\d+)", "sm\" SIGN_FLAG_RTP = \"rtp\" SIGN_FLAG_CALLNUMBER = \"callnumber\" SIGN_FLAG_HANGUP = \"hangup_reason\" SIGN_FLAG_R_BYE = 'recv_bye'", "SIGN_FLAG_R_INVITE = \"recv_invite\" # SIP信令 SIP_INVITE = 'INVITE' SIP_CANCEL = 'CANCEL' SIP_BYE =", "+= \"{0:^40}\".format(\" ↓ \") + \"\\n\" if k[0] not in l: s +=", "p in self.getPath()]) orgLogFileNames = callNumber + sessUUID + targConclusion + \"_tmp\" newPath", "sessDict[UUID].get(key, False)) for UUID in sessDict.keys()] def getLogDict(self, UUID = \"\"): \"\"\"获取日志字典 参数列表:", "self.MOD_OUTSIDE, self.SIP_INVITE elif context[0] in [self.SIGN_CHAN_PROCEDDING]: return self.MOD_OUTSIDE, self.MOD_FS, context[1] elif context[0] in", "(\"总计\", errorCount + okCount + warningCount, \"告警\", warningCount, \"失败\", errorCount, \"成功\", okCount) return", "False) if not sessDict: return len(fileNameList), newPath, fileNameList if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if", "self.SESS_KEYINFO_DK) def getSignInfo(self, flag, context): \"\"\"信令的收发方向(用于上层显示输出) 参数列表: flag:keyInfoList中元组的‘状态类型’字段 context:keyInfoList中元组的‘信息’字段 返回值: 元组(FromModule, ToModule, Sign)", "\"MANAGER_REQUEST\"]: conclusion = \"ERROR\" if detailsDict[\"terminated_list\"]: conclusion = \"ERROR\" note += \"(recv %s)\"", "else: pass # 会话关键信息收集 def __sessKeyInfoCollect(self): \"\"\"会话关键信息收集 在建立了会话日志字典之后,分析每路会话,以正则的方式匹配其中的状态转移和收取消息日志。 例如: State Change CS_CONSUME_MEDIA ->", "transfer\\((\\d*) XML default\\)\", sessLog, 3) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[2] flag = True", "fileName = \"\"): sessLogInfoDict = self.getSessLogInfoDict() # 确定新的目录,以源日志文件名作为目录名 if not fileName: #fileNames =", "\"\") if reason: note += \"{[\" + st + \"]\" + reason +", "def __showDetailsBody(self, sessUUID = \"\", targConclusion = \"\"): s = self.getDetails(sessUUID, targConclusion) if", "+= \"%s \\n\" % getColor(\"{0:^40}\".format(\"时差:\" + str((signTimeThis - signTimePrev).seconds) + \"s\", color=\"red\", need=True))", "\"EARLY__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"EARLY\", \"ACTIVE\"), \"RINGING__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"RINGING\", \"ACTIVE\"), \"DOWN__HANGUP\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"HANGUP\"), \"EARLY__HANGUP\":", "# ----------------------------------------------显示详细分析结果---------------------------------------------- def __showDetailsHeader(self, sessUUID = \"\", callNumber = \"\", conclusion = \"\"):", "context += self.__getOutputResultTail(warningCount, errorCount, okCount) if self.outputEx(outputPath, fileName, context): return 1, outputPath, [fileName]", "__ignoreLinesDict = {}# 忽略的行{文件索引:{行数:日志}} ANALYZER_TYPE_FS = 'fs' # 会话类字典key SESS_FS_CALLNUMBER_DK = \"callNumber\" #", "return self.getSessInfo(UUID, self.SESS_KEYINFO_DK) def getSignInfo(self, flag, context): \"\"\"信令的收发方向(用于上层显示输出) 参数列表: flag:keyInfoList中元组的‘状态类型’字段 context:keyInfoList中元组的‘信息’字段 返回值: 元组(FromModule,", "{\"DOWN__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,} case_answerd_invite = {\"DOWN__ACTIVE\":True, \"completed_200\":True, \"ready_200\":True,} case_answer_180 = {\"RINGING__ACTIVE\":True, \"completing_200\":True, \"ready_200\":True,}", "\"\" logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK] keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] if not logDict or not keyInfoList:", "bool 异常: 无 \"\"\" return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param2 = code) def __matchChannelStateDesc(self, keyInfoList,", 
"sessLogInfoDict[sessUUID][self.SESS_LOG_DK] keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] if not keyInfoList or not logDict: return s, conclusion", "\\ self.__showDetailsBody(sessUUID, conclusion): count += 1 continueRet, flag = self.inputContinue(i, count, total, flag,", "[proceeding][180] 为收响应消息的处理 -- channel类 AUDIO RTP [sofia/external/6797@10.0.7.152:5080] 10.0.7.176 port 24776 -> 192.168.0.178 port", "def __match(self, keyInfoList, flag, param1 = \"\", param2 = \"\", f = -1,", "消息交互详情 \") + \"\\n\\n\" s += \"%-4s %-35s %-16s %-16s %s\\n\\n\" % (\"序号\",\"信令时间\",", "callNumber, name = name) # ----------------------------------------------输出详细分析结果到文件---------------------------------------------- def __outputDetails(self, outputPath, fileName = \"\", callNumber", "self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState) def __matchCallStateChange(self, keyInfoList, fromState, toState): \"\"\"call状态变迁匹配 参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态", "for i, x in enumerate(keyInfoList) if x[2] == flag and \\ ((len(x[3]) >=", "17:41:14.701532 [DEBUG] switch_core_state_machine.c:473 (sofia/external/6010@10.0.7.152:5080) Running State Change CS_INIT sessLen = len(sessLogInfoDict) process =", "len(tmp)) # 重复的呼叫号码 dupl = self.findDupl(callNumberList) len(dupl) and self.printList(dupl, 8, \"重复的号码:\", \"总数:%d\" %", "----------------------------------------------显示分析结果---------------------------------------------- def __showAnalysisResultHeader(self, targConclusion=\"\"): s = \"%-30s %-36s %-30s %-7s %-s\\n\" % (\"呼叫开始时间\",", "= 0 for f, lines in enumerate(self.getLines()): process = self.printProc(process, fileLen) for i,", "reason: note += \"{[\" + st + \"]\" + reason + \"}\" if", "+= \"%-30s %-36s %-30s %-7s %-s\\n\" % (callTime, sessUUID, callNumber or getColor(\"{0:<20}\".format(\"null\"), color='gray',", "codeList.append(x[3]) return codeList def __matchChannelStateCode(self, keyInfoList, code): \"\"\"通道状态码匹配 精确匹配状态码 参数列表: keyInfoList:关键信息列表 code:状态码 返回值:", "= \"\"): \"\"\"获取关键信息列表 参数列表: UUID:会话的UUID 返回值: 关键信息 [(文件索引,行数,状态类型,(信息)),] 异常: 无 \"\"\" return self.getSessInfo(UUID,", "int(res[4]), int(res[5])) if signTimePrev and (signTimeThis - signTimePrev).seconds > 4: s += \"{0:^40}\".format(\"", "def __outputReslut(self, outputPath, sessUUID = \"\", callNumber = \"\", conclusion = \"\", fileName", "\"%-16s: %-s\\n\" % (\"呼叫号码\", numberTo or sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]) if locIp and RmtIp: s +=", "= 0, 0, 0 # 输出到文件 if sessUUID: if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if", "= self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i] = sessLog if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog)", "outputPath, fileName = \"\", callNumber = \"\", sessUUID = \"\", targConclusion=\"\"): fileNameList =", "def getkeyInfoList(self, UUID = \"\"): \"\"\"获取关键信息列表 参数列表: UUID:会话的UUID 返回值: 关键信息 [(文件索引,行数,状态类型,(信息)),] 异常: 无", "'completing' SIGN_CHAN_TERMINATED = 'terminated' SIGN_FLAG_CALL = \"channel sm\" SIGN_CALL_HANGUP = 'HANGUP' SIGN_FLAG_CS =", "conclusion def __showAnalysisResultBody(self, sessUUID, targConclusion = \"\"): s, c = self.__getAnalysisResultBody(sessUUID, targConclusion) if", "else True): fileName = (callNumber or c) + \"__\" + sessUUID + self.OUTPUT_POSTFIX_LOG", "def outputOriginLog(self, outputPath, sessUUID = \"\", callNumber = \"\", name = \"\"): return", "self.__match(keyInfoList, 
self.SIGN_FLAG_CHAN, param1 = desc) # 分析会话过程 def __sessAnalysis(self): \"\"\"会话分析 分析每路会话的状态变迁过程。首先确定有哪些状态在变迁,然后建立状态迁移标准模板,去匹配其中的过程 参数列表: 无", "sessLog, 3) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[2] flag = True break res =", "= {self.SESS_LOG_DK:{f:{i:sessLog}}, self.SESS_FS_CALLNUMBER_DK:\"\", \\ self.SESS_RESULT_DK:{self.SESS_RESULT_CONCLUSION_DK:\"\", self.SESS_RESULT_DETAILS_DK:{}, self.SESS_RESULT_NOTE_DK:\"\"}, \\ self.SESS_KEYINFO_DK:[], self.SESS_START_TIME_DK:self.getLogTime(sessLog)} else: self.__sessLogInfoDict =", "\\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"6xx\"), } # 标志性处理类的状态 case_calling_invite = {\"CS_INIT__CS_ROUTING\":True, \"CS_ROUTING__CS_CONSUME_MEDIA\":True, \"calling_0\":True,} case_ringing_180 =", "\"%-16s: %s\\n\" % (\"挂断原因\", res[1] if res else reason) signTimePrev = None signTimeThis", "\"_tmp\" fileName = \"Result\" + fileNames + self.OUTPUT_POSTFIX_RESULT context = \"\" warningCount, errorCount,", "any(l) else False else: return False def __matchCsStateChange(self, keyInfoList, fromState, toState): \"\"\"CS状态变迁匹配 参数列表:", "\"RINGING\"), \"DOWN__EARLY\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"EARLY\"), \"DOWN__ACTIVE\": self.__matchCallStateChange(keyInfoList, \"DOWN\", \"ACTIVE\"), \"EARLY__RINGING\": self.__matchCallStateChange(keyInfoList, \"EARLY\", \"RINGING\"),", "HANGUP\" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and \"(S)\" or \"(R)\") # 判断挂断原因 res = self.__match(keyInfoList,", "sessLogInfoDict[sessUUID][self.SESS_LOG_DK]: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f] = {i:sessLog} if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i]", "return '', '', '' # 按照会话,收集日志信息 def __sessCollect(self): \"\"\"按照UUID收集会话日志 FS的日志,左边打印的就是会话UUID信息(36位数字或字母以‘-’连接的字符串,形如4541eb63-e5b0-49f0-8d2c-31e06078013f) 函数读取日志的每一行,按照UUID进行会话归类,建立本地UUID为key的字典,再以文件索引和行数作为key为字典,value为日志内容。 最后包含一些关键信息,如呼叫号码、分析结果、关键信息供分析器内部逻辑使用 参数列表:", "f in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys(): flag = False for l in sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f].keys(): # 取一行日志 sessLog", "time.clock() s = \"OK (耗时:%.2f秒)\" % (time4 - time3) PRINT(s, color='green') return True,", "in ['WARNING'] and 'yellow' or \\ conclusion.upper() in ['OK'] and 'green' conclusion =", "-- channel类 AUDIO RTP [sofia/external/6797@10.0.7.152:5080] 10.0.7.176 port 24776 -> 192.168.0.178 port 7076 codec:", "self.SIGN_FLAG_S_BYE) and \"(S)\" or \"(R)\") else: # invite-> (183<- or 180<-) if self.caseMatch(detailsDict,", "= sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] s = \"\" if mode in ['normal']: s += \"-\" *", "= sessUUID + callNumber + conclusion + \"_tmp\" fileName = \"Result\" + fileNames", "note) return s, conclusion def __showAnalysisResultBody(self, sessUUID, targConclusion = \"\"): s, c =", "\"normal\" MATCH_MOD_EXTEND = \"extend\" MATCH_MOD_DETAILS = \"details\" # 输出文件 OUTPUT_POSTFIX_LOG = \".log\" OUTPUT_POSTFIX_RESULT", "okCount + warningCount, \"告警\", warningCount, \"失败\", errorCount, \"成功\", okCount) return s def __outputReslut(self,", "res = self.reMatch(\"<(\\d*)>->(\\d*) in context\", sessLog, 2) if res: sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1] flag", "res else (\"\", \"\") if reason: note += \"{[\" + st + \"]\"", "\"\\n\",detailsDict, \"\\n\" # 分析会话日志 def __analysis(self): self.__sessKeyInfoCollect() self.__sessAnalysis() # 运行 def run(self, 
mode", "f if f != -1 else True) and \\ (x[1] >= l if", "= { \"CS_NEW__CS_INIT\": self.__matchCsStateChange(keyInfoList, \"CS_NEW\", \"CS_INIT\"), \"CS_INIT__CS_ROUTING\": self.__matchCsStateChange(keyInfoList, \"CS_INIT\", \"CS_ROUTING\"), \"CS_ROUTING__CS_CONSUME_MEDIA\": self.__matchCsStateChange(keyInfoList, \"CS_ROUTING\",", "\"\"\"CS状态变迁匹配 参数列表: keyInfoList:关键信息列表 fromState:迁移前的状态 toState:迁移到的状态 返回值: 成功或失败 bool 异常: 无 \"\"\" return self.__match(keyInfoList,", "__matchChannelStateCode(self, keyInfoList, code): \"\"\"通道状态码匹配 精确匹配状态码 参数列表: keyInfoList:关键信息列表 code:状态码 返回值: 成功或失败 bool 异常: 无", "% self.getLogTime(logDict.get(keyInfoList[0][0], {}).get(keyInfoList[0][1], \"\")) res = self.__match(keyInfoList, self.SIGN_FLAG_RTP, mod = self.MATCH_MOD_DETAILS) locIp, locPort,", "\"%-16s: %-s\\n\" % (\"结果\", conclusion) s += \"%-16s: %-s\\n\" % (\"消息流\", self.showNote(note)) s", "None: sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog) else: sessLogInfoDict[sessUUID] = {self.SESS_LOG_DK:{f:{i:sessLog}}, self.SESS_FS_CALLNUMBER_DK:\"\", \\ self.SESS_RESULT_DK:{self.SESS_RESULT_CONCLUSION_DK:\"\", self.SESS_RESULT_DETAILS_DK:{}, self.SESS_RESULT_NOTE_DK:\"\"},", "for dPos in [x for x in sorted(dropPos, reverse=True) if dropPos and x", "k in keyInfoList: # print(k) conclusion = \"\" note = \"\" detailsDict =", "\"\"): sessLogInfoDict = self.getSessLogInfoDict() # 确定新的目录,以源日志文件名作为目录名 if not fileName: #fileNames = \"_\".join([os.path.split(p)[-1] for", "无 \"\"\" sessLogInfoDict = self.getSessLogInfoDict() # 需要匹配的正则表达式 reExpInfo = [ (\"State Change (.*)", "+ sessUUID + \"__\" + targConclusion + self.OUTPUT_POSTFIX_DETAILS) if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)):", "\"completed_200\": self.__matchChannelStateDesc(keyInfoList, \"completed\"), \"ready_200\": self.__matchChannelStateDesc(keyInfoList, \"ready\"), \"terminated_list\": self.__fuzzyMatchChannelStateCode(keyInfoList, \"4xx\") + \\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"5xx\")", "\"\"\"获取日志字典 参数列表: UUID:会话的UUID 返回值: 日志字典 参照__sessLogInfoDict定义 异常: 无 \"\"\" return self.getSessInfo(UUID, self.SESS_LOG_DK) def", "case_r_183): note += \" -> RINGING(183)\" note += \" -> HANGUP\" + (self.__match(keyInfoList,", "import sys from datetime import datetime PY2 = sys.version_info[0] == 2 PY3 =", "= res if res else (\"\",\"\",\"\",\"\") callTime = \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0], {}).get(keyInfoList[0][1], \"\"))", "self.__matchChannelStateCode(keyInfoList, \"180\"), \"proceeding_183\": self.__matchChannelStateCode(keyInfoList, \"183\"), \"completing_200\": self.__matchChannelStateDesc(keyInfoList, \"completing\"), \"completed_200\": self.__matchChannelStateDesc(keyInfoList, \"completed\"), \"ready_200\": self.__matchChannelStateDesc(keyInfoList,", "\"\"): return self.__outputReslut(outputPath, sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion, fileName", "= \"\", conclusion = \"\"): return self.__outputDetails(outputPath, fileName = fileName, sessUUID = sessUUID,", "挂断原因类 提取这些信息,并保存在会话字典的keyInfo中,其中以元祖的形式存放(文件索引,行号,匹配标志,提取的结果) 参数列表: 无 返回值: 无 异常: 无 \"\"\" sessLogInfoDict = self.getSessLogInfoDict() #", "base.base import PRINT, INPUT, getColor if PY2: from analyzer import LogAnalyzer else: from", "\"源日志行号\", \"消息类型\", \"详情\") l = [] for i, k in enumerate(keyInfoList): signTime =", "-*- coding: utf-8 -*- import os import time import sys from datetime import", "self.getSessInfo(UUID, self.SESS_LOG_DK) def getCallNumber(self, UUID = \"\"): 
\"\"\"获取呼叫号码 参数列表: UUID:会话的UUID 返回值: 呼叫号码 str", "\"terminated_list\": self.__fuzzyMatchChannelStateCode(keyInfoList, \"4xx\") + \\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"5xx\") + \\ self.__fuzzyMatchChannelStateCode(keyInfoList, \"6xx\"), } #", "version\", 1, [], self.SIGN_FLAG_R_INVITE), (\"AUDIO RTP \\[(.*)\\] (.*) port (\\d+) -> (.*) port", "% self.getLogTime(logDict.get(k[0], {}).get(k[1], \"\")) res = self.reMatch(\"(\\\\d{4})-(\\\\d{1,2})-(\\\\d{1,2}) (\\\\d{2}):(\\\\d{2}):(\\\\d{2}).(\\\\d{6})\", signTime, 7) if res: signTimePrev", "self.__showAnalysisResultHeader, conclusion) if not continueRet: break # 显示尾 self.__showAnalysisResultTail(count, conclusion) return count def", "True # 若有号码变换,需要取变换的号码 res = self.reMatch(\"Dialplan: sofia\\/(.*)\\/(.*) Action transfer\\((\\d*) XML default\\)\", sessLog, 3)", "\"}\" if reason not in [\"NORMAL_CLEARING\", \"MANAGER_REQUEST\"]: conclusion = \"ERROR\" if detailsDict[\"terminated_list\"]: conclusion", "mode in ['normal']: s += \"-\" * 160 + \"\\n\" s += \"\\n\"", "sessUUID, callNumber = callNumber, name = name) # ----------------------------------------------输出详细分析结果到文件---------------------------------------------- def __outputDetails(self, outputPath, fileName", "显示UUID列表 def showSessUUIDList(self): sessUUIDList = self.getSessUUIDList() self.printList(sessUUIDList, 4, \"UUID列表:\", \"总数:%d\" % len(sessUUIDList)) #", "or self.caseMatch(detailsDict, case_ringing_183_180) or \\ self.caseMatch(detailsDict, case_ringinged_180) or self.caseMatch(detailsDict, case_ringinged_183): note += \"", "self.getSessLogInfoDict() # 显示头 self.__showDetailsHeader() # 显示Body count = 0 if sessUUID: # 若输入了callNumber", "return s def __showAnalysisResultTail(self, count, targConclusion=\"\"): s = \"\\n总数:%d\" % count PRINT(s) def", "= \"\", sessUUID = \"\", targConclusion=\"\"): fileNameList = [] sessLogInfoDict = self.getSessLogInfoDict() newPath", "return self.__outputReslut(outputPath, sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion, fileName =", "-1, mod=\"normal\"): l = [(i, x) for i, x in enumerate(keyInfoList) if x[2]", "# 按照会话,收集日志信息 def __sessCollect(self): \"\"\"按照UUID收集会话日志 FS的日志,左边打印的就是会话UUID信息(36位数字或字母以‘-’连接的字符串,形如4541eb63-e5b0-49f0-8d2c-31e06078013f) 函数读取日志的每一行,按照UUID进行会话归类,建立本地UUID为key的字典,再以文件索引和行数作为key为字典,value为日志内容。 最后包含一些关键信息,如呼叫号码、分析结果、关键信息供分析器内部逻辑使用 参数列表: 无 返回值: 成功解析的会话日志字典和无法解析的会话日志字典 dict,dict", "disTo, numberTo = res if res else (\"\",\"\",\"\",\"\") callTime = \"%s\" % self.getLogTime(logDict.get(keyInfoList[0][0],", "def __showAnalysisResultTail(self, count, targConclusion=\"\"): s = \"\\n总数:%d\" % count PRINT(s) def __showResult(self, sessUUID", "if not fileName: #fileNames = \"_\".join([os.path.split(p)[-1] for p in self.getPath()]) fileNames = sessUUID", "[0], self.SIGN_FLAG_RTP), # RTP通道信息 (\"Flipping CID from \\\"(.*)\\\" \\<(.*)\\> to \\\"(.*)\\\" \\<(.*)\\>\", 4,", "= self.getSessLogInfoDict() if not sessLogInfoDict.get(sessUUID, False): return \"\" conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] if targConclusion.upper()", "\\ conclusion.upper() in ['WARNING'] and 'yellow' or \\ conclusion.upper() in ['OK'] and 'green'" ]
[ "<reponame>davidcortesortuno/finmag<filename>examples/boost_python/demo3_numpy/demo3.py import numpy as np import demo3_module a = np.array([[1, 2], [3, 4]], dtype=float) print \"Trace of a:\", demo3_module.trace(a)" ]
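For readers on Python 3, a runnable rendering of the demo3.py row above; np.trace stands in for the compiled demo3_module.trace Boost.Python extension, which is assumed not to be available here:

import numpy as np

a = np.array([[1, 2], [3, 4]], dtype=float)
print("Trace of a:", np.trace(a))  # trace = 1.0 + 4.0 = 5.0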
[ "<reponame>realmar/gnome-quota-indicator<gh_stars>0 \"\"\"Controller of the notification window.\"\"\" from lib.mvc.bases import ControllerBase from lib.mvc.notification_window.model import NotificationWindowModel from lib.mvc.notification_window.view import NotificationWindowView from lib.exception_feedback import add_default_exception_handling from functools import partial class NotificationWindowController(ControllerBase): \"\"\"Controller of the notification window.\"\"\" def __init__(self, app): \"\"\"Ctor of NotificationWindowController.\"\"\" super().__init__(app, NotificationWindowModel, NotificationWindowView) self.view.register_open_usage_event( partial(self.view.app.quota_window.view.cb_show, 0, 0)) self.view.initialize()" ]
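The controller row above pre-binds the quota window's show callback with functools.partial so the notification view can fire it without arguments. A self-contained sketch of that idiom; cb_show is a hypothetical stand-in for the real view method:

from functools import partial

def cb_show(x, y):
    # Stand-in for the quota window's cb_show; partial() has already
    # bound the (0, 0) coordinates, so callers pass nothing.
    print("showing quota window at (%d, %d)" % (x, y))

open_usage_handler = partial(cb_show, 0, 0)
open_usage_handler()  # -> showing quota window at (0, 0)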
[ "<filename>setup.py from distutils.core import setup version = '0.0.6' setup( name = 'bittrex_autotrader', packages = ['bittrex_autotrader'], version = version, description = 'Bittrex currency exchange autotrading script in a nutshell.', author = '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/nuxy/bittrex_autotrader', download_url = 'https://github.com/nuxy/bittrex_autotrader/archive/0.0.{0}.tar.gz'.format(version), keywords = ['trading-bot', 'api-client', 'cryptocurrency', 'bittrex'], classifiers = [], )" ]
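One detail worth flagging in the setup.py row above: with version = '0.0.6', the hardcoded '0.0.' prefix inside the download_url template duplicates the version string. This is easy to verify:

version = '0.0.6'
url = 'https://github.com/nuxy/bittrex_autotrader/archive/0.0.{0}.tar.gz'.format(version)
print(url)
# -> https://github.com/nuxy/bittrex_autotrader/archive/0.0.0.0.6.tar.gz
# Either the template's '0.0.' prefix or the full version string is
# redundant; 'archive/{0}.tar.gz'.format(version) would yield the
# presumably intended .../archive/0.0.6.tar.gz.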
"""An "optimizer" that draws random samples.

Scientific Machine Learning Benchmark
A benchmark of regression models in chem- and materials informatics.

2019-2020, Citrine Informatics.
"""

from typing import Optional, Any

from smlb import (
    params,
    Random,
    RandomVectorSampler,
    VectorSpaceData,
    Optimizer,
    TrackedTransformation,
)


class RandomOptimizer(Optimizer, Random):
    """Draws random samples.

    Parameters:
        num_samples: the number of random samples to draw
        domain: optional domain from which to draw values. If not provided, then the
            optimization domain is taken to be that of `data` parameter passed to `optimize()`.
        rng: pseudo-random number generator
    """

    def __init__(self, num_samples: int, domain: Optional[Any] = None, rng=None, **kwargs):
        super().__init__(rng=rng, **kwargs)

        self._num_samples = params.integer(num_samples, above=0)
        self._sampler = RandomVectorSampler(size=self._num_samples, domain=domain, rng=rng)

    def _minimize(self, data: VectorSpaceData, function_tracker: TrackedTransformation):
        """Generate num_samples random samples and evaluate them."""

        samples = self._sampler.apply(data)
        function_tracker.apply(samples)
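The control flow in `_minimize` is plain random search: a sampler draws a fixed batch of points, and a tracker evaluates the objective on them while recording progress. A self-contained sketch of that pattern, with stand-ins for smlb's RandomVectorSampler and TrackedTransformation (the stand-in names and signatures below are illustrative, not smlb's API):

import random

def sample_uniform(domain, n, rng):
    """Draw n points uniformly from a box domain given as [(lo, hi), ...]."""
    return [tuple(rng.uniform(lo, hi) for lo, hi in domain) for _ in range(n)]

class BestValueTracker:
    """Evaluates a function on samples and keeps the best (lowest) value."""

    def __init__(self, f):
        self._f = f
        self.best = None

    def apply(self, samples):
        for x in samples:
            y = self._f(x)
            if self.best is None or y < self.best:
                self.best = y

rng = random.Random(42)
tracker = BestValueTracker(lambda x: (x[0] - 1) ** 2 + x[1] ** 2)

# Mirrors RandomOptimizer._minimize: draw the batch, hand it to the tracker.
samples = sample_uniform(domain=[(-5, 5), (-5, 5)], n=100, rng=rng)
tracker.apply(samples)
print(tracker.best)  # best objective value seen among the 100 random samples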
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''

from __future__ import unicode_literals
from ..model.internet_site_security_level import InternetSiteSecurityLevel
from ..model.site_security_level import SiteSecurityLevel
from ..model.required_password_type import RequiredPasswordType
from ..model.windows_user_account_control_settings import WindowsUserAccountControlSettings
from ..one_drive_object_base import OneDriveObjectBase


class Windows81GeneralConfiguration(OneDriveObjectBase):

    def __init__(self, prop_dict={}):
        self._prop_dict = prop_dict

    @property
    def accounts_block_adding_non_microsoft_account_email(self):
        """
        Gets and sets the accountsBlockAddingNonMicrosoftAccountEmail

        Returns:
            bool:
                The accountsBlockAddingNonMicrosoftAccountEmail
        """
        if "accountsBlockAddingNonMicrosoftAccountEmail" in self._prop_dict:
            return self._prop_dict["accountsBlockAddingNonMicrosoftAccountEmail"]
        else:
            return None

    @accounts_block_adding_non_microsoft_account_email.setter
    def accounts_block_adding_non_microsoft_account_email(self, val):
        self._prop_dict["accountsBlockAddingNonMicrosoftAccountEmail"] = val

    @property
    def apply_only_to_windows81(self):
        """
        Gets and sets the applyOnlyToWindows81

        Returns:
            bool:
                The applyOnlyToWindows81
        """
        if "applyOnlyToWindows81" in self._prop_dict:
            return self._prop_dict["applyOnlyToWindows81"]
        else:
            return None

    @apply_only_to_windows81.setter
    def apply_only_to_windows81(self, val):
        self._prop_dict["applyOnlyToWindows81"] = val

    @property
    def browser_block_autofill(self):
        """
        Gets and sets the browserBlockAutofill

        Returns:
            bool:
                The browserBlockAutofill
        """
        if "browserBlockAutofill" in self._prop_dict:
            return self._prop_dict["browserBlockAutofill"]
        else:
            return None

    @browser_block_autofill.setter
    def browser_block_autofill(self, val):
        self._prop_dict["browserBlockAutofill"] = val

    @property
    def browser_block_automatic_detection_of_intranet_sites(self):
        """
        Gets and sets the browserBlockAutomaticDetectionOfIntranetSites

        Returns:
            bool:
                The browserBlockAutomaticDetectionOfIntranetSites
        """
        if "browserBlockAutomaticDetectionOfIntranetSites" in self._prop_dict:
            return self._prop_dict["browserBlockAutomaticDetectionOfIntranetSites"]
        else:
            return None

    @browser_block_automatic_detection_of_intranet_sites.setter
    def browser_block_automatic_detection_of_intranet_sites(self, val):
        self._prop_dict["browserBlockAutomaticDetectionOfIntranetSites"] = val

    @property
    def browser_block_enterprise_mode_access(self):
        """
        Gets and sets the browserBlockEnterpriseModeAccess

        Returns:
            bool:
                The browserBlockEnterpriseModeAccess
        """
        if "browserBlockEnterpriseModeAccess" in self._prop_dict:
            return self._prop_dict["browserBlockEnterpriseModeAccess"]
        else:
            return None

    @browser_block_enterprise_mode_access.setter
    def browser_block_enterprise_mode_access(self, val):
        self._prop_dict["browserBlockEnterpriseModeAccess"] = val

    @property
    def browser_block_java_script(self):
        """
        Gets and sets the browserBlockJavaScript

        Returns:
            bool:
                The browserBlockJavaScript
        """
        if "browserBlockJavaScript" in self._prop_dict:
            return self._prop_dict["browserBlockJavaScript"]
        else:
            return None

    @browser_block_java_script.setter
    def browser_block_java_script(self, val):
        self._prop_dict["browserBlockJavaScript"] = val

    @property
    def browser_block_plugins(self):
        """
        Gets and sets the browserBlockPlugins

        Returns:
            bool:
                The browserBlockPlugins
        """
        if "browserBlockPlugins" in self._prop_dict:
            return self._prop_dict["browserBlockPlugins"]
        else:
            return None

    @browser_block_plugins.setter
    def browser_block_plugins(self, val):
        self._prop_dict["browserBlockPlugins"] = val

    @property
    def browser_block_popups(self):
        """
        Gets and sets the browserBlockPopups

        Returns:
            bool:
                The browserBlockPopups
        """
        if "browserBlockPopups" in self._prop_dict:
            return self._prop_dict["browserBlockPopups"]
        else:
            return None

    @browser_block_popups.setter
    def browser_block_popups(self, val):
        self._prop_dict["browserBlockPopups"] = val

    @property
    def browser_block_sending_do_not_track_header(self):
        """
        Gets and sets the browserBlockSendingDoNotTrackHeader

        Returns:
            bool:
                The browserBlockSendingDoNotTrackHeader
        """
        if "browserBlockSendingDoNotTrackHeader" in self._prop_dict:
            return self._prop_dict["browserBlockSendingDoNotTrackHeader"]
        else:
            return None

    @browser_block_sending_do_not_track_header.setter
    def browser_block_sending_do_not_track_header(self, val):
        self._prop_dict["browserBlockSendingDoNotTrackHeader"] = val

    @property
    def browser_block_single_word_entry_on_intranet_sites(self):
        """
        Gets and sets the browserBlockSingleWordEntryOnIntranetSites

        Returns:
            bool:
                The browserBlockSingleWordEntryOnIntranetSites
        """
        if "browserBlockSingleWordEntryOnIntranetSites" in self._prop_dict:
            return self._prop_dict["browserBlockSingleWordEntryOnIntranetSites"]
        else:
            return None

    @browser_block_single_word_entry_on_intranet_sites.setter
    def browser_block_single_word_entry_on_intranet_sites(self, val):
        self._prop_dict["browserBlockSingleWordEntryOnIntranetSites"] = val

    @property
    def browser_require_smart_screen(self):
        """
        Gets and sets the browserRequireSmartScreen

        Returns:
            bool:
                The browserRequireSmartScreen
        """
        if "browserRequireSmartScreen" in self._prop_dict:
            return self._prop_dict["browserRequireSmartScreen"]
        else:
            return None

    @browser_require_smart_screen.setter
    def browser_require_smart_screen(self, val):
        self._prop_dict["browserRequireSmartScreen"] = val

    @property
    def browser_enterprise_mode_site_list_location(self):
        """
        Gets and sets the browserEnterpriseModeSiteListLocation

        Returns:
            str:
                The browserEnterpriseModeSiteListLocation
        """
        if "browserEnterpriseModeSiteListLocation" in self._prop_dict:
            return self._prop_dict["browserEnterpriseModeSiteListLocation"]
        else:
            return None

    @browser_enterprise_mode_site_list_location.setter
    def browser_enterprise_mode_site_list_location(self, val):
        self._prop_dict["browserEnterpriseModeSiteListLocation"] = val

    @property
    def browser_internet_security_level(self):
        """
        Gets and sets the browserInternetSecurityLevel

        Returns:
            :class:`InternetSiteSecurityLevel<onedrivesdk.model.internet_site_security_level.InternetSiteSecurityLevel>`:
                The browserInternetSecurityLevel
        """
        if "browserInternetSecurityLevel" in self._prop_dict:
            if isinstance(self._prop_dict["browserInternetSecurityLevel"], OneDriveObjectBase):
                return self._prop_dict["browserInternetSecurityLevel"]
            else:
                self._prop_dict["browserInternetSecurityLevel"] = InternetSiteSecurityLevel(self._prop_dict["browserInternetSecurityLevel"])
                return self._prop_dict["browserInternetSecurityLevel"]
        return None

    @browser_internet_security_level.setter
    def browser_internet_security_level(self, val):
        self._prop_dict["browserInternetSecurityLevel"] = val

    @property
    def browser_intranet_security_level(self):
        """
        Gets and sets the browserIntranetSecurityLevel

        Returns:
            :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`:
                The browserIntranetSecurityLevel
        """
        if "browserIntranetSecurityLevel" in self._prop_dict:
            if isinstance(self._prop_dict["browserIntranetSecurityLevel"], OneDriveObjectBase):
                return self._prop_dict["browserIntranetSecurityLevel"]
            else:
                self._prop_dict["browserIntranetSecurityLevel"] = SiteSecurityLevel(self._prop_dict["browserIntranetSecurityLevel"])
                return self._prop_dict["browserIntranetSecurityLevel"]
        return None

    @browser_intranet_security_level.setter
    def browser_intranet_security_level(self, val):
        self._prop_dict["browserIntranetSecurityLevel"] = val

    @property
    def browser_logging_report_location(self):
        """
        Gets and sets the browserLoggingReportLocation

        Returns:
            str:
                The browserLoggingReportLocation
        """
        if "browserLoggingReportLocation" in self._prop_dict:
            return self._prop_dict["browserLoggingReportLocation"]
        else:
            return None

    @browser_logging_report_location.setter
    def browser_logging_report_location(self, val):
        self._prop_dict["browserLoggingReportLocation"] = val

    @property
    def browser_require_high_security_for_restricted_sites(self):
        """
        Gets and sets the browserRequireHighSecurityForRestrictedSites

        Returns:
            bool:
                The browserRequireHighSecurityForRestrictedSites
        """
        if "browserRequireHighSecurityForRestrictedSites" in self._prop_dict:
            return self._prop_dict["browserRequireHighSecurityForRestrictedSites"]
        else:
            return None

    @browser_require_high_security_for_restricted_sites.setter
    def browser_require_high_security_for_restricted_sites(self, val):
        self._prop_dict["browserRequireHighSecurityForRestrictedSites"] = val

    @property
    def browser_require_firewall(self):
        """
        Gets and sets the browserRequireFirewall

        Returns:
            bool:
                The browserRequireFirewall
        """
        if "browserRequireFirewall" in self._prop_dict:
            return self._prop_dict["browserRequireFirewall"]
        else:
            return None

    @browser_require_firewall.setter
    def browser_require_firewall(self, val):
        self._prop_dict["browserRequireFirewall"] = val

    @property
    def browser_require_fraud_warning(self):
        """
        Gets and sets the browserRequireFraudWarning

        Returns:
            bool:
                The browserRequireFraudWarning
        """
        if "browserRequireFraudWarning" in self._prop_dict:
            return self._prop_dict["browserRequireFraudWarning"]
        else:
            return None

    @browser_require_fraud_warning.setter
    def browser_require_fraud_warning(self, val):
        self._prop_dict["browserRequireFraudWarning"] = val

    @property
    def browser_trusted_sites_security_level(self):
        """
        Gets and sets the browserTrustedSitesSecurityLevel

        Returns:
            :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`:
                The browserTrustedSitesSecurityLevel
        """
        if "browserTrustedSitesSecurityLevel" in self._prop_dict:
            if isinstance(self._prop_dict["browserTrustedSitesSecurityLevel"], OneDriveObjectBase):
                return self._prop_dict["browserTrustedSitesSecurityLevel"]
            else:
                self._prop_dict["browserTrustedSitesSecurityLevel"] = SiteSecurityLevel(self._prop_dict["browserTrustedSitesSecurityLevel"])
                return self._prop_dict["browserTrustedSitesSecurityLevel"]
        return None

    @browser_trusted_sites_security_level.setter
    def browser_trusted_sites_security_level(self, val):
        self._prop_dict["browserTrustedSitesSecurityLevel"] = val

    @property
    def cellular_block_data_roaming(self):
        """
        Gets and sets the cellularBlockDataRoaming

        Returns:
            bool:
                The cellularBlockDataRoaming
        """
        if "cellularBlockDataRoaming" in self._prop_dict:
            return self._prop_dict["cellularBlockDataRoaming"]
        else:
            return None

    @cellular_block_data_roaming.setter
    def cellular_block_data_roaming(self, val):
        self._prop_dict["cellularBlockDataRoaming"] = val

    @property
    def diagnostics_block_data_submission(self):
        """
        Gets and sets the diagnosticsBlockDataSubmission

        Returns:
            bool:
                The diagnosticsBlockDataSubmission
        """
        if "diagnosticsBlockDataSubmission" in self._prop_dict:
            return self._prop_dict["diagnosticsBlockDataSubmission"]
        else:
            return None

    @diagnostics_block_data_submission.setter
    def diagnostics_block_data_submission(self, val):
        self._prop_dict["diagnosticsBlockDataSubmission"] = val

    @property
    def password_block_picture_password_and_pin(self):
        """
        Gets and sets the passwordBlockPicturePasswordAndPin

        Returns:
            bool:
                The passwordBlockPicturePasswordAndPin
        """
        if "passwordBlockPicturePasswordAndPin" in self._prop_dict:
            return self._prop_dict["passwordBlockPicturePasswordAndPin"]
        else:
            return None

    @password_block_picture_password_and_pin.setter
    def password_block_picture_password_and_pin(self, val):
        self._prop_dict["passwordBlockPicturePasswordAndPin"] = val

    @property
    def password_expiration_days(self):
        """
        Gets and sets the passwordExpirationDays

        Returns:
            int:
                The passwordExpirationDays
        """
        if "passwordExpirationDays" in self._prop_dict:
            return self._prop_dict["passwordExpirationDays"]
        else:
            return None

    @password_expiration_days.setter
    def password_expiration_days(self, val):
        self._prop_dict["passwordExpirationDays"] = val

    @property
    def password_minimum_length(self):
        """
        Gets and sets the passwordMinimumLength

        Returns:
            int:
                The passwordMinimumLength
        """
        if "passwordMinimumLength" in self._prop_dict:
            return self._prop_dict["passwordMinimumLength"]
        else:
            return None

    @password_minimum_length.setter
    def password_minimum_length(self, val):
        self._prop_dict["passwordMinimumLength"] = val

    @property
    def password_minutes_of_inactivity_before_screen_timeout(self):
        """
        Gets and sets the passwordMinutesOfInactivityBeforeScreenTimeout

        Returns:
            int:
                The passwordMinutesOfInactivityBeforeScreenTimeout
        """
        if "passwordMinutesOfInactivityBeforeScreenTimeout" in self._prop_dict:
            return self._prop_dict["passwordMinutesOfInactivityBeforeScreenTimeout"]
        else:
            return None

    @password_minutes_of_inactivity_before_screen_timeout.setter
    def password_minutes_of_inactivity_before_screen_timeout(self, val):
        self._prop_dict["passwordMinutesOfInactivityBeforeScreenTimeout"] = val

    @property
    def password_minimum_character_set_count(self):
        """
        Gets and sets the passwordMinimumCharacterSetCount

        Returns:
            int:
                The passwordMinimumCharacterSetCount
        """
        if "passwordMinimumCharacterSetCount" in self._prop_dict:
            return self._prop_dict["passwordMinimumCharacterSetCount"]
        else:
            return None

    @password_minimum_character_set_count.setter
    def password_minimum_character_set_count(self, val):
        self._prop_dict["passwordMinimumCharacterSetCount"] = val

    @property
    def password_previous_password_block_count(self):
        """
        Gets and sets the passwordPreviousPasswordBlockCount

        Returns:
            int:
                The passwordPreviousPasswordBlockCount
        """
        if "passwordPreviousPasswordBlockCount" in self._prop_dict:
            return self._prop_dict["passwordPreviousPasswordBlockCount"]
        else:
            return None

    @password_previous_password_block_count.setter
    def password_previous_password_block_count(self, val):
        self._prop_dict["passwordPreviousPasswordBlockCount"] = val

    @property
    def password_required_type(self):
        """
        Gets and sets the passwordRequiredType

        Returns:
            :class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`:
                The passwordRequiredType
        """
        if "passwordRequiredType" in self._prop_dict:
            if isinstance(self._prop_dict["passwordRequiredType"], OneDriveObjectBase):
                return self._prop_dict["passwordRequiredType"]
            else:
                self._prop_dict["passwordRequiredType"] = RequiredPasswordType(self._prop_dict["passwordRequiredType"])
                return self._prop_dict["passwordRequiredType"]
        return None

    @password_required_type.setter
    def password_required_type(self, val):
        self._prop_dict["passwordRequiredType"] = val

    @property
    def password_sign_in_failure_count_before_factory_reset(self):
        """
        Gets and sets the passwordSignInFailureCountBeforeFactoryReset

        Returns:
            int:
                The passwordSignInFailureCountBeforeFactoryReset
        """
        if "passwordSignInFailureCountBeforeFactoryReset" in self._prop_dict:
            return self._prop_dict["passwordSignInFailureCountBeforeFactoryReset"]
        else:
            return None

    @password_sign_in_failure_count_before_factory_reset.setter
    def password_sign_in_failure_count_before_factory_reset(self, val):
        self._prop_dict["passwordSignInFailureCountBeforeFactoryReset"] = val

    @property
    def storage_require_device_encryption(self):
        """
        Gets and sets the storageRequireDeviceEncryption

        Returns:
            bool:
                The storageRequireDeviceEncryption
        """
        if "storageRequireDeviceEncryption" in self._prop_dict:
            return self._prop_dict["storageRequireDeviceEncryption"]
        else:
            return None

    @storage_require_device_encryption.setter
    def storage_require_device_encryption(self, val):
        self._prop_dict["storageRequireDeviceEncryption"] = val

    @property
    def updates_require_automatic_updates(self):
        """
        Gets and sets the updatesRequireAutomaticUpdates

        Returns:
            bool:
                The updatesRequireAutomaticUpdates
        """
        if "updatesRequireAutomaticUpdates" in self._prop_dict:
            return self._prop_dict["updatesRequireAutomaticUpdates"]
        else:
            return None

    @updates_require_automatic_updates.setter
    def updates_require_automatic_updates(self, val):
        self._prop_dict["updatesRequireAutomaticUpdates"] = val

    @property
    def user_account_control_settings(self):
        """
        Gets and sets the userAccountControlSettings

        Returns:
            :class:`WindowsUserAccountControlSettings<onedrivesdk.model.windows_user_account_control_settings.WindowsUserAccountControlSettings>`:
                The userAccountControlSettings
        """
        if "userAccountControlSettings" in self._prop_dict:
            if isinstance(self._prop_dict["userAccountControlSettings"], OneDriveObjectBase):
                return self._prop_dict["userAccountControlSettings"]
            else:
                self._prop_dict["userAccountControlSettings"] = WindowsUserAccountControlSettings(self._prop_dict["userAccountControlSettings"])
                return self._prop_dict["userAccountControlSettings"]
        return None

    @user_account_control_settings.setter
    def user_account_control_settings(self, val):
        self._prop_dict["userAccountControlSettings"] = val

    @property
    def work_folders_url(self):
        """
        Gets and sets the workFoldersUrl

        Returns:
            str:
                The workFoldersUrl
        """
        if "workFoldersUrl" in self._prop_dict:
            return self._prop_dict["workFoldersUrl"]
        else:
            return None

    @work_folders_url.setter
    def work_folders_url(self, val):
        self._prop_dict["workFoldersUrl"] = val
self._prop_dict[\"passwordRequiredType\"] return None", "sets the passwordMinutesOfInactivityBeforeScreenTimeout Returns: int: The passwordMinutesOfInactivityBeforeScreenTimeout \"\"\" if \"passwordMinutesOfInactivityBeforeScreenTimeout\" in self._prop_dict: return", "in self._prop_dict: if isinstance(self._prop_dict[\"userAccountControlSettings\"], OneDriveObjectBase): return self._prop_dict[\"userAccountControlSettings\"] else : self._prop_dict[\"userAccountControlSettings\"] = WindowsUserAccountControlSettings(self._prop_dict[\"userAccountControlSettings\"]) return", "else: return None @accounts_block_adding_non_microsoft_account_email.setter def accounts_block_adding_non_microsoft_account_email(self, val): self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] = val @property def apply_only_to_windows81(self):", "None @browser_block_popups.setter def browser_block_popups(self, val): self._prop_dict[\"browserBlockPopups\"] = val @property def browser_block_sending_do_not_track_header(self): \"\"\" Gets", "and sets the browserTrustedSitesSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserTrustedSitesSecurityLevel \"\"\" if \"browserTrustedSitesSecurityLevel\" in self._prop_dict:", "if \"browserBlockSendingDoNotTrackHeader\" in self._prop_dict: return self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else: return None @browser_block_sending_do_not_track_header.setter def browser_block_sending_do_not_track_header(self, val):", "browserBlockSingleWordEntryOnIntranetSites Returns: bool: The browserBlockSingleWordEntryOnIntranetSites \"\"\" if \"browserBlockSingleWordEntryOnIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] else:", "self._prop_dict: return self._prop_dict[\"browserLoggingReportLocation\"] else: return None @browser_logging_report_location.setter def browser_logging_report_location(self, val): self._prop_dict[\"browserLoggingReportLocation\"] = val", "= prop_dict @property def accounts_block_adding_non_microsoft_account_email(self): \"\"\" Gets and sets the accountsBlockAddingNonMicrosoftAccountEmail Returns: bool:", "return None @browser_require_smart_screen.setter def browser_require_smart_screen(self, val): self._prop_dict[\"browserRequireSmartScreen\"] = val @property def browser_enterprise_mode_site_list_location(self): \"\"\"", "def browser_intranet_security_level(self, val): self._prop_dict[\"browserIntranetSecurityLevel\"] = val @property def browser_logging_report_location(self): \"\"\" Gets and sets", "sets the browserBlockEnterpriseModeAccess Returns: bool: The browserBlockEnterpriseModeAccess \"\"\" if \"browserBlockEnterpriseModeAccess\" in self._prop_dict: return", "\"browserIntranetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserIntranetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserIntranetSecurityLevel\"] else : self._prop_dict[\"browserIntranetSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserIntranetSecurityLevel\"])", "the passwordMinimumCharacterSetCount Returns: int: The passwordMinimumCharacterSetCount \"\"\" if \"passwordMinimumCharacterSetCount\" in self._prop_dict: return self._prop_dict[\"passwordMinimumCharacterSetCount\"]", "@accounts_block_adding_non_microsoft_account_email.setter def accounts_block_adding_non_microsoft_account_email(self, val): self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] = val @property def 
apply_only_to_windows81(self): \"\"\" Gets and", "and sets the passwordPreviousPasswordBlockCount Returns: int: The passwordPreviousPasswordBlockCount \"\"\" if \"passwordPreviousPasswordBlockCount\" in self._prop_dict:", "in self._prop_dict: return self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else: return None @browser_block_sending_do_not_track_header.setter def browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] =", "self._prop_dict[\"applyOnlyToWindows81\"] else: return None @apply_only_to_windows81.setter def apply_only_to_windows81(self, val): self._prop_dict[\"applyOnlyToWindows81\"] = val @property def", "bool: The browserBlockSendingDoNotTrackHeader \"\"\" if \"browserBlockSendingDoNotTrackHeader\" in self._prop_dict: return self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else: return None", "password_expiration_days(self, val): self._prop_dict[\"passwordExpirationDays\"] = val @property def password_minimum_length(self): \"\"\" Gets and sets the", "and sets the browserBlockEnterpriseModeAccess Returns: bool: The browserBlockEnterpriseModeAccess \"\"\" if \"browserBlockEnterpriseModeAccess\" in self._prop_dict:", "self._prop_dict[\"browserBlockPlugins\"] = val @property def browser_block_popups(self): \"\"\" Gets and sets the browserBlockPopups Returns:", "-*- ''' # Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the", "''' from __future__ import unicode_literals from ..model.internet_site_security_level import InternetSiteSecurityLevel from ..model.site_security_level import SiteSecurityLevel", "@property def password_expiration_days(self): \"\"\" Gets and sets the passwordExpirationDays Returns: int: The passwordExpirationDays", "Returns: int: The passwordMinutesOfInactivityBeforeScreenTimeout \"\"\" if \"passwordMinutesOfInactivityBeforeScreenTimeout\" in self._prop_dict: return self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] else: return", "val): self._prop_dict[\"updatesRequireAutomaticUpdates\"] = val @property def user_account_control_settings(self): \"\"\" Gets and sets the userAccountControlSettings", "userAccountControlSettings Returns: :class:`WindowsUserAccountControlSettings<onedrivesdk.model.windows_user_account_control_settings.WindowsUserAccountControlSettings>`: The userAccountControlSettings \"\"\" if \"userAccountControlSettings\" in self._prop_dict: if isinstance(self._prop_dict[\"userAccountControlSettings\"], OneDriveObjectBase):", "sets the passwordSignInFailureCountBeforeFactoryReset Returns: int: The passwordSignInFailureCountBeforeFactoryReset \"\"\" if \"passwordSignInFailureCountBeforeFactoryReset\" in self._prop_dict: return", "from ..model.site_security_level import SiteSecurityLevel from ..model.required_password_type import RequiredPasswordType from ..model.windows_user_account_control_settings import WindowsUserAccountControlSettings from", "None @browser_enterprise_mode_site_list_location.setter def browser_enterprise_mode_site_list_location(self, val): self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] = val @property def browser_internet_security_level(self): \"\"\" Gets", "def browser_internet_security_level(self): \"\"\" Gets and sets the browserInternetSecurityLevel Returns: :class:`InternetSiteSecurityLevel<onedrivesdk.model.internet_site_security_level.InternetSiteSecurityLevel>`: The browserInternetSecurityLevel \"\"\"", "else: return None @password_block_picture_password_and_pin.setter def 
password_block_picture_password_and_pin(self, val): self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] = val @property def password_expiration_days(self):", "@storage_require_device_encryption.setter def storage_require_device_encryption(self, val): self._prop_dict[\"storageRequireDeviceEncryption\"] = val @property def updates_require_automatic_updates(self): \"\"\" Gets and", "def __init__(self, prop_dict={}): self._prop_dict = prop_dict @property def accounts_block_adding_non_microsoft_account_email(self): \"\"\" Gets and sets", "WindowsUserAccountControlSettings from ..one_drive_object_base import OneDriveObjectBase class Windows81GeneralConfiguration(OneDriveObjectBase): def __init__(self, prop_dict={}): self._prop_dict = prop_dict", "def browser_require_firewall(self, val): self._prop_dict[\"browserRequireFirewall\"] = val @property def browser_require_fraud_warning(self): \"\"\" Gets and sets", "sets the passwordBlockPicturePasswordAndPin Returns: bool: The passwordBlockPicturePasswordAndPin \"\"\" if \"passwordBlockPicturePasswordAndPin\" in self._prop_dict: return", "self._prop_dict[\"browserBlockAutofill\"] else: return None @browser_block_autofill.setter def browser_block_autofill(self, val): self._prop_dict[\"browserBlockAutofill\"] = val @property def", "bool: The storageRequireDeviceEncryption \"\"\" if \"storageRequireDeviceEncryption\" in self._prop_dict: return self._prop_dict[\"storageRequireDeviceEncryption\"] else: return None", "self._prop_dict[\"updatesRequireAutomaticUpdates\"] = val @property def user_account_control_settings(self): \"\"\" Gets and sets the userAccountControlSettings Returns:", "if \"passwordBlockPicturePasswordAndPin\" in self._prop_dict: return self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] else: return None @password_block_picture_password_and_pin.setter def password_block_picture_password_and_pin(self, val):", "apply_only_to_windows81(self): \"\"\" Gets and sets the applyOnlyToWindows81 Returns: bool: The applyOnlyToWindows81 \"\"\" if", "\"passwordMinutesOfInactivityBeforeScreenTimeout\" in self._prop_dict: return self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] else: return None @password_minutes_of_inactivity_before_screen_timeout.setter def password_minutes_of_inactivity_before_screen_timeout(self, val): self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"]", "OneDriveObjectBase): return self._prop_dict[\"userAccountControlSettings\"] else : self._prop_dict[\"userAccountControlSettings\"] = WindowsUserAccountControlSettings(self._prop_dict[\"userAccountControlSettings\"]) return self._prop_dict[\"userAccountControlSettings\"] return None @user_account_control_settings.setter", "will be overwritten. 
''' from __future__ import unicode_literals from ..model.internet_site_security_level import InternetSiteSecurityLevel from", "browserBlockJavaScript \"\"\" if \"browserBlockJavaScript\" in self._prop_dict: return self._prop_dict[\"browserBlockJavaScript\"] else: return None @browser_block_java_script.setter def", "browserBlockAutomaticDetectionOfIntranetSites Returns: bool: The browserBlockAutomaticDetectionOfIntranetSites \"\"\" if \"browserBlockAutomaticDetectionOfIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] else:", "browserIntranetSecurityLevel \"\"\" if \"browserIntranetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserIntranetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserIntranetSecurityLevel\"] else :", "sets the browserRequireFraudWarning Returns: bool: The browserRequireFraudWarning \"\"\" if \"browserRequireFraudWarning\" in self._prop_dict: return", "return self._prop_dict[\"browserRequireSmartScreen\"] else: return None @browser_require_smart_screen.setter def browser_require_smart_screen(self, val): self._prop_dict[\"browserRequireSmartScreen\"] = val @property", "return self._prop_dict[\"passwordRequiredType\"] return None @password_required_type.setter def password_required_type(self, val): self._prop_dict[\"passwordRequiredType\"] = val @property def", "the browserEnterpriseModeSiteListLocation Returns: str: The browserEnterpriseModeSiteListLocation \"\"\" if \"browserEnterpriseModeSiteListLocation\" in self._prop_dict: return self._prop_dict[\"browserEnterpriseModeSiteListLocation\"]", "Rights Reserved. Licensed under the MIT License. See License in the project root", "Gets and sets the browserBlockEnterpriseModeAccess Returns: bool: The browserBlockEnterpriseModeAccess \"\"\" if \"browserBlockEnterpriseModeAccess\" in", "The diagnosticsBlockDataSubmission \"\"\" if \"diagnosticsBlockDataSubmission\" in self._prop_dict: return self._prop_dict[\"diagnosticsBlockDataSubmission\"] else: return None @diagnostics_block_data_submission.setter", "\"applyOnlyToWindows81\" in self._prop_dict: return self._prop_dict[\"applyOnlyToWindows81\"] else: return None @apply_only_to_windows81.setter def apply_only_to_windows81(self, val): self._prop_dict[\"applyOnlyToWindows81\"]", "..model.internet_site_security_level import InternetSiteSecurityLevel from ..model.site_security_level import SiteSecurityLevel from ..model.required_password_type import RequiredPasswordType from ..model.windows_user_account_control_settings", "browser_logging_report_location(self): \"\"\" Gets and sets the browserLoggingReportLocation Returns: str: The browserLoggingReportLocation \"\"\" if", "self._prop_dict[\"passwordRequiredType\"] = RequiredPasswordType(self._prop_dict[\"passwordRequiredType\"]) return self._prop_dict[\"passwordRequiredType\"] return None @password_required_type.setter def password_required_type(self, val): self._prop_dict[\"passwordRequiredType\"] =", "''' # Copyright (c) Microsoft Corporation. All Rights Reserved. 
Licensed under the MIT", ": self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserTrustedSitesSecurityLevel\"]) return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] return None @browser_trusted_sites_security_level.setter def browser_trusted_sites_security_level(self, val): self._prop_dict[\"browserTrustedSitesSecurityLevel\"]", "in self._prop_dict: return self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] else: return None @browser_block_automatic_detection_of_intranet_sites.setter def browser_block_automatic_detection_of_intranet_sites(self, val): self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] =", "self._prop_dict: return self._prop_dict[\"browserBlockEnterpriseModeAccess\"] else: return None @browser_block_enterprise_mode_access.setter def browser_block_enterprise_mode_access(self, val): self._prop_dict[\"browserBlockEnterpriseModeAccess\"] = val", "None @browser_block_plugins.setter def browser_block_plugins(self, val): self._prop_dict[\"browserBlockPlugins\"] = val @property def browser_block_popups(self): \"\"\" Gets", "browser_block_plugins(self, val): self._prop_dict[\"browserBlockPlugins\"] = val @property def browser_block_popups(self): \"\"\" Gets and sets the", "password_expiration_days(self): \"\"\" Gets and sets the passwordExpirationDays Returns: int: The passwordExpirationDays \"\"\" if", "if isinstance(self._prop_dict[\"browserIntranetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserIntranetSecurityLevel\"] else : self._prop_dict[\"browserIntranetSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserIntranetSecurityLevel\"]) return self._prop_dict[\"browserIntranetSecurityLevel\"] return", "self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] = val @property def apply_only_to_windows81(self): \"\"\" Gets and sets the applyOnlyToWindows81 Returns:", "the browserBlockAutomaticDetectionOfIntranetSites Returns: bool: The browserBlockAutomaticDetectionOfIntranetSites \"\"\" if \"browserBlockAutomaticDetectionOfIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"]", "def browser_trusted_sites_security_level(self, val): self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = val @property def cellular_block_data_roaming(self): \"\"\" Gets and sets", "Returns: bool: The diagnosticsBlockDataSubmission \"\"\" if \"diagnosticsBlockDataSubmission\" in self._prop_dict: return self._prop_dict[\"diagnosticsBlockDataSubmission\"] else: return", "passwordSignInFailureCountBeforeFactoryReset \"\"\" if \"passwordSignInFailureCountBeforeFactoryReset\" in self._prop_dict: return self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] else: return None @password_sign_in_failure_count_before_factory_reset.setter def", "Gets and sets the browserRequireHighSecurityForRestrictedSites Returns: bool: The browserRequireHighSecurityForRestrictedSites \"\"\" if \"browserRequireHighSecurityForRestrictedSites\" in", "= val @property def user_account_control_settings(self): \"\"\" Gets and sets the userAccountControlSettings Returns: :class:`WindowsUserAccountControlSettings<onedrivesdk.model.windows_user_account_control_settings.WindowsUserAccountControlSettings>`:", "browser_block_java_script(self, val): self._prop_dict[\"browserBlockJavaScript\"] = val @property def browser_block_plugins(self): \"\"\" Gets and sets the", "in self._prop_dict: return self._prop_dict[\"workFoldersUrl\"] else: return None 
@work_folders_url.setter def work_folders_url(self, val): self._prop_dict[\"workFoldersUrl\"] =", "The browserBlockSendingDoNotTrackHeader \"\"\" if \"browserBlockSendingDoNotTrackHeader\" in self._prop_dict: return self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else: return None @browser_block_sending_do_not_track_header.setter", "and sets the passwordSignInFailureCountBeforeFactoryReset Returns: int: The passwordSignInFailureCountBeforeFactoryReset \"\"\" if \"passwordSignInFailureCountBeforeFactoryReset\" in self._prop_dict:", "<reponame>MIchaelMainer/msgraph-v10-models-python<filename>models/windows81_general_configuration.py # -*- coding: utf-8 -*- ''' # Copyright (c) Microsoft Corporation. All", "def browser_block_java_script(self, val): self._prop_dict[\"browserBlockJavaScript\"] = val @property def browser_block_plugins(self): \"\"\" Gets and sets", "return self._prop_dict[\"browserIntranetSecurityLevel\"] return None @browser_intranet_security_level.setter def browser_intranet_security_level(self, val): self._prop_dict[\"browserIntranetSecurityLevel\"] = val @property def", "val): self._prop_dict[\"browserBlockPlugins\"] = val @property def browser_block_popups(self): \"\"\" Gets and sets the browserBlockPopups", "def password_minimum_character_set_count(self): \"\"\" Gets and sets the passwordMinimumCharacterSetCount Returns: int: The passwordMinimumCharacterSetCount \"\"\"", "return self._prop_dict[\"browserBlockPopups\"] else: return None @browser_block_popups.setter def browser_block_popups(self, val): self._prop_dict[\"browserBlockPopups\"] = val @property", "\"cellularBlockDataRoaming\" in self._prop_dict: return self._prop_dict[\"cellularBlockDataRoaming\"] else: return None @cellular_block_data_roaming.setter def cellular_block_data_roaming(self, val): self._prop_dict[\"cellularBlockDataRoaming\"]", "if \"passwordExpirationDays\" in self._prop_dict: return self._prop_dict[\"passwordExpirationDays\"] else: return None @password_expiration_days.setter def password_expiration_days(self, val):", "@password_required_type.setter def password_required_type(self, val): self._prop_dict[\"passwordRequiredType\"] = val @property def password_sign_in_failure_count_before_factory_reset(self): \"\"\" Gets and", "the passwordSignInFailureCountBeforeFactoryReset Returns: int: The passwordSignInFailureCountBeforeFactoryReset \"\"\" if \"passwordSignInFailureCountBeforeFactoryReset\" in self._prop_dict: return self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"]", "password_sign_in_failure_count_before_factory_reset(self): \"\"\" Gets and sets the passwordSignInFailureCountBeforeFactoryReset Returns: int: The passwordSignInFailureCountBeforeFactoryReset \"\"\" if", "the applyOnlyToWindows81 Returns: bool: The applyOnlyToWindows81 \"\"\" if \"applyOnlyToWindows81\" in self._prop_dict: return self._prop_dict[\"applyOnlyToWindows81\"]", "= val @property def browser_block_java_script(self): \"\"\" Gets and sets the browserBlockJavaScript Returns: bool:", "import unicode_literals from ..model.internet_site_security_level import InternetSiteSecurityLevel from ..model.site_security_level import SiteSecurityLevel from ..model.required_password_type import", "storage_require_device_encryption(self): \"\"\" Gets and sets the storageRequireDeviceEncryption Returns: bool: The storageRequireDeviceEncryption \"\"\" if", "InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"]) return self._prop_dict[\"browserInternetSecurityLevel\"] return None 
@browser_internet_security_level.setter def browser_internet_security_level(self, val): self._prop_dict[\"browserInternetSecurityLevel\"] = val @property", "browserBlockEnterpriseModeAccess Returns: bool: The browserBlockEnterpriseModeAccess \"\"\" if \"browserBlockEnterpriseModeAccess\" in self._prop_dict: return self._prop_dict[\"browserBlockEnterpriseModeAccess\"] else:", "\"\"\" if \"browserBlockSingleWordEntryOnIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] else: return None @browser_block_single_word_entry_on_intranet_sites.setter def browser_block_single_word_entry_on_intranet_sites(self,", "sets the browserRequireSmartScreen Returns: bool: The browserRequireSmartScreen \"\"\" if \"browserRequireSmartScreen\" in self._prop_dict: return", "def browser_block_sending_do_not_track_header(self): \"\"\" Gets and sets the browserBlockSendingDoNotTrackHeader Returns: bool: The browserBlockSendingDoNotTrackHeader \"\"\"", "val @property def browser_require_high_security_for_restricted_sites(self): \"\"\" Gets and sets the browserRequireHighSecurityForRestrictedSites Returns: bool: The", "OneDriveObjectBase class Windows81GeneralConfiguration(OneDriveObjectBase): def __init__(self, prop_dict={}): self._prop_dict = prop_dict @property def accounts_block_adding_non_microsoft_account_email(self): \"\"\"", "isinstance(self._prop_dict[\"browserInternetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserInternetSecurityLevel\"] else : self._prop_dict[\"browserInternetSecurityLevel\"] = InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"]) return self._prop_dict[\"browserInternetSecurityLevel\"] return None", "in self._prop_dict: return self._prop_dict[\"browserRequireFraudWarning\"] else: return None @browser_require_fraud_warning.setter def browser_require_fraud_warning(self, val): self._prop_dict[\"browserRequireFraudWarning\"] =", "password_block_picture_password_and_pin(self): \"\"\" Gets and sets the passwordBlockPicturePasswordAndPin Returns: bool: The passwordBlockPicturePasswordAndPin \"\"\" if", "int: The passwordExpirationDays \"\"\" if \"passwordExpirationDays\" in self._prop_dict: return self._prop_dict[\"passwordExpirationDays\"] else: return None", "return None @password_minimum_character_set_count.setter def password_minimum_character_set_count(self, val): self._prop_dict[\"passwordMinimumCharacterSetCount\"] = val @property def password_previous_password_block_count(self): \"\"\"", "\"browserInternetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserInternetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserInternetSecurityLevel\"] else : self._prop_dict[\"browserInternetSecurityLevel\"] = InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"])", "return None @password_sign_in_failure_count_before_factory_reset.setter def password_sign_in_failure_count_before_factory_reset(self, val): self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] = val @property def storage_require_device_encryption(self): \"\"\"", "userAccountControlSettings \"\"\" if \"userAccountControlSettings\" in self._prop_dict: if isinstance(self._prop_dict[\"userAccountControlSettings\"], OneDriveObjectBase): return self._prop_dict[\"userAccountControlSettings\"] else :", "@property def browser_require_firewall(self): \"\"\" Gets and sets the browserRequireFirewall Returns: bool: The browserRequireFirewall", "val): 
self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] = val @property def apply_only_to_windows81(self): \"\"\" Gets and sets the applyOnlyToWindows81", "the browserBlockSingleWordEntryOnIntranetSites Returns: bool: The browserBlockSingleWordEntryOnIntranetSites \"\"\" if \"browserBlockSingleWordEntryOnIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"]", "in self._prop_dict: return self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] else: return None @browser_enterprise_mode_site_list_location.setter def browser_enterprise_mode_site_list_location(self, val): self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] =", "return None @browser_enterprise_mode_site_list_location.setter def browser_enterprise_mode_site_list_location(self, val): self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] = val @property def browser_internet_security_level(self): \"\"\"", "if \"browserBlockEnterpriseModeAccess\" in self._prop_dict: return self._prop_dict[\"browserBlockEnterpriseModeAccess\"] else: return None @browser_block_enterprise_mode_access.setter def browser_block_enterprise_mode_access(self, val):", "else : self._prop_dict[\"browserIntranetSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserIntranetSecurityLevel\"]) return self._prop_dict[\"browserIntranetSecurityLevel\"] return None @browser_intranet_security_level.setter def browser_intranet_security_level(self, val):", "Gets and sets the browserBlockSendingDoNotTrackHeader Returns: bool: The browserBlockSendingDoNotTrackHeader \"\"\" if \"browserBlockSendingDoNotTrackHeader\" in", "else: return None @browser_require_firewall.setter def browser_require_firewall(self, val): self._prop_dict[\"browserRequireFirewall\"] = val @property def browser_require_fraud_warning(self):", "The passwordExpirationDays \"\"\" if \"passwordExpirationDays\" in self._prop_dict: return self._prop_dict[\"passwordExpirationDays\"] else: return None @password_expiration_days.setter", "= val @property def password_sign_in_failure_count_before_factory_reset(self): \"\"\" Gets and sets the passwordSignInFailureCountBeforeFactoryReset Returns: int:", "val): self._prop_dict[\"passwordMinimumLength\"] = val @property def password_minutes_of_inactivity_before_screen_timeout(self): \"\"\" Gets and sets the passwordMinutesOfInactivityBeforeScreenTimeout", "def user_account_control_settings(self, val): self._prop_dict[\"userAccountControlSettings\"] = val @property def work_folders_url(self): \"\"\" Gets and sets", "-*- coding: utf-8 -*- ''' # Copyright (c) Microsoft Corporation. 
All Rights Reserved.", "\"\"\" if \"browserBlockAutofill\" in self._prop_dict: return self._prop_dict[\"browserBlockAutofill\"] else: return None @browser_block_autofill.setter def browser_block_autofill(self,", "@property def browser_block_automatic_detection_of_intranet_sites(self): \"\"\" Gets and sets the browserBlockAutomaticDetectionOfIntranetSites Returns: bool: The browserBlockAutomaticDetectionOfIntranetSites", "the browserBlockEnterpriseModeAccess Returns: bool: The browserBlockEnterpriseModeAccess \"\"\" if \"browserBlockEnterpriseModeAccess\" in self._prop_dict: return self._prop_dict[\"browserBlockEnterpriseModeAccess\"]", "The browserRequireFirewall \"\"\" if \"browserRequireFirewall\" in self._prop_dict: return self._prop_dict[\"browserRequireFirewall\"] else: return None @browser_require_firewall.setter", "\"browserBlockSingleWordEntryOnIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] else: return None @browser_block_single_word_entry_on_intranet_sites.setter def browser_block_single_word_entry_on_intranet_sites(self, val): self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"]", "return None @password_required_type.setter def password_required_type(self, val): self._prop_dict[\"passwordRequiredType\"] = val @property def password_sign_in_failure_count_before_factory_reset(self): \"\"\"", "# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License.", "browser_block_autofill(self): \"\"\" Gets and sets the browserBlockAutofill Returns: bool: The browserBlockAutofill \"\"\" if", "if \"applyOnlyToWindows81\" in self._prop_dict: return self._prop_dict[\"applyOnlyToWindows81\"] else: return None @apply_only_to_windows81.setter def apply_only_to_windows81(self, val):", "bool: The browserBlockPlugins \"\"\" if \"browserBlockPlugins\" in self._prop_dict: return self._prop_dict[\"browserBlockPlugins\"] else: return None", "browser_logging_report_location(self, val): self._prop_dict[\"browserLoggingReportLocation\"] = val @property def browser_require_high_security_for_restricted_sites(self): \"\"\" Gets and sets the", "browser_require_high_security_for_restricted_sites(self, val): self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] = val @property def browser_require_firewall(self): \"\"\" Gets and sets the", "else : self._prop_dict[\"userAccountControlSettings\"] = WindowsUserAccountControlSettings(self._prop_dict[\"userAccountControlSettings\"]) return self._prop_dict[\"userAccountControlSettings\"] return None @user_account_control_settings.setter def user_account_control_settings(self, val):", "else : self._prop_dict[\"passwordRequiredType\"] = RequiredPasswordType(self._prop_dict[\"passwordRequiredType\"]) return self._prop_dict[\"passwordRequiredType\"] return None @password_required_type.setter def password_required_type(self, val):", "= InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"]) return self._prop_dict[\"browserInternetSecurityLevel\"] return None @browser_internet_security_level.setter def browser_internet_security_level(self, val): self._prop_dict[\"browserInternetSecurityLevel\"] = val", "in self._prop_dict: return self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] else: return None @password_minutes_of_inactivity_before_screen_timeout.setter def password_minutes_of_inactivity_before_screen_timeout(self, val): self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] =", 
"passwordSignInFailureCountBeforeFactoryReset Returns: int: The passwordSignInFailureCountBeforeFactoryReset \"\"\" if \"passwordSignInFailureCountBeforeFactoryReset\" in self._prop_dict: return self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] else:", "self._prop_dict: return self._prop_dict[\"passwordMinimumLength\"] else: return None @password_minimum_length.setter def password_minimum_length(self, val): self._prop_dict[\"passwordMinimumLength\"] = val", "Gets and sets the browserRequireFirewall Returns: bool: The browserRequireFirewall \"\"\" if \"browserRequireFirewall\" in", "Returns: bool: The cellularBlockDataRoaming \"\"\" if \"cellularBlockDataRoaming\" in self._prop_dict: return self._prop_dict[\"cellularBlockDataRoaming\"] else: return", "@diagnostics_block_data_submission.setter def diagnostics_block_data_submission(self, val): self._prop_dict[\"diagnosticsBlockDataSubmission\"] = val @property def password_block_picture_password_and_pin(self): \"\"\" Gets and", "self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val @property def browser_block_single_word_entry_on_intranet_sites(self): \"\"\" Gets and sets the browserBlockSingleWordEntryOnIntranetSites Returns:", "Gets and sets the browserBlockAutomaticDetectionOfIntranetSites Returns: bool: The browserBlockAutomaticDetectionOfIntranetSites \"\"\" if \"browserBlockAutomaticDetectionOfIntranetSites\" in", "isinstance(self._prop_dict[\"browserTrustedSitesSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] else : self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserTrustedSitesSecurityLevel\"]) return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] return None", "browser_block_popups(self): \"\"\" Gets and sets the browserBlockPopups Returns: bool: The browserBlockPopups \"\"\" if", "accountsBlockAddingNonMicrosoftAccountEmail Returns: bool: The accountsBlockAddingNonMicrosoftAccountEmail \"\"\" if \"accountsBlockAddingNonMicrosoftAccountEmail\" in self._prop_dict: return self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] else:", "def browser_require_smart_screen(self): \"\"\" Gets and sets the browserRequireSmartScreen Returns: bool: The browserRequireSmartScreen \"\"\"", "Gets and sets the passwordMinutesOfInactivityBeforeScreenTimeout Returns: int: The passwordMinutesOfInactivityBeforeScreenTimeout \"\"\" if \"passwordMinutesOfInactivityBeforeScreenTimeout\" in", "None @browser_block_automatic_detection_of_intranet_sites.setter def browser_block_automatic_detection_of_intranet_sites(self, val): self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] = val @property def browser_block_enterprise_mode_access(self): \"\"\" Gets", "= SiteSecurityLevel(self._prop_dict[\"browserTrustedSitesSecurityLevel\"]) return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] return None @browser_trusted_sites_security_level.setter def browser_trusted_sites_security_level(self, val): self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = val", "sets the browserBlockPopups Returns: bool: The browserBlockPopups \"\"\" if \"browserBlockPopups\" in self._prop_dict: return", "val @property def browser_block_automatic_detection_of_intranet_sites(self): \"\"\" Gets and sets the browserBlockAutomaticDetectionOfIntranetSites Returns: bool: The", "InternetSiteSecurityLevel from ..model.site_security_level import SiteSecurityLevel from ..model.required_password_type import RequiredPasswordType from 
..model.windows_user_account_control_settings import WindowsUserAccountControlSettings", "browserBlockEnterpriseModeAccess \"\"\" if \"browserBlockEnterpriseModeAccess\" in self._prop_dict: return self._prop_dict[\"browserBlockEnterpriseModeAccess\"] else: return None @browser_block_enterprise_mode_access.setter def", "and sets the browserInternetSecurityLevel Returns: :class:`InternetSiteSecurityLevel<onedrivesdk.model.internet_site_security_level.InternetSiteSecurityLevel>`: The browserInternetSecurityLevel \"\"\" if \"browserInternetSecurityLevel\" in self._prop_dict:", "None @browser_require_smart_screen.setter def browser_require_smart_screen(self, val): self._prop_dict[\"browserRequireSmartScreen\"] = val @property def browser_enterprise_mode_site_list_location(self): \"\"\" Gets", "The browserBlockPopups \"\"\" if \"browserBlockPopups\" in self._prop_dict: return self._prop_dict[\"browserBlockPopups\"] else: return None @browser_block_popups.setter", "The browserBlockEnterpriseModeAccess \"\"\" if \"browserBlockEnterpriseModeAccess\" in self._prop_dict: return self._prop_dict[\"browserBlockEnterpriseModeAccess\"] else: return None @browser_block_enterprise_mode_access.setter", "if isinstance(self._prop_dict[\"browserTrustedSitesSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] else : self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserTrustedSitesSecurityLevel\"]) return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] return", "passwordMinimumLength \"\"\" if \"passwordMinimumLength\" in self._prop_dict: return self._prop_dict[\"passwordMinimumLength\"] else: return None @password_minimum_length.setter def", "OneDriveObjectBase): return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] else : self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserTrustedSitesSecurityLevel\"]) return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] return None @browser_trusted_sites_security_level.setter", "self._prop_dict[\"passwordMinimumLength\"] else: return None @password_minimum_length.setter def password_minimum_length(self, val): self._prop_dict[\"passwordMinimumLength\"] = val @property def", "The browserRequireFraudWarning \"\"\" if \"browserRequireFraudWarning\" in self._prop_dict: return self._prop_dict[\"browserRequireFraudWarning\"] else: return None @browser_require_fraud_warning.setter", "val @property def password_minutes_of_inactivity_before_screen_timeout(self): \"\"\" Gets and sets the passwordMinutesOfInactivityBeforeScreenTimeout Returns: int: The", "browser_trusted_sites_security_level(self, val): self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = val @property def cellular_block_data_roaming(self): \"\"\" Gets and sets the", "user_account_control_settings(self, val): self._prop_dict[\"userAccountControlSettings\"] = val @property def work_folders_url(self): \"\"\" Gets and sets the", "sets the browserTrustedSitesSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserTrustedSitesSecurityLevel \"\"\" if \"browserTrustedSitesSecurityLevel\" in self._prop_dict: if", "val): self._prop_dict[\"passwordExpirationDays\"] = val @property def password_minimum_length(self): \"\"\" Gets and sets the passwordMinimumLength", "= val @property def browser_block_enterprise_mode_access(self): \"\"\" Gets and sets the browserBlockEnterpriseModeAccess Returns: bool:", "license 
information. # # This file was generated and any changes will be", "\"\"\" Gets and sets the browserRequireFraudWarning Returns: bool: The browserRequireFraudWarning \"\"\" if \"browserRequireFraudWarning\"", "def browser_block_enterprise_mode_access(self, val): self._prop_dict[\"browserBlockEnterpriseModeAccess\"] = val @property def browser_block_java_script(self): \"\"\" Gets and sets", "import OneDriveObjectBase class Windows81GeneralConfiguration(OneDriveObjectBase): def __init__(self, prop_dict={}): self._prop_dict = prop_dict @property def accounts_block_adding_non_microsoft_account_email(self):", "in self._prop_dict: return self._prop_dict[\"browserRequireFirewall\"] else: return None @browser_require_firewall.setter def browser_require_firewall(self, val): self._prop_dict[\"browserRequireFirewall\"] =", "All Rights Reserved. Licensed under the MIT License. See License in the project", "browser_block_popups(self, val): self._prop_dict[\"browserBlockPopups\"] = val @property def browser_block_sending_do_not_track_header(self): \"\"\" Gets and sets the", "else: return None @storage_require_device_encryption.setter def storage_require_device_encryption(self, val): self._prop_dict[\"storageRequireDeviceEncryption\"] = val @property def updates_require_automatic_updates(self):", "the browserTrustedSitesSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserTrustedSitesSecurityLevel \"\"\" if \"browserTrustedSitesSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserTrustedSitesSecurityLevel\"],", "self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] = val @property def password_minimum_character_set_count(self): \"\"\" Gets and sets the passwordMinimumCharacterSetCount Returns:", "if \"browserRequireHighSecurityForRestrictedSites\" in self._prop_dict: return self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] else: return None @browser_require_high_security_for_restricted_sites.setter def browser_require_high_security_for_restricted_sites(self, val):", "if \"browserBlockAutomaticDetectionOfIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] else: return None @browser_block_automatic_detection_of_intranet_sites.setter def browser_block_automatic_detection_of_intranet_sites(self, val):", "None @browser_intranet_security_level.setter def browser_intranet_security_level(self, val): self._prop_dict[\"browserIntranetSecurityLevel\"] = val @property def browser_logging_report_location(self): \"\"\" Gets", "The passwordSignInFailureCountBeforeFactoryReset \"\"\" if \"passwordSignInFailureCountBeforeFactoryReset\" in self._prop_dict: return self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] else: return None @password_sign_in_failure_count_before_factory_reset.setter", "self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] else: return None @browser_block_single_word_entry_on_intranet_sites.setter def browser_block_single_word_entry_on_intranet_sites(self, val): self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] = val @property def", "sets the passwordMinimumLength Returns: int: The passwordMinimumLength \"\"\" if \"passwordMinimumLength\" in self._prop_dict: return", "and sets the browserBlockPlugins Returns: bool: The browserBlockPlugins \"\"\" if \"browserBlockPlugins\" in self._prop_dict:", "Gets and sets the workFoldersUrl Returns: str: The workFoldersUrl \"\"\" if \"workFoldersUrl\" 
in", "browserRequireHighSecurityForRestrictedSites \"\"\" if \"browserRequireHighSecurityForRestrictedSites\" in self._prop_dict: return self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] else: return None @browser_require_high_security_for_restricted_sites.setter def", "val): self._prop_dict[\"cellularBlockDataRoaming\"] = val @property def diagnostics_block_data_submission(self): \"\"\" Gets and sets the diagnosticsBlockDataSubmission", "return None @password_minutes_of_inactivity_before_screen_timeout.setter def password_minutes_of_inactivity_before_screen_timeout(self, val): self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] = val @property def password_minimum_character_set_count(self): \"\"\"", "\"\"\" Gets and sets the diagnosticsBlockDataSubmission Returns: bool: The diagnosticsBlockDataSubmission \"\"\" if \"diagnosticsBlockDataSubmission\"", "passwordMinimumCharacterSetCount \"\"\" if \"passwordMinimumCharacterSetCount\" in self._prop_dict: return self._prop_dict[\"passwordMinimumCharacterSetCount\"] else: return None @password_minimum_character_set_count.setter def", "def storage_require_device_encryption(self, val): self._prop_dict[\"storageRequireDeviceEncryption\"] = val @property def updates_require_automatic_updates(self): \"\"\" Gets and sets", "return None @updates_require_automatic_updates.setter def updates_require_automatic_updates(self, val): self._prop_dict[\"updatesRequireAutomaticUpdates\"] = val @property def user_account_control_settings(self): \"\"\"", "browser_enterprise_mode_site_list_location(self): \"\"\" Gets and sets the browserEnterpriseModeSiteListLocation Returns: str: The browserEnterpriseModeSiteListLocation \"\"\" if", "Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserIntranetSecurityLevel \"\"\" if \"browserIntranetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserIntranetSecurityLevel\"], OneDriveObjectBase): return", "None @password_minimum_length.setter def password_minimum_length(self, val): self._prop_dict[\"passwordMinimumLength\"] = val @property def password_minutes_of_inactivity_before_screen_timeout(self): \"\"\" Gets", "None @password_minutes_of_inactivity_before_screen_timeout.setter def password_minutes_of_inactivity_before_screen_timeout(self, val): self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] = val @property def password_minimum_character_set_count(self): \"\"\" Gets", "Returns: :class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`: The passwordRequiredType \"\"\" if \"passwordRequiredType\" in self._prop_dict: if isinstance(self._prop_dict[\"passwordRequiredType\"], OneDriveObjectBase): return", "__init__(self, prop_dict={}): self._prop_dict = prop_dict @property def accounts_block_adding_non_microsoft_account_email(self): \"\"\" Gets and sets the", "accounts_block_adding_non_microsoft_account_email(self, val): self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] = val @property def apply_only_to_windows81(self): \"\"\" Gets and sets the", "@property def browser_block_autofill(self): \"\"\" Gets and sets the browserBlockAutofill Returns: bool: The browserBlockAutofill", "browserTrustedSitesSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserTrustedSitesSecurityLevel \"\"\" if \"browserTrustedSitesSecurityLevel\" in self._prop_dict: if 
isinstance(self._prop_dict[\"browserTrustedSitesSecurityLevel\"], OneDriveObjectBase):", "utf-8 -*- ''' # Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under", "browser_trusted_sites_security_level(self): \"\"\" Gets and sets the browserTrustedSitesSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserTrustedSitesSecurityLevel \"\"\" if", "self._prop_dict[\"diagnosticsBlockDataSubmission\"] else: return None @diagnostics_block_data_submission.setter def diagnostics_block_data_submission(self, val): self._prop_dict[\"diagnosticsBlockDataSubmission\"] = val @property def", "OneDriveObjectBase): return self._prop_dict[\"passwordRequiredType\"] else : self._prop_dict[\"passwordRequiredType\"] = RequiredPasswordType(self._prop_dict[\"passwordRequiredType\"]) return self._prop_dict[\"passwordRequiredType\"] return None @password_required_type.setter", "self._prop_dict[\"userAccountControlSettings\"] return None @user_account_control_settings.setter def user_account_control_settings(self, val): self._prop_dict[\"userAccountControlSettings\"] = val @property def work_folders_url(self):", "\"\"\" Gets and sets the browserLoggingReportLocation Returns: str: The browserLoggingReportLocation \"\"\" if \"browserLoggingReportLocation\"", "Gets and sets the passwordMinimumCharacterSetCount Returns: int: The passwordMinimumCharacterSetCount \"\"\" if \"passwordMinimumCharacterSetCount\" in", "self._prop_dict: return self._prop_dict[\"browserRequireSmartScreen\"] else: return None @browser_require_smart_screen.setter def browser_require_smart_screen(self, val): self._prop_dict[\"browserRequireSmartScreen\"] = val", "Gets and sets the userAccountControlSettings Returns: :class:`WindowsUserAccountControlSettings<onedrivesdk.model.windows_user_account_control_settings.WindowsUserAccountControlSettings>`: The userAccountControlSettings \"\"\" if \"userAccountControlSettings\" in", "= val @property def browser_require_high_security_for_restricted_sites(self): \"\"\" Gets and sets the browserRequireHighSecurityForRestrictedSites Returns: bool:", "\"browserRequireFirewall\" in self._prop_dict: return self._prop_dict[\"browserRequireFirewall\"] else: return None @browser_require_firewall.setter def browser_require_firewall(self, val): self._prop_dict[\"browserRequireFirewall\"]", "= val @property def browser_block_plugins(self): \"\"\" Gets and sets the browserBlockPlugins Returns: bool:", "information. # # This file was generated and any changes will be overwritten.", "\"browserEnterpriseModeSiteListLocation\" in self._prop_dict: return self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] else: return None @browser_enterprise_mode_site_list_location.setter def browser_enterprise_mode_site_list_location(self, val): self._prop_dict[\"browserEnterpriseModeSiteListLocation\"]", "This file was generated and any changes will be overwritten. 
''' from __future__", "in self._prop_dict: return self._prop_dict[\"browserRequireSmartScreen\"] else: return None @browser_require_smart_screen.setter def browser_require_smart_screen(self, val): self._prop_dict[\"browserRequireSmartScreen\"] =", "= val @property def browser_intranet_security_level(self): \"\"\" Gets and sets the browserIntranetSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`:", "..model.windows_user_account_control_settings import WindowsUserAccountControlSettings from ..one_drive_object_base import OneDriveObjectBase class Windows81GeneralConfiguration(OneDriveObjectBase): def __init__(self, prop_dict={}): self._prop_dict", "and sets the browserBlockSingleWordEntryOnIntranetSites Returns: bool: The browserBlockSingleWordEntryOnIntranetSites \"\"\" if \"browserBlockSingleWordEntryOnIntranetSites\" in self._prop_dict:", "@property def password_minimum_length(self): \"\"\" Gets and sets the passwordMinimumLength Returns: int: The passwordMinimumLength", "and sets the passwordMinimumLength Returns: int: The passwordMinimumLength \"\"\" if \"passwordMinimumLength\" in self._prop_dict:", "in the project root for license information. # # This file was generated", "sets the browserRequireHighSecurityForRestrictedSites Returns: bool: The browserRequireHighSecurityForRestrictedSites \"\"\" if \"browserRequireHighSecurityForRestrictedSites\" in self._prop_dict: return", "else: return None @browser_require_fraud_warning.setter def browser_require_fraud_warning(self, val): self._prop_dict[\"browserRequireFraudWarning\"] = val @property def browser_trusted_sites_security_level(self):", "applyOnlyToWindows81 Returns: bool: The applyOnlyToWindows81 \"\"\" if \"applyOnlyToWindows81\" in self._prop_dict: return self._prop_dict[\"applyOnlyToWindows81\"] else:", "def browser_block_plugins(self): \"\"\" Gets and sets the browserBlockPlugins Returns: bool: The browserBlockPlugins \"\"\"", "bool: The passwordBlockPicturePasswordAndPin \"\"\" if \"passwordBlockPicturePasswordAndPin\" in self._prop_dict: return self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] else: return None", "self._prop_dict[\"browserBlockEnterpriseModeAccess\"] = val @property def browser_block_java_script(self): \"\"\" Gets and sets the browserBlockJavaScript Returns:", "= val @property def cellular_block_data_roaming(self): \"\"\" Gets and sets the cellularBlockDataRoaming Returns: bool:", "self._prop_dict[\"passwordMinimumLength\"] = val @property def password_minutes_of_inactivity_before_screen_timeout(self): \"\"\" Gets and sets the passwordMinutesOfInactivityBeforeScreenTimeout Returns:", "def browser_block_single_word_entry_on_intranet_sites(self, val): self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] = val @property def browser_require_smart_screen(self): \"\"\" Gets and sets", "prop_dict @property def accounts_block_adding_non_microsoft_account_email(self): \"\"\" Gets and sets the accountsBlockAddingNonMicrosoftAccountEmail Returns: bool: The", "\"\"\" if \"browserLoggingReportLocation\" in self._prop_dict: return self._prop_dict[\"browserLoggingReportLocation\"] else: return None @browser_logging_report_location.setter def browser_logging_report_location(self,", "self._prop_dict: return self._prop_dict[\"browserRequireFraudWarning\"] else: return None @browser_require_fraud_warning.setter def browser_require_fraud_warning(self, val): self._prop_dict[\"browserRequireFraudWarning\"] = val", 
"self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] else: return None @password_minutes_of_inactivity_before_screen_timeout.setter def password_minutes_of_inactivity_before_screen_timeout(self, val): self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] = val @property def", "return self._prop_dict[\"storageRequireDeviceEncryption\"] else: return None @storage_require_device_encryption.setter def storage_require_device_encryption(self, val): self._prop_dict[\"storageRequireDeviceEncryption\"] = val @property", "browser_enterprise_mode_site_list_location(self, val): self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] = val @property def browser_internet_security_level(self): \"\"\" Gets and sets the", "self._prop_dict[\"userAccountControlSettings\"] = val @property def work_folders_url(self): \"\"\" Gets and sets the workFoldersUrl Returns:", "self._prop_dict[\"passwordExpirationDays\"] else: return None @password_expiration_days.setter def password_expiration_days(self, val): self._prop_dict[\"passwordExpirationDays\"] = val @property def", "None @browser_internet_security_level.setter def browser_internet_security_level(self, val): self._prop_dict[\"browserInternetSecurityLevel\"] = val @property def browser_intranet_security_level(self): \"\"\" Gets", "sets the browserLoggingReportLocation Returns: str: The browserLoggingReportLocation \"\"\" if \"browserLoggingReportLocation\" in self._prop_dict: return", "\"diagnosticsBlockDataSubmission\" in self._prop_dict: return self._prop_dict[\"diagnosticsBlockDataSubmission\"] else: return None @diagnostics_block_data_submission.setter def diagnostics_block_data_submission(self, val): self._prop_dict[\"diagnosticsBlockDataSubmission\"]", "\"\"\" Gets and sets the workFoldersUrl Returns: str: The workFoldersUrl \"\"\" if \"workFoldersUrl\"", "in self._prop_dict: return self._prop_dict[\"browserBlockEnterpriseModeAccess\"] else: return None @browser_block_enterprise_mode_access.setter def browser_block_enterprise_mode_access(self, val): self._prop_dict[\"browserBlockEnterpriseModeAccess\"] =", "Returns: bool: The updatesRequireAutomaticUpdates \"\"\" if \"updatesRequireAutomaticUpdates\" in self._prop_dict: return self._prop_dict[\"updatesRequireAutomaticUpdates\"] else: return", "return self._prop_dict[\"passwordExpirationDays\"] else: return None @password_expiration_days.setter def password_expiration_days(self, val): self._prop_dict[\"passwordExpirationDays\"] = val @property", "return None @password_block_picture_password_and_pin.setter def password_block_picture_password_and_pin(self, val): self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] = val @property def password_expiration_days(self): \"\"\"", "OneDriveObjectBase): return self._prop_dict[\"browserInternetSecurityLevel\"] else : self._prop_dict[\"browserInternetSecurityLevel\"] = InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"]) return self._prop_dict[\"browserInternetSecurityLevel\"] return None @browser_internet_security_level.setter", "\"browserTrustedSitesSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserTrustedSitesSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] else : self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserTrustedSitesSecurityLevel\"])", "The accountsBlockAddingNonMicrosoftAccountEmail \"\"\" if \"accountsBlockAddingNonMicrosoftAccountEmail\" in self._prop_dict: return 
self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] else: return None @accounts_block_adding_non_microsoft_account_email.setter", "\"browserBlockSendingDoNotTrackHeader\" in self._prop_dict: return self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else: return None @browser_block_sending_do_not_track_header.setter def browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"]", "return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] else : self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserTrustedSitesSecurityLevel\"]) return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] return None @browser_trusted_sites_security_level.setter def", "@browser_block_autofill.setter def browser_block_autofill(self, val): self._prop_dict[\"browserBlockAutofill\"] = val @property def browser_block_automatic_detection_of_intranet_sites(self): \"\"\" Gets and", "browserInternetSecurityLevel \"\"\" if \"browserInternetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserInternetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserInternetSecurityLevel\"] else :", "password_block_picture_password_and_pin(self, val): self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] = val @property def password_expiration_days(self): \"\"\" Gets and sets the", "Returns: bool: The browserBlockJavaScript \"\"\" if \"browserBlockJavaScript\" in self._prop_dict: return self._prop_dict[\"browserBlockJavaScript\"] else: return", "The passwordBlockPicturePasswordAndPin \"\"\" if \"passwordBlockPicturePasswordAndPin\" in self._prop_dict: return self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] else: return None @password_block_picture_password_and_pin.setter", "return self._prop_dict[\"browserIntranetSecurityLevel\"] else : self._prop_dict[\"browserIntranetSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserIntranetSecurityLevel\"]) return self._prop_dict[\"browserIntranetSecurityLevel\"] return None @browser_intranet_security_level.setter def", "else: return None @browser_logging_report_location.setter def browser_logging_report_location(self, val): self._prop_dict[\"browserLoggingReportLocation\"] = val @property def browser_require_high_security_for_restricted_sites(self):", "else: return None @browser_block_enterprise_mode_access.setter def browser_block_enterprise_mode_access(self, val): self._prop_dict[\"browserBlockEnterpriseModeAccess\"] = val @property def browser_block_java_script(self):", "\"\"\" Gets and sets the passwordPreviousPasswordBlockCount Returns: int: The passwordPreviousPasswordBlockCount \"\"\" if \"passwordPreviousPasswordBlockCount\"", "Gets and sets the diagnosticsBlockDataSubmission Returns: bool: The diagnosticsBlockDataSubmission \"\"\" if \"diagnosticsBlockDataSubmission\" in", "\"\"\" if \"browserBlockPlugins\" in self._prop_dict: return self._prop_dict[\"browserBlockPlugins\"] else: return None @browser_block_plugins.setter def browser_block_plugins(self,", "def cellular_block_data_roaming(self, val): self._prop_dict[\"cellularBlockDataRoaming\"] = val @property def diagnostics_block_data_submission(self): \"\"\" Gets and sets", "Licensed under the MIT License. See License in the project root for license", "generated and any changes will be overwritten. 
''' from __future__ import unicode_literals from", "\"\"\" Gets and sets the browserBlockPopups Returns: bool: The browserBlockPopups \"\"\" if \"browserBlockPopups\"", "def browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val @property def browser_block_single_word_entry_on_intranet_sites(self): \"\"\" Gets and sets", "val): self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] = val @property def password_expiration_days(self): \"\"\" Gets and sets the passwordExpirationDays", "diagnostics_block_data_submission(self, val): self._prop_dict[\"diagnosticsBlockDataSubmission\"] = val @property def password_block_picture_password_and_pin(self): \"\"\" Gets and sets the", "@property def browser_trusted_sites_security_level(self): \"\"\" Gets and sets the browserTrustedSitesSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserTrustedSitesSecurityLevel", "The browserBlockAutofill \"\"\" if \"browserBlockAutofill\" in self._prop_dict: return self._prop_dict[\"browserBlockAutofill\"] else: return None @browser_block_autofill.setter", "None @browser_block_enterprise_mode_access.setter def browser_block_enterprise_mode_access(self, val): self._prop_dict[\"browserBlockEnterpriseModeAccess\"] = val @property def browser_block_java_script(self): \"\"\" Gets", "browserLoggingReportLocation \"\"\" if \"browserLoggingReportLocation\" in self._prop_dict: return self._prop_dict[\"browserLoggingReportLocation\"] else: return None @browser_logging_report_location.setter def", "Gets and sets the applyOnlyToWindows81 Returns: bool: The applyOnlyToWindows81 \"\"\" if \"applyOnlyToWindows81\" in", "browserBlockAutomaticDetectionOfIntranetSites \"\"\" if \"browserBlockAutomaticDetectionOfIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] else: return None @browser_block_automatic_detection_of_intranet_sites.setter def", "val @property def storage_require_device_encryption(self): \"\"\" Gets and sets the storageRequireDeviceEncryption Returns: bool: The", "self._prop_dict[\"browserRequireFirewall\"] else: return None @browser_require_firewall.setter def browser_require_firewall(self, val): self._prop_dict[\"browserRequireFirewall\"] = val @property def", "\"\"\" if \"passwordMinutesOfInactivityBeforeScreenTimeout\" in self._prop_dict: return self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] else: return None @password_minutes_of_inactivity_before_screen_timeout.setter def password_minutes_of_inactivity_before_screen_timeout(self,", "in self._prop_dict: if isinstance(self._prop_dict[\"passwordRequiredType\"], OneDriveObjectBase): return self._prop_dict[\"passwordRequiredType\"] else : self._prop_dict[\"passwordRequiredType\"] = RequiredPasswordType(self._prop_dict[\"passwordRequiredType\"]) return", "self._prop_dict[\"cellularBlockDataRoaming\"] else: return None @cellular_block_data_roaming.setter def cellular_block_data_roaming(self, val): self._prop_dict[\"cellularBlockDataRoaming\"] = val @property def", "in self._prop_dict: return self._prop_dict[\"passwordMinimumCharacterSetCount\"] else: return None @password_minimum_character_set_count.setter def password_minimum_character_set_count(self, val): self._prop_dict[\"passwordMinimumCharacterSetCount\"] =", "in self._prop_dict: return self._prop_dict[\"updatesRequireAutomaticUpdates\"] else: return None @updates_require_automatic_updates.setter def 
updates_require_automatic_updates(self, val): self._prop_dict[\"updatesRequireAutomaticUpdates\"] =", "browser_require_fraud_warning(self): \"\"\" Gets and sets the browserRequireFraudWarning Returns: bool: The browserRequireFraudWarning \"\"\" if", "passwordMinimumLength Returns: int: The passwordMinimumLength \"\"\" if \"passwordMinimumLength\" in self._prop_dict: return self._prop_dict[\"passwordMinimumLength\"] else:", "self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] else: return None @browser_block_automatic_detection_of_intranet_sites.setter def browser_block_automatic_detection_of_intranet_sites(self, val): self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] = val @property def", "Returns: int: The passwordSignInFailureCountBeforeFactoryReset \"\"\" if \"passwordSignInFailureCountBeforeFactoryReset\" in self._prop_dict: return self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] else: return", "\"browserRequireHighSecurityForRestrictedSites\" in self._prop_dict: return self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] else: return None @browser_require_high_security_for_restricted_sites.setter def browser_require_high_security_for_restricted_sites(self, val): self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"]", "The browserBlockAutomaticDetectionOfIntranetSites \"\"\" if \"browserBlockAutomaticDetectionOfIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] else: return None @browser_block_automatic_detection_of_intranet_sites.setter", "None @browser_trusted_sites_security_level.setter def browser_trusted_sites_security_level(self, val): self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = val @property def cellular_block_data_roaming(self): \"\"\" Gets", "browser_block_enterprise_mode_access(self, val): self._prop_dict[\"browserBlockEnterpriseModeAccess\"] = val @property def browser_block_java_script(self): \"\"\" Gets and sets the", "= val @property def password_minutes_of_inactivity_before_screen_timeout(self): \"\"\" Gets and sets the passwordMinutesOfInactivityBeforeScreenTimeout Returns: int:", "The browserTrustedSitesSecurityLevel \"\"\" if \"browserTrustedSitesSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserTrustedSitesSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] else", "passwordMinutesOfInactivityBeforeScreenTimeout Returns: int: The passwordMinutesOfInactivityBeforeScreenTimeout \"\"\" if \"passwordMinutesOfInactivityBeforeScreenTimeout\" in self._prop_dict: return self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] else:", "work_folders_url(self): \"\"\" Gets and sets the workFoldersUrl Returns: str: The workFoldersUrl \"\"\" if", "\"\"\" if \"browserRequireHighSecurityForRestrictedSites\" in self._prop_dict: return self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] else: return None @browser_require_high_security_for_restricted_sites.setter def browser_require_high_security_for_restricted_sites(self,", "val @property def browser_block_single_word_entry_on_intranet_sites(self): \"\"\" Gets and sets the browserBlockSingleWordEntryOnIntranetSites Returns: bool: The", "..model.required_password_type import RequiredPasswordType from ..model.windows_user_account_control_settings import WindowsUserAccountControlSettings from ..one_drive_object_base import OneDriveObjectBase class Windows81GeneralConfiguration(OneDriveObjectBase):", 
"None @accounts_block_adding_non_microsoft_account_email.setter def accounts_block_adding_non_microsoft_account_email(self, val): self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] = val @property def apply_only_to_windows81(self): \"\"\" Gets", "and sets the passwordMinimumCharacterSetCount Returns: int: The passwordMinimumCharacterSetCount \"\"\" if \"passwordMinimumCharacterSetCount\" in self._prop_dict:", "val): self._prop_dict[\"diagnosticsBlockDataSubmission\"] = val @property def password_block_picture_password_and_pin(self): \"\"\" Gets and sets the passwordBlockPicturePasswordAndPin", "and sets the browserIntranetSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserIntranetSecurityLevel \"\"\" if \"browserIntranetSecurityLevel\" in self._prop_dict:", "str: The browserLoggingReportLocation \"\"\" if \"browserLoggingReportLocation\" in self._prop_dict: return self._prop_dict[\"browserLoggingReportLocation\"] else: return None", "return self._prop_dict[\"passwordRequiredType\"] else : self._prop_dict[\"passwordRequiredType\"] = RequiredPasswordType(self._prop_dict[\"passwordRequiredType\"]) return self._prop_dict[\"passwordRequiredType\"] return None @password_required_type.setter def", "\"\"\" if \"browserIntranetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserIntranetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserIntranetSecurityLevel\"] else : self._prop_dict[\"browserIntranetSecurityLevel\"]", "# # This file was generated and any changes will be overwritten. '''", "if \"browserBlockPopups\" in self._prop_dict: return self._prop_dict[\"browserBlockPopups\"] else: return None @browser_block_popups.setter def browser_block_popups(self, val):", "browserEnterpriseModeSiteListLocation Returns: str: The browserEnterpriseModeSiteListLocation \"\"\" if \"browserEnterpriseModeSiteListLocation\" in self._prop_dict: return self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] else:", "\"\"\" Gets and sets the browserRequireFirewall Returns: bool: The browserRequireFirewall \"\"\" if \"browserRequireFirewall\"", "under the MIT License. 
See License in the project root for license information.", "if \"browserBlockPlugins\" in self._prop_dict: return self._prop_dict[\"browserBlockPlugins\"] else: return None @browser_block_plugins.setter def browser_block_plugins(self, val):", "None @cellular_block_data_roaming.setter def cellular_block_data_roaming(self, val): self._prop_dict[\"cellularBlockDataRoaming\"] = val @property def diagnostics_block_data_submission(self): \"\"\" Gets", "self._prop_dict: return self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] else: return None @browser_enterprise_mode_site_list_location.setter def browser_enterprise_mode_site_list_location(self, val): self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] = val", "password_minimum_length(self, val): self._prop_dict[\"passwordMinimumLength\"] = val @property def password_minutes_of_inactivity_before_screen_timeout(self): \"\"\" Gets and sets the", "browserEnterpriseModeSiteListLocation \"\"\" if \"browserEnterpriseModeSiteListLocation\" in self._prop_dict: return self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] else: return None @browser_enterprise_mode_site_list_location.setter def", ": self._prop_dict[\"passwordRequiredType\"] = RequiredPasswordType(self._prop_dict[\"passwordRequiredType\"]) return self._prop_dict[\"passwordRequiredType\"] return None @password_required_type.setter def password_required_type(self, val): self._prop_dict[\"passwordRequiredType\"]", "the browserIntranetSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserIntranetSecurityLevel \"\"\" if \"browserIntranetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserIntranetSecurityLevel\"],", "Gets and sets the browserLoggingReportLocation Returns: str: The browserLoggingReportLocation \"\"\" if \"browserLoggingReportLocation\" in", "self._prop_dict[\"browserBlockJavaScript\"] else: return None @browser_block_java_script.setter def browser_block_java_script(self, val): self._prop_dict[\"browserBlockJavaScript\"] = val @property def", "def updates_require_automatic_updates(self, val): self._prop_dict[\"updatesRequireAutomaticUpdates\"] = val @property def user_account_control_settings(self): \"\"\" Gets and sets", "\"\"\" if \"userAccountControlSettings\" in self._prop_dict: if isinstance(self._prop_dict[\"userAccountControlSettings\"], OneDriveObjectBase): return self._prop_dict[\"userAccountControlSettings\"] else : self._prop_dict[\"userAccountControlSettings\"]", "self._prop_dict[\"updatesRequireAutomaticUpdates\"] else: return None @updates_require_automatic_updates.setter def updates_require_automatic_updates(self, val): self._prop_dict[\"updatesRequireAutomaticUpdates\"] = val @property def", "self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] = val @property def browser_block_enterprise_mode_access(self): \"\"\" Gets and sets the browserBlockEnterpriseModeAccess Returns:", "\"browserBlockAutomaticDetectionOfIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] else: return None @browser_block_automatic_detection_of_intranet_sites.setter def browser_block_automatic_detection_of_intranet_sites(self, val): self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"]", "and sets the passwordRequiredType Returns: :class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`: The passwordRequiredType \"\"\" if \"passwordRequiredType\" in self._prop_dict:", 
"@browser_intranet_security_level.setter def browser_intranet_security_level(self, val): self._prop_dict[\"browserIntranetSecurityLevel\"] = val @property def browser_logging_report_location(self): \"\"\" Gets and", "and sets the updatesRequireAutomaticUpdates Returns: bool: The updatesRequireAutomaticUpdates \"\"\" if \"updatesRequireAutomaticUpdates\" in self._prop_dict:", "the storageRequireDeviceEncryption Returns: bool: The storageRequireDeviceEncryption \"\"\" if \"storageRequireDeviceEncryption\" in self._prop_dict: return self._prop_dict[\"storageRequireDeviceEncryption\"]", "def browser_block_plugins(self, val): self._prop_dict[\"browserBlockPlugins\"] = val @property def browser_block_popups(self): \"\"\" Gets and sets", "sets the browserIntranetSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserIntranetSecurityLevel \"\"\" if \"browserIntranetSecurityLevel\" in self._prop_dict: if", "browser_block_single_word_entry_on_intranet_sites(self): \"\"\" Gets and sets the browserBlockSingleWordEntryOnIntranetSites Returns: bool: The browserBlockSingleWordEntryOnIntranetSites \"\"\" if", "self._prop_dict: return self._prop_dict[\"applyOnlyToWindows81\"] else: return None @apply_only_to_windows81.setter def apply_only_to_windows81(self, val): self._prop_dict[\"applyOnlyToWindows81\"] = val", "self._prop_dict: return self._prop_dict[\"browserBlockPopups\"] else: return None @browser_block_popups.setter def browser_block_popups(self, val): self._prop_dict[\"browserBlockPopups\"] = val", "Corporation. All Rights Reserved. Licensed under the MIT License. See License in the", "sets the browserBlockAutofill Returns: bool: The browserBlockAutofill \"\"\" if \"browserBlockAutofill\" in self._prop_dict: return", "self._prop_dict: return self._prop_dict[\"browserRequireFirewall\"] else: return None @browser_require_firewall.setter def browser_require_firewall(self, val): self._prop_dict[\"browserRequireFirewall\"] = val", "@property def user_account_control_settings(self): \"\"\" Gets and sets the userAccountControlSettings Returns: :class:`WindowsUserAccountControlSettings<onedrivesdk.model.windows_user_account_control_settings.WindowsUserAccountControlSettings>`: The userAccountControlSettings", "= val @property def password_required_type(self): \"\"\" Gets and sets the passwordRequiredType Returns: :class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`:", "val): self._prop_dict[\"passwordMinimumCharacterSetCount\"] = val @property def password_previous_password_block_count(self): \"\"\" Gets and sets the passwordPreviousPasswordBlockCount", "Gets and sets the browserBlockPopups Returns: bool: The browserBlockPopups \"\"\" if \"browserBlockPopups\" in", "return None @browser_block_java_script.setter def browser_block_java_script(self, val): self._prop_dict[\"browserBlockJavaScript\"] = val @property def browser_block_plugins(self): \"\"\"", "browserBlockSendingDoNotTrackHeader \"\"\" if \"browserBlockSendingDoNotTrackHeader\" in self._prop_dict: return self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else: return None @browser_block_sending_do_not_track_header.setter def", "self._prop_dict: return self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] else: return None @password_block_picture_password_and_pin.setter def password_block_picture_password_and_pin(self, val): self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] = val", "self._prop_dict[\"browserInternetSecurityLevel\"] 
= InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"]) return self._prop_dict[\"browserInternetSecurityLevel\"] return None @browser_internet_security_level.setter def browser_internet_security_level(self, val): self._prop_dict[\"browserInternetSecurityLevel\"] =", "@browser_require_firewall.setter def browser_require_firewall(self, val): self._prop_dict[\"browserRequireFirewall\"] = val @property def browser_require_fraud_warning(self): \"\"\" Gets and", "password_required_type(self): \"\"\" Gets and sets the passwordRequiredType Returns: :class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`: The passwordRequiredType \"\"\" if", "None @apply_only_to_windows81.setter def apply_only_to_windows81(self, val): self._prop_dict[\"applyOnlyToWindows81\"] = val @property def browser_block_autofill(self): \"\"\" Gets", "sets the cellularBlockDataRoaming Returns: bool: The cellularBlockDataRoaming \"\"\" if \"cellularBlockDataRoaming\" in self._prop_dict: return", "updates_require_automatic_updates(self): \"\"\" Gets and sets the updatesRequireAutomaticUpdates Returns: bool: The updatesRequireAutomaticUpdates \"\"\" if", "None @password_required_type.setter def password_required_type(self, val): self._prop_dict[\"passwordRequiredType\"] = val @property def password_sign_in_failure_count_before_factory_reset(self): \"\"\" Gets", "storageRequireDeviceEncryption \"\"\" if \"storageRequireDeviceEncryption\" in self._prop_dict: return self._prop_dict[\"storageRequireDeviceEncryption\"] else: return None @storage_require_device_encryption.setter def", "= val @property def password_previous_password_block_count(self): \"\"\" Gets and sets the passwordPreviousPasswordBlockCount Returns: int:", "self._prop_dict[\"passwordPreviousPasswordBlockCount\"] else: return None @password_previous_password_block_count.setter def password_previous_password_block_count(self, val): self._prop_dict[\"passwordPreviousPasswordBlockCount\"] = val @property def", "The cellularBlockDataRoaming \"\"\" if \"cellularBlockDataRoaming\" in self._prop_dict: return self._prop_dict[\"cellularBlockDataRoaming\"] else: return None @cellular_block_data_roaming.setter", "password_previous_password_block_count(self, val): self._prop_dict[\"passwordPreviousPasswordBlockCount\"] = val @property def password_required_type(self): \"\"\" Gets and sets the", "\"\"\" if \"browserBlockEnterpriseModeAccess\" in self._prop_dict: return self._prop_dict[\"browserBlockEnterpriseModeAccess\"] else: return None @browser_block_enterprise_mode_access.setter def browser_block_enterprise_mode_access(self,", "updates_require_automatic_updates(self, val): self._prop_dict[\"updatesRequireAutomaticUpdates\"] = val @property def user_account_control_settings(self): \"\"\" Gets and sets the", "changes will be overwritten. 
''' from __future__ import unicode_literals from ..model.internet_site_security_level import InternetSiteSecurityLevel", "updatesRequireAutomaticUpdates Returns: bool: The updatesRequireAutomaticUpdates \"\"\" if \"updatesRequireAutomaticUpdates\" in self._prop_dict: return self._prop_dict[\"updatesRequireAutomaticUpdates\"] else:", ":class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`: The passwordRequiredType \"\"\" if \"passwordRequiredType\" in self._prop_dict: if isinstance(self._prop_dict[\"passwordRequiredType\"], OneDriveObjectBase): return self._prop_dict[\"passwordRequiredType\"]", "\"\"\" if \"passwordRequiredType\" in self._prop_dict: if isinstance(self._prop_dict[\"passwordRequiredType\"], OneDriveObjectBase): return self._prop_dict[\"passwordRequiredType\"] else : self._prop_dict[\"passwordRequiredType\"]", "The storageRequireDeviceEncryption \"\"\" if \"storageRequireDeviceEncryption\" in self._prop_dict: return self._prop_dict[\"storageRequireDeviceEncryption\"] else: return None @storage_require_device_encryption.setter", "in self._prop_dict: return self._prop_dict[\"browserBlockPlugins\"] else: return None @browser_block_plugins.setter def browser_block_plugins(self, val): self._prop_dict[\"browserBlockPlugins\"] =", "overwritten. ''' from __future__ import unicode_literals from ..model.internet_site_security_level import InternetSiteSecurityLevel from ..model.site_security_level import", "\"\"\" Gets and sets the browserBlockEnterpriseModeAccess Returns: bool: The browserBlockEnterpriseModeAccess \"\"\" if \"browserBlockEnterpriseModeAccess\"", "return None @apply_only_to_windows81.setter def apply_only_to_windows81(self, val): self._prop_dict[\"applyOnlyToWindows81\"] = val @property def browser_block_autofill(self): \"\"\"", "None @storage_require_device_encryption.setter def storage_require_device_encryption(self, val): self._prop_dict[\"storageRequireDeviceEncryption\"] = val @property def updates_require_automatic_updates(self): \"\"\" Gets", "None @browser_require_fraud_warning.setter def browser_require_fraud_warning(self, val): self._prop_dict[\"browserRequireFraudWarning\"] = val @property def browser_trusted_sites_security_level(self): \"\"\" Gets", "\"accountsBlockAddingNonMicrosoftAccountEmail\" in self._prop_dict: return self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] else: return None @accounts_block_adding_non_microsoft_account_email.setter def accounts_block_adding_non_microsoft_account_email(self, val): self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"]", "from ..model.required_password_type import RequiredPasswordType from ..model.windows_user_account_control_settings import WindowsUserAccountControlSettings from ..one_drive_object_base import OneDriveObjectBase class", "sets the browserInternetSecurityLevel Returns: :class:`InternetSiteSecurityLevel<onedrivesdk.model.internet_site_security_level.InternetSiteSecurityLevel>`: The browserInternetSecurityLevel \"\"\" if \"browserInternetSecurityLevel\" in self._prop_dict: if", "@property def password_block_picture_password_and_pin(self): \"\"\" Gets and sets the passwordBlockPicturePasswordAndPin Returns: bool: The passwordBlockPicturePasswordAndPin", "= val @property def storage_require_device_encryption(self): \"\"\" Gets and sets the storageRequireDeviceEncryption Returns: bool:", "return self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] else: return None @browser_block_single_word_entry_on_intranet_sites.setter def 
browser_block_single_word_entry_on_intranet_sites(self, val): self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] = val @property", "\"passwordSignInFailureCountBeforeFactoryReset\" in self._prop_dict: return self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] else: return None @password_sign_in_failure_count_before_factory_reset.setter def password_sign_in_failure_count_before_factory_reset(self, val): self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"]", "self._prop_dict[\"browserBlockPopups\"] = val @property def browser_block_sending_do_not_track_header(self): \"\"\" Gets and sets the browserBlockSendingDoNotTrackHeader Returns:", "bool: The browserRequireSmartScreen \"\"\" if \"browserRequireSmartScreen\" in self._prop_dict: return self._prop_dict[\"browserRequireSmartScreen\"] else: return None", "Returns: str: The workFoldersUrl \"\"\" if \"workFoldersUrl\" in self._prop_dict: return self._prop_dict[\"workFoldersUrl\"] else: return", "browserInternetSecurityLevel Returns: :class:`InternetSiteSecurityLevel<onedrivesdk.model.internet_site_security_level.InternetSiteSecurityLevel>`: The browserInternetSecurityLevel \"\"\" if \"browserInternetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserInternetSecurityLevel\"], OneDriveObjectBase):", "browserBlockSendingDoNotTrackHeader Returns: bool: The browserBlockSendingDoNotTrackHeader \"\"\" if \"browserBlockSendingDoNotTrackHeader\" in self._prop_dict: return self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else:", "browser_internet_security_level(self): \"\"\" Gets and sets the browserInternetSecurityLevel Returns: :class:`InternetSiteSecurityLevel<onedrivesdk.model.internet_site_security_level.InternetSiteSecurityLevel>`: The browserInternetSecurityLevel \"\"\" if", "Gets and sets the browserRequireFraudWarning Returns: bool: The browserRequireFraudWarning \"\"\" if \"browserRequireFraudWarning\" in", "workFoldersUrl \"\"\" if \"workFoldersUrl\" in self._prop_dict: return self._prop_dict[\"workFoldersUrl\"] else: return None @work_folders_url.setter def", "self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else: return None @browser_block_sending_do_not_track_header.setter def browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val @property def", "return None @browser_require_high_security_for_restricted_sites.setter def browser_require_high_security_for_restricted_sites(self, val): self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] = val @property def browser_require_firewall(self): \"\"\"", "password_previous_password_block_count(self): \"\"\" Gets and sets the passwordPreviousPasswordBlockCount Returns: int: The passwordPreviousPasswordBlockCount \"\"\" if", "\"updatesRequireAutomaticUpdates\" in self._prop_dict: return self._prop_dict[\"updatesRequireAutomaticUpdates\"] else: return None @updates_require_automatic_updates.setter def updates_require_automatic_updates(self, val): self._prop_dict[\"updatesRequireAutomaticUpdates\"]", "in self._prop_dict: return self._prop_dict[\"passwordMinimumLength\"] else: return None @password_minimum_length.setter def password_minimum_length(self, val): self._prop_dict[\"passwordMinimumLength\"] =", "else: return None @browser_require_smart_screen.setter def browser_require_smart_screen(self, val): self._prop_dict[\"browserRequireSmartScreen\"] = val @property def browser_enterprise_mode_site_list_location(self):", "def 
password_sign_in_failure_count_before_factory_reset(self): \"\"\" Gets and sets the passwordSignInFailureCountBeforeFactoryReset Returns: int: The passwordSignInFailureCountBeforeFactoryReset \"\"\"", "self._prop_dict[\"browserLoggingReportLocation\"] else: return None @browser_logging_report_location.setter def browser_logging_report_location(self, val): self._prop_dict[\"browserLoggingReportLocation\"] = val @property def", "None @diagnostics_block_data_submission.setter def diagnostics_block_data_submission(self, val): self._prop_dict[\"diagnosticsBlockDataSubmission\"] = val @property def password_block_picture_password_and_pin(self): \"\"\" Gets", "return self._prop_dict[\"browserBlockJavaScript\"] else: return None @browser_block_java_script.setter def browser_block_java_script(self, val): self._prop_dict[\"browserBlockJavaScript\"] = val @property", "browserBlockPlugins Returns: bool: The browserBlockPlugins \"\"\" if \"browserBlockPlugins\" in self._prop_dict: return self._prop_dict[\"browserBlockPlugins\"] else:", "= val @property def browser_enterprise_mode_site_list_location(self): \"\"\" Gets and sets the browserEnterpriseModeSiteListLocation Returns: str:", "return None @browser_block_sending_do_not_track_header.setter def browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val @property def browser_block_single_word_entry_on_intranet_sites(self): \"\"\"", "License. See License in the project root for license information. # # This", "browserBlockPopups Returns: bool: The browserBlockPopups \"\"\" if \"browserBlockPopups\" in self._prop_dict: return self._prop_dict[\"browserBlockPopups\"] else:", "def updates_require_automatic_updates(self): \"\"\" Gets and sets the updatesRequireAutomaticUpdates Returns: bool: The updatesRequireAutomaticUpdates \"\"\"", "if \"browserBlockSingleWordEntryOnIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] else: return None @browser_block_single_word_entry_on_intranet_sites.setter def browser_block_single_word_entry_on_intranet_sites(self, val):", "coding: utf-8 -*- ''' # Copyright (c) Microsoft Corporation. All Rights Reserved. 
Licensed", "@property def browser_require_fraud_warning(self): \"\"\" Gets and sets the browserRequireFraudWarning Returns: bool: The browserRequireFraudWarning", "str: The browserEnterpriseModeSiteListLocation \"\"\" if \"browserEnterpriseModeSiteListLocation\" in self._prop_dict: return self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] else: return None", "bool: The browserBlockAutofill \"\"\" if \"browserBlockAutofill\" in self._prop_dict: return self._prop_dict[\"browserBlockAutofill\"] else: return None", "The userAccountControlSettings \"\"\" if \"userAccountControlSettings\" in self._prop_dict: if isinstance(self._prop_dict[\"userAccountControlSettings\"], OneDriveObjectBase): return self._prop_dict[\"userAccountControlSettings\"] else", "self._prop_dict: return self._prop_dict[\"browserBlockAutofill\"] else: return None @browser_block_autofill.setter def browser_block_autofill(self, val): self._prop_dict[\"browserBlockAutofill\"] = val", "@browser_internet_security_level.setter def browser_internet_security_level(self, val): self._prop_dict[\"browserInternetSecurityLevel\"] = val @property def browser_intranet_security_level(self): \"\"\" Gets and", "self._prop_dict[\"passwordPreviousPasswordBlockCount\"] = val @property def password_required_type(self): \"\"\" Gets and sets the passwordRequiredType Returns:", "Gets and sets the passwordBlockPicturePasswordAndPin Returns: bool: The passwordBlockPicturePasswordAndPin \"\"\" if \"passwordBlockPicturePasswordAndPin\" in", "\"\"\" if \"browserEnterpriseModeSiteListLocation\" in self._prop_dict: return self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] else: return None @browser_enterprise_mode_site_list_location.setter def browser_enterprise_mode_site_list_location(self,", "the browserRequireHighSecurityForRestrictedSites Returns: bool: The browserRequireHighSecurityForRestrictedSites \"\"\" if \"browserRequireHighSecurityForRestrictedSites\" in self._prop_dict: return self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"]", "workFoldersUrl Returns: str: The workFoldersUrl \"\"\" if \"workFoldersUrl\" in self._prop_dict: return self._prop_dict[\"workFoldersUrl\"] else:", "def diagnostics_block_data_submission(self, val): self._prop_dict[\"diagnosticsBlockDataSubmission\"] = val @property def password_block_picture_password_and_pin(self): \"\"\" Gets and sets", "val): self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] = val @property def storage_require_device_encryption(self): \"\"\" Gets and sets the storageRequireDeviceEncryption", "browser_block_enterprise_mode_access(self): \"\"\" Gets and sets the browserBlockEnterpriseModeAccess Returns: bool: The browserBlockEnterpriseModeAccess \"\"\" if", "def browser_enterprise_mode_site_list_location(self, val): self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] = val @property def browser_internet_security_level(self): \"\"\" Gets and sets", "if \"browserRequireFirewall\" in self._prop_dict: return self._prop_dict[\"browserRequireFirewall\"] else: return None @browser_require_firewall.setter def browser_require_firewall(self, val):", "browserTrustedSitesSecurityLevel \"\"\" if \"browserTrustedSitesSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserTrustedSitesSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] else :", "the browserBlockPlugins Returns: bool: The browserBlockPlugins \"\"\" if \"browserBlockPlugins\" in self._prop_dict: return 
self._prop_dict[\"browserBlockPlugins\"]", "Returns: bool: The browserRequireHighSecurityForRestrictedSites \"\"\" if \"browserRequireHighSecurityForRestrictedSites\" in self._prop_dict: return self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] else: return", "self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] = val @property def browser_require_firewall(self): \"\"\" Gets and sets the browserRequireFirewall Returns:", "browserRequireFirewall \"\"\" if \"browserRequireFirewall\" in self._prop_dict: return self._prop_dict[\"browserRequireFirewall\"] else: return None @browser_require_firewall.setter def", "else: return None @password_minimum_length.setter def password_minimum_length(self, val): self._prop_dict[\"passwordMinimumLength\"] = val @property def password_minutes_of_inactivity_before_screen_timeout(self):", "def user_account_control_settings(self): \"\"\" Gets and sets the userAccountControlSettings Returns: :class:`WindowsUserAccountControlSettings<onedrivesdk.model.windows_user_account_control_settings.WindowsUserAccountControlSettings>`: The userAccountControlSettings \"\"\"", "cellularBlockDataRoaming \"\"\" if \"cellularBlockDataRoaming\" in self._prop_dict: return self._prop_dict[\"cellularBlockDataRoaming\"] else: return None @cellular_block_data_roaming.setter def", "@password_minimum_length.setter def password_minimum_length(self, val): self._prop_dict[\"passwordMinimumLength\"] = val @property def password_minutes_of_inactivity_before_screen_timeout(self): \"\"\" Gets and", "self._prop_dict: return self._prop_dict[\"passwordExpirationDays\"] else: return None @password_expiration_days.setter def password_expiration_days(self, val): self._prop_dict[\"passwordExpirationDays\"] = val", "\"\"\" if \"passwordMinimumCharacterSetCount\" in self._prop_dict: return self._prop_dict[\"passwordMinimumCharacterSetCount\"] else: return None @password_minimum_character_set_count.setter def password_minimum_character_set_count(self,", "passwordRequiredType \"\"\" if \"passwordRequiredType\" in self._prop_dict: if isinstance(self._prop_dict[\"passwordRequiredType\"], OneDriveObjectBase): return self._prop_dict[\"passwordRequiredType\"] else :", "def cellular_block_data_roaming(self): \"\"\" Gets and sets the cellularBlockDataRoaming Returns: bool: The cellularBlockDataRoaming \"\"\"", "The browserEnterpriseModeSiteListLocation \"\"\" if \"browserEnterpriseModeSiteListLocation\" in self._prop_dict: return self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] else: return None @browser_enterprise_mode_site_list_location.setter", "SiteSecurityLevel(self._prop_dict[\"browserIntranetSecurityLevel\"]) return self._prop_dict[\"browserIntranetSecurityLevel\"] return None @browser_intranet_security_level.setter def browser_intranet_security_level(self, val): self._prop_dict[\"browserIntranetSecurityLevel\"] = val @property", "\"\"\" if \"passwordMinimumLength\" in self._prop_dict: return self._prop_dict[\"passwordMinimumLength\"] else: return None @password_minimum_length.setter def password_minimum_length(self,", "return self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] else: return None @browser_require_high_security_for_restricted_sites.setter def browser_require_high_security_for_restricted_sites(self, val): self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] = val @property", "Returns: bool: The browserBlockAutofill \"\"\" if \"browserBlockAutofill\" in self._prop_dict: return self._prop_dict[\"browserBlockAutofill\"] else: 
return", "bool: The browserBlockJavaScript \"\"\" if \"browserBlockJavaScript\" in self._prop_dict: return self._prop_dict[\"browserBlockJavaScript\"] else: return None", "\"passwordExpirationDays\" in self._prop_dict: return self._prop_dict[\"passwordExpirationDays\"] else: return None @password_expiration_days.setter def password_expiration_days(self, val): self._prop_dict[\"passwordExpirationDays\"]", "def password_minimum_length(self): \"\"\" Gets and sets the passwordMinimumLength Returns: int: The passwordMinimumLength \"\"\"", "the passwordRequiredType Returns: :class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`: The passwordRequiredType \"\"\" if \"passwordRequiredType\" in self._prop_dict: if isinstance(self._prop_dict[\"passwordRequiredType\"],", "self._prop_dict[\"storageRequireDeviceEncryption\"] = val @property def updates_require_automatic_updates(self): \"\"\" Gets and sets the updatesRequireAutomaticUpdates Returns:", "= SiteSecurityLevel(self._prop_dict[\"browserIntranetSecurityLevel\"]) return self._prop_dict[\"browserIntranetSecurityLevel\"] return None @browser_intranet_security_level.setter def browser_intranet_security_level(self, val): self._prop_dict[\"browserIntranetSecurityLevel\"] = val", "browser_block_single_word_entry_on_intranet_sites(self, val): self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] = val @property def browser_require_smart_screen(self): \"\"\" Gets and sets the", "\"browserBlockPopups\" in self._prop_dict: return self._prop_dict[\"browserBlockPopups\"] else: return None @browser_block_popups.setter def browser_block_popups(self, val): self._prop_dict[\"browserBlockPopups\"]", "self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = val @property def cellular_block_data_roaming(self): \"\"\" Gets and sets the cellularBlockDataRoaming Returns:", "self._prop_dict[\"userAccountControlSettings\"] else : self._prop_dict[\"userAccountControlSettings\"] = WindowsUserAccountControlSettings(self._prop_dict[\"userAccountControlSettings\"]) return self._prop_dict[\"userAccountControlSettings\"] return None @user_account_control_settings.setter def user_account_control_settings(self,", "browser_require_high_security_for_restricted_sites(self): \"\"\" Gets and sets the browserRequireHighSecurityForRestrictedSites Returns: bool: The browserRequireHighSecurityForRestrictedSites \"\"\" if", "= val @property def diagnostics_block_data_submission(self): \"\"\" Gets and sets the diagnosticsBlockDataSubmission Returns: bool:", "browserRequireSmartScreen \"\"\" if \"browserRequireSmartScreen\" in self._prop_dict: return self._prop_dict[\"browserRequireSmartScreen\"] else: return None @browser_require_smart_screen.setter def", "See License in the project root for license information. 
# # This file", "return None @browser_logging_report_location.setter def browser_logging_report_location(self, val): self._prop_dict[\"browserLoggingReportLocation\"] = val @property def browser_require_high_security_for_restricted_sites(self): \"\"\"", "the passwordMinutesOfInactivityBeforeScreenTimeout Returns: int: The passwordMinutesOfInactivityBeforeScreenTimeout \"\"\" if \"passwordMinutesOfInactivityBeforeScreenTimeout\" in self._prop_dict: return self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"]", "\"\"\" Gets and sets the browserBlockJavaScript Returns: bool: The browserBlockJavaScript \"\"\" if \"browserBlockJavaScript\"", "= val @property def password_minimum_length(self): \"\"\" Gets and sets the passwordMinimumLength Returns: int:", "@property def browser_block_plugins(self): \"\"\" Gets and sets the browserBlockPlugins Returns: bool: The browserBlockPlugins", "@browser_block_enterprise_mode_access.setter def browser_block_enterprise_mode_access(self, val): self._prop_dict[\"browserBlockEnterpriseModeAccess\"] = val @property def browser_block_java_script(self): \"\"\" Gets and", "def storage_require_device_encryption(self): \"\"\" Gets and sets the storageRequireDeviceEncryption Returns: bool: The storageRequireDeviceEncryption \"\"\"", "browser_intranet_security_level(self, val): self._prop_dict[\"browserIntranetSecurityLevel\"] = val @property def browser_logging_report_location(self): \"\"\" Gets and sets the", "@browser_require_high_security_for_restricted_sites.setter def browser_require_high_security_for_restricted_sites(self, val): self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] = val @property def browser_require_firewall(self): \"\"\" Gets and", "The workFoldersUrl \"\"\" if \"workFoldersUrl\" in self._prop_dict: return self._prop_dict[\"workFoldersUrl\"] else: return None @work_folders_url.setter", "The browserRequireSmartScreen \"\"\" if \"browserRequireSmartScreen\" in self._prop_dict: return self._prop_dict[\"browserRequireSmartScreen\"] else: return None @browser_require_smart_screen.setter", "self._prop_dict = prop_dict @property def accounts_block_adding_non_microsoft_account_email(self): \"\"\" Gets and sets the accountsBlockAddingNonMicrosoftAccountEmail Returns:", "browser_block_java_script(self): \"\"\" Gets and sets the browserBlockJavaScript Returns: bool: The browserBlockJavaScript \"\"\" if", "return None @password_previous_password_block_count.setter def password_previous_password_block_count(self, val): self._prop_dict[\"passwordPreviousPasswordBlockCount\"] = val @property def password_required_type(self): \"\"\"", ":class:`WindowsUserAccountControlSettings<onedrivesdk.model.windows_user_account_control_settings.WindowsUserAccountControlSettings>`: The userAccountControlSettings \"\"\" if \"userAccountControlSettings\" in self._prop_dict: if isinstance(self._prop_dict[\"userAccountControlSettings\"], OneDriveObjectBase): return self._prop_dict[\"userAccountControlSettings\"]", "= val @property def work_folders_url(self): \"\"\" Gets and sets the workFoldersUrl Returns: str:", "@browser_enterprise_mode_site_list_location.setter def browser_enterprise_mode_site_list_location(self, val): self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] = val @property def browser_internet_security_level(self): \"\"\" Gets and", "None @browser_require_firewall.setter def browser_require_firewall(self, val): self._prop_dict[\"browserRequireFirewall\"] = val @property def browser_require_fraud_warning(self): \"\"\" Gets", 
"apply_only_to_windows81(self, val): self._prop_dict[\"applyOnlyToWindows81\"] = val @property def browser_block_autofill(self): \"\"\" Gets and sets the", "self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] else: return None @password_block_picture_password_and_pin.setter def password_block_picture_password_and_pin(self, val): self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] = val @property def", "def password_sign_in_failure_count_before_factory_reset(self, val): self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] = val @property def storage_require_device_encryption(self): \"\"\" Gets and sets", "\"\"\" if \"browserBlockJavaScript\" in self._prop_dict: return self._prop_dict[\"browserBlockJavaScript\"] else: return None @browser_block_java_script.setter def browser_block_java_script(self,", "browser_require_fraud_warning(self, val): self._prop_dict[\"browserRequireFraudWarning\"] = val @property def browser_trusted_sites_security_level(self): \"\"\" Gets and sets the", "bool: The browserBlockPopups \"\"\" if \"browserBlockPopups\" in self._prop_dict: return self._prop_dict[\"browserBlockPopups\"] else: return None", "val): self._prop_dict[\"passwordRequiredType\"] = val @property def password_sign_in_failure_count_before_factory_reset(self): \"\"\" Gets and sets the passwordSignInFailureCountBeforeFactoryReset", "\"\"\" if \"browserRequireFirewall\" in self._prop_dict: return self._prop_dict[\"browserRequireFirewall\"] else: return None @browser_require_firewall.setter def browser_require_firewall(self,", "password_minimum_length(self): \"\"\" Gets and sets the passwordMinimumLength Returns: int: The passwordMinimumLength \"\"\" if", "the browserBlockAutofill Returns: bool: The browserBlockAutofill \"\"\" if \"browserBlockAutofill\" in self._prop_dict: return self._prop_dict[\"browserBlockAutofill\"]", "val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val @property def browser_block_single_word_entry_on_intranet_sites(self): \"\"\" Gets and sets the browserBlockSingleWordEntryOnIntranetSites", "Gets and sets the updatesRequireAutomaticUpdates Returns: bool: The updatesRequireAutomaticUpdates \"\"\" if \"updatesRequireAutomaticUpdates\" in", "return None @user_account_control_settings.setter def user_account_control_settings(self, val): self._prop_dict[\"userAccountControlSettings\"] = val @property def work_folders_url(self): \"\"\"", "Returns: bool: The browserBlockPopups \"\"\" if \"browserBlockPopups\" in self._prop_dict: return self._prop_dict[\"browserBlockPopups\"] else: return", "browserBlockSingleWordEntryOnIntranetSites \"\"\" if \"browserBlockSingleWordEntryOnIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] else: return None @browser_block_single_word_entry_on_intranet_sites.setter def", "@cellular_block_data_roaming.setter def cellular_block_data_roaming(self, val): self._prop_dict[\"cellularBlockDataRoaming\"] = val @property def diagnostics_block_data_submission(self): \"\"\" Gets and", "in self._prop_dict: if isinstance(self._prop_dict[\"browserInternetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserInternetSecurityLevel\"] else : self._prop_dict[\"browserInternetSecurityLevel\"] = InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"]) return", "return None @browser_require_firewall.setter def browser_require_firewall(self, val): self._prop_dict[\"browserRequireFirewall\"] = val @property def browser_require_fraud_warning(self): 
\"\"\"", "None @password_sign_in_failure_count_before_factory_reset.setter def password_sign_in_failure_count_before_factory_reset(self, val): self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] = val @property def storage_require_device_encryption(self): \"\"\" Gets", "passwordBlockPicturePasswordAndPin \"\"\" if \"passwordBlockPicturePasswordAndPin\" in self._prop_dict: return self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] else: return None @password_block_picture_password_and_pin.setter def", "def password_expiration_days(self, val): self._prop_dict[\"passwordExpirationDays\"] = val @property def password_minimum_length(self): \"\"\" Gets and sets", "Returns: bool: The browserBlockSingleWordEntryOnIntranetSites \"\"\" if \"browserBlockSingleWordEntryOnIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] else: return", "\"passwordMinimumCharacterSetCount\" in self._prop_dict: return self._prop_dict[\"passwordMinimumCharacterSetCount\"] else: return None @password_minimum_character_set_count.setter def password_minimum_character_set_count(self, val): self._prop_dict[\"passwordMinimumCharacterSetCount\"]", "Returns: bool: The passwordBlockPicturePasswordAndPin \"\"\" if \"passwordBlockPicturePasswordAndPin\" in self._prop_dict: return self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] else: return", ": self._prop_dict[\"browserInternetSecurityLevel\"] = InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"]) return self._prop_dict[\"browserInternetSecurityLevel\"] return None @browser_internet_security_level.setter def browser_internet_security_level(self, val): self._prop_dict[\"browserInternetSecurityLevel\"]", "@password_sign_in_failure_count_before_factory_reset.setter def password_sign_in_failure_count_before_factory_reset(self, val): self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] = val @property def storage_require_device_encryption(self): \"\"\" Gets and", "the cellularBlockDataRoaming Returns: bool: The cellularBlockDataRoaming \"\"\" if \"cellularBlockDataRoaming\" in self._prop_dict: return self._prop_dict[\"cellularBlockDataRoaming\"]", "return self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else: return None @browser_block_sending_do_not_track_header.setter def browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val @property", "val): self._prop_dict[\"browserEnterpriseModeSiteListLocation\"] = val @property def browser_internet_security_level(self): \"\"\" Gets and sets the browserInternetSecurityLevel", "in self._prop_dict: return self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] else: return None @accounts_block_adding_non_microsoft_account_email.setter def accounts_block_adding_non_microsoft_account_email(self, val): self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] =", "@property def apply_only_to_windows81(self): \"\"\" Gets and sets the applyOnlyToWindows81 Returns: bool: The applyOnlyToWindows81", "def password_previous_password_block_count(self, val): self._prop_dict[\"passwordPreviousPasswordBlockCount\"] = val @property def password_required_type(self): \"\"\" Gets and sets", "= val @property def password_expiration_days(self): \"\"\" Gets and sets the passwordExpirationDays Returns: int:", "@browser_block_plugins.setter def browser_block_plugins(self, val): self._prop_dict[\"browserBlockPlugins\"] = val @property def browser_block_popups(self): 
\"\"\" Gets and", "def browser_block_java_script(self): \"\"\" Gets and sets the browserBlockJavaScript Returns: bool: The browserBlockJavaScript \"\"\"", "def password_block_picture_password_and_pin(self, val): self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] = val @property def password_expiration_days(self): \"\"\" Gets and sets", "None @browser_block_sending_do_not_track_header.setter def browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val @property def browser_block_single_word_entry_on_intranet_sites(self): \"\"\" Gets", "self._prop_dict[\"passwordRequiredType\"] else : self._prop_dict[\"passwordRequiredType\"] = RequiredPasswordType(self._prop_dict[\"passwordRequiredType\"]) return self._prop_dict[\"passwordRequiredType\"] return None @password_required_type.setter def password_required_type(self,", "@property def browser_block_sending_do_not_track_header(self): \"\"\" Gets and sets the browserBlockSendingDoNotTrackHeader Returns: bool: The browserBlockSendingDoNotTrackHeader", "passwordExpirationDays \"\"\" if \"passwordExpirationDays\" in self._prop_dict: return self._prop_dict[\"passwordExpirationDays\"] else: return None @password_expiration_days.setter def", "self._prop_dict[\"storageRequireDeviceEncryption\"] else: return None @storage_require_device_encryption.setter def storage_require_device_encryption(self, val): self._prop_dict[\"storageRequireDeviceEncryption\"] = val @property def", "def browser_block_popups(self): \"\"\" Gets and sets the browserBlockPopups Returns: bool: The browserBlockPopups \"\"\"", "@property def browser_enterprise_mode_site_list_location(self): \"\"\" Gets and sets the browserEnterpriseModeSiteListLocation Returns: str: The browserEnterpriseModeSiteListLocation", "if \"browserBlockJavaScript\" in self._prop_dict: return self._prop_dict[\"browserBlockJavaScript\"] else: return None @browser_block_java_script.setter def browser_block_java_script(self, val):", "return None @browser_block_single_word_entry_on_intranet_sites.setter def browser_block_single_word_entry_on_intranet_sites(self, val): self._prop_dict[\"browserBlockSingleWordEntryOnIntranetSites\"] = val @property def browser_require_smart_screen(self): \"\"\"", "\"\"\" if \"browserBlockAutomaticDetectionOfIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] else: return None @browser_block_automatic_detection_of_intranet_sites.setter def browser_block_automatic_detection_of_intranet_sites(self,", "the updatesRequireAutomaticUpdates Returns: bool: The updatesRequireAutomaticUpdates \"\"\" if \"updatesRequireAutomaticUpdates\" in self._prop_dict: return self._prop_dict[\"updatesRequireAutomaticUpdates\"]", "val @property def browser_require_firewall(self): \"\"\" Gets and sets the browserRequireFirewall Returns: bool: The", "import SiteSecurityLevel from ..model.required_password_type import RequiredPasswordType from ..model.windows_user_account_control_settings import WindowsUserAccountControlSettings from ..one_drive_object_base import", "if isinstance(self._prop_dict[\"passwordRequiredType\"], OneDriveObjectBase): return self._prop_dict[\"passwordRequiredType\"] else : self._prop_dict[\"passwordRequiredType\"] = RequiredPasswordType(self._prop_dict[\"passwordRequiredType\"]) return self._prop_dict[\"passwordRequiredType\"] return", "browser_block_autofill(self, val): self._prop_dict[\"browserBlockAutofill\"] = val @property def 
browser_block_automatic_detection_of_intranet_sites(self): \"\"\" Gets and sets the", "val): self._prop_dict[\"browserBlockEnterpriseModeAccess\"] = val @property def browser_block_java_script(self): \"\"\" Gets and sets the browserBlockJavaScript", "\"\"\" if \"browserBlockPopups\" in self._prop_dict: return self._prop_dict[\"browserBlockPopups\"] else: return None @browser_block_popups.setter def browser_block_popups(self,", "\"\"\" Gets and sets the browserIntranetSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserIntranetSecurityLevel \"\"\" if \"browserIntranetSecurityLevel\"", "password_minimum_character_set_count(self): \"\"\" Gets and sets the passwordMinimumCharacterSetCount Returns: int: The passwordMinimumCharacterSetCount \"\"\" if", "and sets the browserBlockJavaScript Returns: bool: The browserBlockJavaScript \"\"\" if \"browserBlockJavaScript\" in self._prop_dict:", "return None @diagnostics_block_data_submission.setter def diagnostics_block_data_submission(self, val): self._prop_dict[\"diagnosticsBlockDataSubmission\"] = val @property def password_block_picture_password_and_pin(self): \"\"\"", "Returns: bool: The browserBlockSendingDoNotTrackHeader \"\"\" if \"browserBlockSendingDoNotTrackHeader\" in self._prop_dict: return self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else: return", "\"\"\" if \"browserRequireSmartScreen\" in self._prop_dict: return self._prop_dict[\"browserRequireSmartScreen\"] else: return None @browser_require_smart_screen.setter def browser_require_smart_screen(self,", "project root for license information. # # This file was generated and any", "else: return None @password_sign_in_failure_count_before_factory_reset.setter def password_sign_in_failure_count_before_factory_reset(self, val): self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] = val @property def storage_require_device_encryption(self):", "browser_require_smart_screen(self): \"\"\" Gets and sets the browserRequireSmartScreen Returns: bool: The browserRequireSmartScreen \"\"\" if", "self._prop_dict: return self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] else: return None @accounts_block_adding_non_microsoft_account_email.setter def accounts_block_adding_non_microsoft_account_email(self, val): self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] = val", "browserBlockJavaScript Returns: bool: The browserBlockJavaScript \"\"\" if \"browserBlockJavaScript\" in self._prop_dict: return self._prop_dict[\"browserBlockJavaScript\"] else:", "Gets and sets the browserBlockSingleWordEntryOnIntranetSites Returns: bool: The browserBlockSingleWordEntryOnIntranetSites \"\"\" if \"browserBlockSingleWordEntryOnIntranetSites\" in", "= val @property def browser_internet_security_level(self): \"\"\" Gets and sets the browserInternetSecurityLevel Returns: :class:`InternetSiteSecurityLevel<onedrivesdk.model.internet_site_security_level.InternetSiteSecurityLevel>`:", "@property def browser_require_high_security_for_restricted_sites(self): \"\"\" Gets and sets the browserRequireHighSecurityForRestrictedSites Returns: bool: The browserRequireHighSecurityForRestrictedSites", "def password_required_type(self): \"\"\" Gets and sets the passwordRequiredType Returns: :class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`: The passwordRequiredType \"\"\"", "bool: The diagnosticsBlockDataSubmission \"\"\" if \"diagnosticsBlockDataSubmission\" in self._prop_dict: 
return self._prop_dict[\"diagnosticsBlockDataSubmission\"] else: return None", "if isinstance(self._prop_dict[\"userAccountControlSettings\"], OneDriveObjectBase): return self._prop_dict[\"userAccountControlSettings\"] else : self._prop_dict[\"userAccountControlSettings\"] = WindowsUserAccountControlSettings(self._prop_dict[\"userAccountControlSettings\"]) return self._prop_dict[\"userAccountControlSettings\"] return", "if \"diagnosticsBlockDataSubmission\" in self._prop_dict: return self._prop_dict[\"diagnosticsBlockDataSubmission\"] else: return None @diagnostics_block_data_submission.setter def diagnostics_block_data_submission(self, val):", "else: return None @browser_block_popups.setter def browser_block_popups(self, val): self._prop_dict[\"browserBlockPopups\"] = val @property def browser_block_sending_do_not_track_header(self):", "the browserLoggingReportLocation Returns: str: The browserLoggingReportLocation \"\"\" if \"browserLoggingReportLocation\" in self._prop_dict: return self._prop_dict[\"browserLoggingReportLocation\"]", "None @browser_logging_report_location.setter def browser_logging_report_location(self, val): self._prop_dict[\"browserLoggingReportLocation\"] = val @property def browser_require_high_security_for_restricted_sites(self): \"\"\" Gets", "self._prop_dict: return self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] else: return None @browser_block_sending_do_not_track_header.setter def browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val", "\"\"\" Gets and sets the browserInternetSecurityLevel Returns: :class:`InternetSiteSecurityLevel<onedrivesdk.model.internet_site_security_level.InternetSiteSecurityLevel>`: The browserInternetSecurityLevel \"\"\" if \"browserInternetSecurityLevel\"", "the passwordPreviousPasswordBlockCount Returns: int: The passwordPreviousPasswordBlockCount \"\"\" if \"passwordPreviousPasswordBlockCount\" in self._prop_dict: return self._prop_dict[\"passwordPreviousPasswordBlockCount\"]", "self._prop_dict: return self._prop_dict[\"diagnosticsBlockDataSubmission\"] else: return None @diagnostics_block_data_submission.setter def diagnostics_block_data_submission(self, val): self._prop_dict[\"diagnosticsBlockDataSubmission\"] = val", "sets the passwordExpirationDays Returns: int: The passwordExpirationDays \"\"\" if \"passwordExpirationDays\" in self._prop_dict: return", "self._prop_dict[\"browserIntranetSecurityLevel\"] else : self._prop_dict[\"browserIntranetSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserIntranetSecurityLevel\"]) return self._prop_dict[\"browserIntranetSecurityLevel\"] return None @browser_intranet_security_level.setter def browser_intranet_security_level(self,", "if \"accountsBlockAddingNonMicrosoftAccountEmail\" in self._prop_dict: return self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] else: return None @accounts_block_adding_non_microsoft_account_email.setter def accounts_block_adding_non_microsoft_account_email(self, val):", "browser_block_sending_do_not_track_header(self): \"\"\" Gets and sets the browserBlockSendingDoNotTrackHeader Returns: bool: The browserBlockSendingDoNotTrackHeader \"\"\" if", "@property def accounts_block_adding_non_microsoft_account_email(self): \"\"\" Gets and sets the accountsBlockAddingNonMicrosoftAccountEmail Returns: bool: The accountsBlockAddingNonMicrosoftAccountEmail", "The updatesRequireAutomaticUpdates \"\"\" if \"updatesRequireAutomaticUpdates\" in self._prop_dict: return 
self._prop_dict[\"updatesRequireAutomaticUpdates\"] else: return None @updates_require_automatic_updates.setter", "The browserInternetSecurityLevel \"\"\" if \"browserInternetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserInternetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserInternetSecurityLevel\"] else", "diagnosticsBlockDataSubmission Returns: bool: The diagnosticsBlockDataSubmission \"\"\" if \"diagnosticsBlockDataSubmission\" in self._prop_dict: return self._prop_dict[\"diagnosticsBlockDataSubmission\"] else:", "def work_folders_url(self): \"\"\" Gets and sets the workFoldersUrl Returns: str: The workFoldersUrl \"\"\"", "\"\"\" Gets and sets the passwordSignInFailureCountBeforeFactoryReset Returns: int: The passwordSignInFailureCountBeforeFactoryReset \"\"\" if \"passwordSignInFailureCountBeforeFactoryReset\"", "= val @property def browser_require_smart_screen(self): \"\"\" Gets and sets the browserRequireSmartScreen Returns: bool:", "bool: The accountsBlockAddingNonMicrosoftAccountEmail \"\"\" if \"accountsBlockAddingNonMicrosoftAccountEmail\" in self._prop_dict: return self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] else: return None", "= val @property def browser_require_fraud_warning(self): \"\"\" Gets and sets the browserRequireFraudWarning Returns: bool:", "return None @cellular_block_data_roaming.setter def cellular_block_data_roaming(self, val): self._prop_dict[\"cellularBlockDataRoaming\"] = val @property def diagnostics_block_data_submission(self): \"\"\"", ":class:`InternetSiteSecurityLevel<onedrivesdk.model.internet_site_security_level.InternetSiteSecurityLevel>`: The browserInternetSecurityLevel \"\"\" if \"browserInternetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserInternetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserInternetSecurityLevel\"]", "def browser_require_high_security_for_restricted_sites(self): \"\"\" Gets and sets the browserRequireHighSecurityForRestrictedSites Returns: bool: The browserRequireHighSecurityForRestrictedSites \"\"\"", "sets the accountsBlockAddingNonMicrosoftAccountEmail Returns: bool: The accountsBlockAddingNonMicrosoftAccountEmail \"\"\" if \"accountsBlockAddingNonMicrosoftAccountEmail\" in self._prop_dict: return", "val @property def browser_block_popups(self): \"\"\" Gets and sets the browserBlockPopups Returns: bool: The", "Returns: bool: The browserRequireFirewall \"\"\" if \"browserRequireFirewall\" in self._prop_dict: return self._prop_dict[\"browserRequireFirewall\"] else: return", "Gets and sets the cellularBlockDataRoaming Returns: bool: The cellularBlockDataRoaming \"\"\" if \"cellularBlockDataRoaming\" in", "= val @property def browser_block_single_word_entry_on_intranet_sites(self): \"\"\" Gets and sets the browserBlockSingleWordEntryOnIntranetSites Returns: bool:", "browser_require_firewall(self): \"\"\" Gets and sets the browserRequireFirewall Returns: bool: The browserRequireFirewall \"\"\" if", "val): self._prop_dict[\"browserBlockPopups\"] = val @property def browser_block_sending_do_not_track_header(self): \"\"\" Gets and sets the browserBlockSendingDoNotTrackHeader", "def browser_block_automatic_detection_of_intranet_sites(self, val): self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] = val @property def browser_block_enterprise_mode_access(self): \"\"\" Gets and sets", "sets the browserRequireFirewall Returns: bool: The browserRequireFirewall \"\"\" if \"browserRequireFirewall\" in 
self._prop_dict: return", "and sets the browserBlockAutomaticDetectionOfIntranetSites Returns: bool: The browserBlockAutomaticDetectionOfIntranetSites \"\"\" if \"browserBlockAutomaticDetectionOfIntranetSites\" in self._prop_dict:", "def browser_block_enterprise_mode_access(self): \"\"\" Gets and sets the browserBlockEnterpriseModeAccess Returns: bool: The browserBlockEnterpriseModeAccess \"\"\"", "def browser_enterprise_mode_site_list_location(self): \"\"\" Gets and sets the browserEnterpriseModeSiteListLocation Returns: str: The browserEnterpriseModeSiteListLocation \"\"\"", "self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] = val @property def password_expiration_days(self): \"\"\" Gets and sets the passwordExpirationDays Returns:", "and sets the browserBlockPopups Returns: bool: The browserBlockPopups \"\"\" if \"browserBlockPopups\" in self._prop_dict:", "if \"passwordRequiredType\" in self._prop_dict: if isinstance(self._prop_dict[\"passwordRequiredType\"], OneDriveObjectBase): return self._prop_dict[\"passwordRequiredType\"] else : self._prop_dict[\"passwordRequiredType\"] =", "val @property def apply_only_to_windows81(self): \"\"\" Gets and sets the applyOnlyToWindows81 Returns: bool: The", "if \"browserRequireSmartScreen\" in self._prop_dict: return self._prop_dict[\"browserRequireSmartScreen\"] else: return None @browser_require_smart_screen.setter def browser_require_smart_screen(self, val):", "return self._prop_dict[\"browserInternetSecurityLevel\"] else : self._prop_dict[\"browserInternetSecurityLevel\"] = InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"]) return self._prop_dict[\"browserInternetSecurityLevel\"] return None @browser_internet_security_level.setter def", "@password_block_picture_password_and_pin.setter def password_block_picture_password_and_pin(self, val): self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] = val @property def password_expiration_days(self): \"\"\" Gets and", "Returns: int: The passwordMinimumLength \"\"\" if \"passwordMinimumLength\" in self._prop_dict: return self._prop_dict[\"passwordMinimumLength\"] else: return", "else : self._prop_dict[\"browserInternetSecurityLevel\"] = InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"]) return self._prop_dict[\"browserInternetSecurityLevel\"] return None @browser_internet_security_level.setter def browser_internet_security_level(self, val):", "val @property def cellular_block_data_roaming(self): \"\"\" Gets and sets the cellularBlockDataRoaming Returns: bool: The", "val @property def password_block_picture_password_and_pin(self): \"\"\" Gets and sets the passwordBlockPicturePasswordAndPin Returns: bool: The", "return self._prop_dict[\"browserBlockAutofill\"] else: return None @browser_block_autofill.setter def browser_block_autofill(self, val): self._prop_dict[\"browserBlockAutofill\"] = val @property", "val): self._prop_dict[\"browserIntranetSecurityLevel\"] = val @property def browser_logging_report_location(self): \"\"\" Gets and sets the browserLoggingReportLocation", "= val @property def browser_block_sending_do_not_track_header(self): \"\"\" Gets and sets the browserBlockSendingDoNotTrackHeader Returns: bool:", "\"\"\" if \"passwordPreviousPasswordBlockCount\" in self._prop_dict: return self._prop_dict[\"passwordPreviousPasswordBlockCount\"] else: return None @password_previous_password_block_count.setter def password_previous_password_block_count(self,", "@property def password_required_type(self): \"\"\" Gets and sets the 
passwordRequiredType Returns: :class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`: The passwordRequiredType", "if \"passwordSignInFailureCountBeforeFactoryReset\" in self._prop_dict: return self._prop_dict[\"passwordSignInFailureCountBeforeFactoryReset\"] else: return None @password_sign_in_failure_count_before_factory_reset.setter def password_sign_in_failure_count_before_factory_reset(self, val):", "sets the workFoldersUrl Returns: str: The workFoldersUrl \"\"\" if \"workFoldersUrl\" in self._prop_dict: return", "browserBlockAutofill \"\"\" if \"browserBlockAutofill\" in self._prop_dict: return self._prop_dict[\"browserBlockAutofill\"] else: return None @browser_block_autofill.setter def", "@property def updates_require_automatic_updates(self): \"\"\" Gets and sets the updatesRequireAutomaticUpdates Returns: bool: The updatesRequireAutomaticUpdates", "val): self._prop_dict[\"applyOnlyToWindows81\"] = val @property def browser_block_autofill(self): \"\"\" Gets and sets the browserBlockAutofill", "else : self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserTrustedSitesSecurityLevel\"]) return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] return None @browser_trusted_sites_security_level.setter def browser_trusted_sites_security_level(self, val):", "return None @browser_block_plugins.setter def browser_block_plugins(self, val): self._prop_dict[\"browserBlockPlugins\"] = val @property def browser_block_popups(self): \"\"\"", "The passwordMinimumCharacterSetCount \"\"\" if \"passwordMinimumCharacterSetCount\" in self._prop_dict: return self._prop_dict[\"passwordMinimumCharacterSetCount\"] else: return None @password_minimum_character_set_count.setter", "self._prop_dict[\"passwordMinimumCharacterSetCount\"] else: return None @password_minimum_character_set_count.setter def password_minimum_character_set_count(self, val): self._prop_dict[\"passwordMinimumCharacterSetCount\"] = val @property def", "self._prop_dict: if isinstance(self._prop_dict[\"passwordRequiredType\"], OneDriveObjectBase): return self._prop_dict[\"passwordRequiredType\"] else : self._prop_dict[\"passwordRequiredType\"] = RequiredPasswordType(self._prop_dict[\"passwordRequiredType\"]) return self._prop_dict[\"passwordRequiredType\"]", "browser_block_plugins(self): \"\"\" Gets and sets the browserBlockPlugins Returns: bool: The browserBlockPlugins \"\"\" if", "= val @property def browser_block_automatic_detection_of_intranet_sites(self): \"\"\" Gets and sets the browserBlockAutomaticDetectionOfIntranetSites Returns: bool:", "else: return None @browser_block_sending_do_not_track_header.setter def browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val @property def browser_block_single_word_entry_on_intranet_sites(self):", "self._prop_dict[\"browserInternetSecurityLevel\"] return None @browser_internet_security_level.setter def browser_internet_security_level(self, val): self._prop_dict[\"browserInternetSecurityLevel\"] = val @property def browser_intranet_security_level(self):", ": self._prop_dict[\"browserIntranetSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserIntranetSecurityLevel\"]) return self._prop_dict[\"browserIntranetSecurityLevel\"] return None @browser_intranet_security_level.setter def browser_intranet_security_level(self, val): self._prop_dict[\"browserIntranetSecurityLevel\"]", "\"storageRequireDeviceEncryption\" in self._prop_dict: return 
self._prop_dict[\"storageRequireDeviceEncryption\"] else: return None @storage_require_device_encryption.setter def storage_require_device_encryption(self, val): self._prop_dict[\"storageRequireDeviceEncryption\"]", "and sets the userAccountControlSettings Returns: :class:`WindowsUserAccountControlSettings<onedrivesdk.model.windows_user_account_control_settings.WindowsUserAccountControlSettings>`: The userAccountControlSettings \"\"\" if \"userAccountControlSettings\" in self._prop_dict:", "def browser_block_autofill(self, val): self._prop_dict[\"browserBlockAutofill\"] = val @property def browser_block_automatic_detection_of_intranet_sites(self): \"\"\" Gets and sets", "and sets the browserBlockAutofill Returns: bool: The browserBlockAutofill \"\"\" if \"browserBlockAutofill\" in self._prop_dict:", "the diagnosticsBlockDataSubmission Returns: bool: The diagnosticsBlockDataSubmission \"\"\" if \"diagnosticsBlockDataSubmission\" in self._prop_dict: return self._prop_dict[\"diagnosticsBlockDataSubmission\"]", "Returns: str: The browserLoggingReportLocation \"\"\" if \"browserLoggingReportLocation\" in self._prop_dict: return self._prop_dict[\"browserLoggingReportLocation\"] else: return", "accountsBlockAddingNonMicrosoftAccountEmail \"\"\" if \"accountsBlockAddingNonMicrosoftAccountEmail\" in self._prop_dict: return self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] else: return None @accounts_block_adding_non_microsoft_account_email.setter def", "if \"cellularBlockDataRoaming\" in self._prop_dict: return self._prop_dict[\"cellularBlockDataRoaming\"] else: return None @cellular_block_data_roaming.setter def cellular_block_data_roaming(self, val):", "else: return None @apply_only_to_windows81.setter def apply_only_to_windows81(self, val): self._prop_dict[\"applyOnlyToWindows81\"] = val @property def browser_block_autofill(self):", "Gets and sets the browserBlockPlugins Returns: bool: The browserBlockPlugins \"\"\" if \"browserBlockPlugins\" in", "val @property def browser_enterprise_mode_site_list_location(self): \"\"\" Gets and sets the browserEnterpriseModeSiteListLocation Returns: str: The", "\"\"\" if \"browserInternetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserInternetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserInternetSecurityLevel\"] else : self._prop_dict[\"browserInternetSecurityLevel\"]", "= val @property def browser_require_firewall(self): \"\"\" Gets and sets the browserRequireFirewall Returns: bool:", "passwordPreviousPasswordBlockCount Returns: int: The passwordPreviousPasswordBlockCount \"\"\" if \"passwordPreviousPasswordBlockCount\" in self._prop_dict: return self._prop_dict[\"passwordPreviousPasswordBlockCount\"] else:", "val): self._prop_dict[\"browserBlockJavaScript\"] = val @property def browser_block_plugins(self): \"\"\" Gets and sets the browserBlockPlugins", "and sets the browserLoggingReportLocation Returns: str: The browserLoggingReportLocation \"\"\" if \"browserLoggingReportLocation\" in self._prop_dict:", "browserIntranetSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserIntranetSecurityLevel \"\"\" if \"browserIntranetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserIntranetSecurityLevel\"], OneDriveObjectBase):", "self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] else: return None @browser_require_high_security_for_restricted_sites.setter def 
browser_require_high_security_for_restricted_sites(self, val): self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] = val @property def", "The browserBlockPlugins \"\"\" if \"browserBlockPlugins\" in self._prop_dict: return self._prop_dict[\"browserBlockPlugins\"] else: return None @browser_block_plugins.setter", "return self._prop_dict[\"browserTrustedSitesSecurityLevel\"] return None @browser_trusted_sites_security_level.setter def browser_trusted_sites_security_level(self, val): self._prop_dict[\"browserTrustedSitesSecurityLevel\"] = val @property def", "self._prop_dict: return self._prop_dict[\"cellularBlockDataRoaming\"] else: return None @cellular_block_data_roaming.setter def cellular_block_data_roaming(self, val): self._prop_dict[\"cellularBlockDataRoaming\"] = val", "MIT License. See License in the project root for license information. # #", "in self._prop_dict: if isinstance(self._prop_dict[\"browserIntranetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserIntranetSecurityLevel\"] else : self._prop_dict[\"browserIntranetSecurityLevel\"] = SiteSecurityLevel(self._prop_dict[\"browserIntranetSecurityLevel\"]) return", "in self._prop_dict: return self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] else: return None @password_block_picture_password_and_pin.setter def password_block_picture_password_and_pin(self, val): self._prop_dict[\"passwordBlockPicturePasswordAndPin\"] =", "self._prop_dict[\"browserRequireSmartScreen\"] = val @property def browser_enterprise_mode_site_list_location(self): \"\"\" Gets and sets the browserEnterpriseModeSiteListLocation Returns:", "browserBlockPlugins \"\"\" if \"browserBlockPlugins\" in self._prop_dict: return self._prop_dict[\"browserBlockPlugins\"] else: return None @browser_block_plugins.setter def", "val): self._prop_dict[\"browserBlockAutofill\"] = val @property def browser_block_automatic_detection_of_intranet_sites(self): \"\"\" Gets and sets the browserBlockAutomaticDetectionOfIntranetSites", "password_minimum_character_set_count(self, val): self._prop_dict[\"passwordMinimumCharacterSetCount\"] = val @property def password_previous_password_block_count(self): \"\"\" Gets and sets the", "val @property def work_folders_url(self): \"\"\" Gets and sets the workFoldersUrl Returns: str: The", "@property def work_folders_url(self): \"\"\" Gets and sets the workFoldersUrl Returns: str: The workFoldersUrl", "sets the browserBlockSendingDoNotTrackHeader Returns: bool: The browserBlockSendingDoNotTrackHeader \"\"\" if \"browserBlockSendingDoNotTrackHeader\" in self._prop_dict: return", "= val @property def updates_require_automatic_updates(self): \"\"\" Gets and sets the updatesRequireAutomaticUpdates Returns: bool:", "@browser_require_fraud_warning.setter def browser_require_fraud_warning(self, val): self._prop_dict[\"browserRequireFraudWarning\"] = val @property def browser_trusted_sites_security_level(self): \"\"\" Gets and", "\"workFoldersUrl\" in self._prop_dict: return self._prop_dict[\"workFoldersUrl\"] else: return None @work_folders_url.setter def work_folders_url(self, val): self._prop_dict[\"workFoldersUrl\"]", "\"\"\" Gets and sets the browserRequireSmartScreen Returns: bool: The browserRequireSmartScreen \"\"\" if \"browserRequireSmartScreen\"", "= val @property def password_minimum_character_set_count(self): \"\"\" Gets and sets the passwordMinimumCharacterSetCount Returns: int:", "self._prop_dict: return self._prop_dict[\"browserBlockJavaScript\"] else: return None @browser_block_java_script.setter 
def browser_block_java_script(self, val): self._prop_dict[\"browserBlockJavaScript\"] = val", "\"\"\" Gets and sets the browserRequireHighSecurityForRestrictedSites Returns: bool: The browserRequireHighSecurityForRestrictedSites \"\"\" if \"browserRequireHighSecurityForRestrictedSites\"", "bool: The browserBlockEnterpriseModeAccess \"\"\" if \"browserBlockEnterpriseModeAccess\" in self._prop_dict: return self._prop_dict[\"browserBlockEnterpriseModeAccess\"] else: return None", "\"browserBlockJavaScript\" in self._prop_dict: return self._prop_dict[\"browserBlockJavaScript\"] else: return None @browser_block_java_script.setter def browser_block_java_script(self, val): self._prop_dict[\"browserBlockJavaScript\"]", "if \"browserIntranetSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserIntranetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserIntranetSecurityLevel\"] else : self._prop_dict[\"browserIntranetSecurityLevel\"] =", "Returns: bool: The storageRequireDeviceEncryption \"\"\" if \"storageRequireDeviceEncryption\" in self._prop_dict: return self._prop_dict[\"storageRequireDeviceEncryption\"] else: return", "Returns: bool: The browserBlockAutomaticDetectionOfIntranetSites \"\"\" if \"browserBlockAutomaticDetectionOfIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] else: return", "@updates_require_automatic_updates.setter def updates_require_automatic_updates(self, val): self._prop_dict[\"updatesRequireAutomaticUpdates\"] = val @property def user_account_control_settings(self): \"\"\" Gets and", "@property def browser_require_smart_screen(self): \"\"\" Gets and sets the browserRequireSmartScreen Returns: bool: The browserRequireSmartScreen", "val @property def browser_block_plugins(self): \"\"\" Gets and sets the browserBlockPlugins Returns: bool: The", "\"\"\" Gets and sets the passwordRequiredType Returns: :class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`: The passwordRequiredType \"\"\" if \"passwordRequiredType\"", "\"\"\" if \"browserRequireFraudWarning\" in self._prop_dict: return self._prop_dict[\"browserRequireFraudWarning\"] else: return None @browser_require_fraud_warning.setter def browser_require_fraud_warning(self,", "file was generated and any changes will be overwritten. 
''' from __future__ import", "@browser_block_sending_do_not_track_header.setter def browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val @property def browser_block_single_word_entry_on_intranet_sites(self): \"\"\" Gets and", "self._prop_dict: if isinstance(self._prop_dict[\"browserInternetSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserInternetSecurityLevel\"] else : self._prop_dict[\"browserInternetSecurityLevel\"] = InternetSiteSecurityLevel(self._prop_dict[\"browserInternetSecurityLevel\"]) return self._prop_dict[\"browserInternetSecurityLevel\"]", "else: return None @password_previous_password_block_count.setter def password_previous_password_block_count(self, val): self._prop_dict[\"passwordPreviousPasswordBlockCount\"] = val @property def password_required_type(self):", "int: The passwordPreviousPasswordBlockCount \"\"\" if \"passwordPreviousPasswordBlockCount\" in self._prop_dict: return self._prop_dict[\"passwordPreviousPasswordBlockCount\"] else: return None", "return self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] else: return None @accounts_block_adding_non_microsoft_account_email.setter def accounts_block_adding_non_microsoft_account_email(self, val): self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] = val @property", "None @browser_require_high_security_for_restricted_sites.setter def browser_require_high_security_for_restricted_sites(self, val): self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] = val @property def browser_require_firewall(self): \"\"\" Gets", "WindowsUserAccountControlSettings(self._prop_dict[\"userAccountControlSettings\"]) return self._prop_dict[\"userAccountControlSettings\"] return None @user_account_control_settings.setter def user_account_control_settings(self, val): self._prop_dict[\"userAccountControlSettings\"] = val @property", "bool: The browserBlockAutomaticDetectionOfIntranetSites \"\"\" if \"browserBlockAutomaticDetectionOfIntranetSites\" in self._prop_dict: return self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] else: return None", "self._prop_dict[\"browserLoggingReportLocation\"] = val @property def browser_require_high_security_for_restricted_sites(self): \"\"\" Gets and sets the browserRequireHighSecurityForRestrictedSites Returns:", "Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. 
See", "Gets and sets the passwordMinimumLength Returns: int: The passwordMinimumLength \"\"\" if \"passwordMinimumLength\" in", ": self._prop_dict[\"userAccountControlSettings\"] = WindowsUserAccountControlSettings(self._prop_dict[\"userAccountControlSettings\"]) return self._prop_dict[\"userAccountControlSettings\"] return None @user_account_control_settings.setter def user_account_control_settings(self, val): self._prop_dict[\"userAccountControlSettings\"]", "in self._prop_dict: return self._prop_dict[\"passwordPreviousPasswordBlockCount\"] else: return None @password_previous_password_block_count.setter def password_previous_password_block_count(self, val): self._prop_dict[\"passwordPreviousPasswordBlockCount\"] =", "val): self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] = val @property def browser_block_enterprise_mode_access(self): \"\"\" Gets and sets the browserBlockEnterpriseModeAccess", "bool: The cellularBlockDataRoaming \"\"\" if \"cellularBlockDataRoaming\" in self._prop_dict: return self._prop_dict[\"cellularBlockDataRoaming\"] else: return None", "\"passwordPreviousPasswordBlockCount\" in self._prop_dict: return self._prop_dict[\"passwordPreviousPasswordBlockCount\"] else: return None @password_previous_password_block_count.setter def password_previous_password_block_count(self, val): self._prop_dict[\"passwordPreviousPasswordBlockCount\"]", "user_account_control_settings(self): \"\"\" Gets and sets the userAccountControlSettings Returns: :class:`WindowsUserAccountControlSettings<onedrivesdk.model.windows_user_account_control_settings.WindowsUserAccountControlSettings>`: The userAccountControlSettings \"\"\" if", "in self._prop_dict: return self._prop_dict[\"cellularBlockDataRoaming\"] else: return None @cellular_block_data_roaming.setter def cellular_block_data_roaming(self, val): self._prop_dict[\"cellularBlockDataRoaming\"] =", "updatesRequireAutomaticUpdates \"\"\" if \"updatesRequireAutomaticUpdates\" in self._prop_dict: return self._prop_dict[\"updatesRequireAutomaticUpdates\"] else: return None @updates_require_automatic_updates.setter def", "\"passwordRequiredType\" in self._prop_dict: if isinstance(self._prop_dict[\"passwordRequiredType\"], OneDriveObjectBase): return self._prop_dict[\"passwordRequiredType\"] else : self._prop_dict[\"passwordRequiredType\"] = RequiredPasswordType(self._prop_dict[\"passwordRequiredType\"])", "\"\"\" Gets and sets the storageRequireDeviceEncryption Returns: bool: The storageRequireDeviceEncryption \"\"\" if \"storageRequireDeviceEncryption\"", "and sets the browserRequireFirewall Returns: bool: The browserRequireFirewall \"\"\" if \"browserRequireFirewall\" in self._prop_dict:", "\"\"\" if \"passwordExpirationDays\" in self._prop_dict: return self._prop_dict[\"passwordExpirationDays\"] else: return None @password_expiration_days.setter def password_expiration_days(self,", ":class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserTrustedSitesSecurityLevel \"\"\" if \"browserTrustedSitesSecurityLevel\" in self._prop_dict: if isinstance(self._prop_dict[\"browserTrustedSitesSecurityLevel\"], OneDriveObjectBase): return self._prop_dict[\"browserTrustedSitesSecurityLevel\"]", "and any changes will be overwritten. 
''' from __future__ import unicode_literals from ..model.internet_site_security_level", "browser_block_automatic_detection_of_intranet_sites(self, val): self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] = val @property def browser_block_enterprise_mode_access(self): \"\"\" Gets and sets the", "Gets and sets the browserBlockJavaScript Returns: bool: The browserBlockJavaScript \"\"\" if \"browserBlockJavaScript\" in", "@property def browser_block_single_word_entry_on_intranet_sites(self): \"\"\" Gets and sets the browserBlockSingleWordEntryOnIntranetSites Returns: bool: The browserBlockSingleWordEntryOnIntranetSites", "passwordPreviousPasswordBlockCount \"\"\" if \"passwordPreviousPasswordBlockCount\" in self._prop_dict: return self._prop_dict[\"passwordPreviousPasswordBlockCount\"] else: return None @password_previous_password_block_count.setter def", "def browser_block_popups(self, val): self._prop_dict[\"browserBlockPopups\"] = val @property def browser_block_sending_do_not_track_header(self): \"\"\" Gets and sets", "val @property def browser_require_fraud_warning(self): \"\"\" Gets and sets the browserRequireFraudWarning Returns: bool: The", "browser_block_sending_do_not_track_header(self, val): self._prop_dict[\"browserBlockSendingDoNotTrackHeader\"] = val @property def browser_block_single_word_entry_on_intranet_sites(self): \"\"\" Gets and sets the", "val @property def password_previous_password_block_count(self): \"\"\" Gets and sets the passwordPreviousPasswordBlockCount Returns: int: The", "and sets the workFoldersUrl Returns: str: The workFoldersUrl \"\"\" if \"workFoldersUrl\" in self._prop_dict:", "import InternetSiteSecurityLevel from ..model.site_security_level import SiteSecurityLevel from ..model.required_password_type import RequiredPasswordType from ..model.windows_user_account_control_settings import", "int: The passwordMinutesOfInactivityBeforeScreenTimeout \"\"\" if \"passwordMinutesOfInactivityBeforeScreenTimeout\" in self._prop_dict: return self._prop_dict[\"passwordMinutesOfInactivityBeforeScreenTimeout\"] else: return None", "the MIT License. See License in the project root for license information. 
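
    # Note (added for readability): the security-level, passwordRequiredType
    # and userAccountControlSettings properties below hold typed values.
    # Deserialization may leave a raw value in the dict, so their getters
    # coerce it into the model type on first access and cache the wrapped
    # value back into the dict; later reads return the typed object directly.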

    @property
    def browser_internet_security_level(self):
        """Gets and sets the browserInternetSecurityLevel (InternetSiteSecurityLevel)."""
        if "browserInternetSecurityLevel" in self._prop_dict:
            if not isinstance(self._prop_dict["browserInternetSecurityLevel"], OneDriveObjectBase):
                self._prop_dict["browserInternetSecurityLevel"] = \
                    InternetSiteSecurityLevel(self._prop_dict["browserInternetSecurityLevel"])
            return self._prop_dict["browserInternetSecurityLevel"]
        return None

    @browser_internet_security_level.setter
    def browser_internet_security_level(self, val):
        self._prop_dict["browserInternetSecurityLevel"] = val

    @property
    def browser_intranet_security_level(self):
        """Gets and sets the browserIntranetSecurityLevel (SiteSecurityLevel)."""
        if "browserIntranetSecurityLevel" in self._prop_dict:
            if not isinstance(self._prop_dict["browserIntranetSecurityLevel"], OneDriveObjectBase):
                self._prop_dict["browserIntranetSecurityLevel"] = \
                    SiteSecurityLevel(self._prop_dict["browserIntranetSecurityLevel"])
            return self._prop_dict["browserIntranetSecurityLevel"]
        return None

    @browser_intranet_security_level.setter
    def browser_intranet_security_level(self, val):
        self._prop_dict["browserIntranetSecurityLevel"] = val

    @property
    def browser_logging_report_location(self):
        """Gets and sets the browserLoggingReportLocation (str)."""
        return self._prop_dict.get("browserLoggingReportLocation")

    @browser_logging_report_location.setter
    def browser_logging_report_location(self, val):
        self._prop_dict["browserLoggingReportLocation"] = val

    @property
    def browser_require_high_security_for_restricted_sites(self):
        """Gets and sets the browserRequireHighSecurityForRestrictedSites (bool)."""
        return self._prop_dict.get("browserRequireHighSecurityForRestrictedSites")

    @browser_require_high_security_for_restricted_sites.setter
    def browser_require_high_security_for_restricted_sites(self, val):
        self._prop_dict["browserRequireHighSecurityForRestrictedSites"] = val

    @property
    def browser_require_firewall(self):
        """Gets and sets the browserRequireFirewall (bool)."""
        return self._prop_dict.get("browserRequireFirewall")

    @browser_require_firewall.setter
    def browser_require_firewall(self, val):
        self._prop_dict["browserRequireFirewall"] = val

    @property
    def browser_require_fraud_warning(self):
        """Gets and sets the browserRequireFraudWarning (bool)."""
        return self._prop_dict.get("browserRequireFraudWarning")

    @browser_require_fraud_warning.setter
    def browser_require_fraud_warning(self, val):
        self._prop_dict["browserRequireFraudWarning"] = val

    @property
    def browser_trusted_sites_security_level(self):
        """Gets and sets the browserTrustedSitesSecurityLevel (SiteSecurityLevel)."""
        if "browserTrustedSitesSecurityLevel" in self._prop_dict:
            if not isinstance(self._prop_dict["browserTrustedSitesSecurityLevel"], OneDriveObjectBase):
                self._prop_dict["browserTrustedSitesSecurityLevel"] = \
                    SiteSecurityLevel(self._prop_dict["browserTrustedSitesSecurityLevel"])
            return self._prop_dict["browserTrustedSitesSecurityLevel"]
        return None

    @browser_trusted_sites_security_level.setter
    def browser_trusted_sites_security_level(self, val):
        self._prop_dict["browserTrustedSitesSecurityLevel"] = val

    @property
    def cellular_block_data_roaming(self):
        """Gets and sets the cellularBlockDataRoaming (bool)."""
        return self._prop_dict.get("cellularBlockDataRoaming")

    @cellular_block_data_roaming.setter
    def cellular_block_data_roaming(self, val):
        self._prop_dict["cellularBlockDataRoaming"] = val

    @property
    def diagnostics_block_data_submission(self):
        """Gets and sets the diagnosticsBlockDataSubmission (bool)."""
        return self._prop_dict.get("diagnosticsBlockDataSubmission")

    @diagnostics_block_data_submission.setter
    def diagnostics_block_data_submission(self, val):
        self._prop_dict["diagnosticsBlockDataSubmission"] = val

    @property
    def password_block_picture_password_and_pin(self):
        """Gets and sets the passwordBlockPicturePasswordAndPin (bool)."""
        return self._prop_dict.get("passwordBlockPicturePasswordAndPin")

    @password_block_picture_password_and_pin.setter
    def password_block_picture_password_and_pin(self, val):
        self._prop_dict["passwordBlockPicturePasswordAndPin"] = val

    @property
    def password_expiration_days(self):
        """Gets and sets the passwordExpirationDays (int)."""
        return self._prop_dict.get("passwordExpirationDays")

    @password_expiration_days.setter
    def password_expiration_days(self, val):
        self._prop_dict["passwordExpirationDays"] = val

    @property
    def password_minimum_length(self):
        """Gets and sets the passwordMinimumLength (int)."""
        return self._prop_dict.get("passwordMinimumLength")

    @password_minimum_length.setter
    def password_minimum_length(self, val):
        self._prop_dict["passwordMinimumLength"] = val

    @property
    def password_minutes_of_inactivity_before_screen_timeout(self):
        """Gets and sets the passwordMinutesOfInactivityBeforeScreenTimeout (int)."""
        return self._prop_dict.get("passwordMinutesOfInactivityBeforeScreenTimeout")

    @password_minutes_of_inactivity_before_screen_timeout.setter
    def password_minutes_of_inactivity_before_screen_timeout(self, val):
        self._prop_dict["passwordMinutesOfInactivityBeforeScreenTimeout"] = val

    @property
    def password_minimum_character_set_count(self):
        """Gets and sets the passwordMinimumCharacterSetCount (int)."""
        return self._prop_dict.get("passwordMinimumCharacterSetCount")

    @password_minimum_character_set_count.setter
    def password_minimum_character_set_count(self, val):
        self._prop_dict["passwordMinimumCharacterSetCount"] = val

    @property
    def password_previous_password_block_count(self):
        """Gets and sets the passwordPreviousPasswordBlockCount (int)."""
        return self._prop_dict.get("passwordPreviousPasswordBlockCount")

    @password_previous_password_block_count.setter
    def password_previous_password_block_count(self, val):
        self._prop_dict["passwordPreviousPasswordBlockCount"] = val

    @property
    def password_required_type(self):
        """Gets and sets the passwordRequiredType (RequiredPasswordType)."""
        if "passwordRequiredType" in self._prop_dict:
            if not isinstance(self._prop_dict["passwordRequiredType"], OneDriveObjectBase):
                self._prop_dict["passwordRequiredType"] = \
                    RequiredPasswordType(self._prop_dict["passwordRequiredType"])
            return self._prop_dict["passwordRequiredType"]
        return None

    @password_required_type.setter
    def password_required_type(self, val):
        self._prop_dict["passwordRequiredType"] = val

    @property
    def password_sign_in_failure_count_before_factory_reset(self):
        """Gets and sets the passwordSignInFailureCountBeforeFactoryReset (int)."""
        return self._prop_dict.get("passwordSignInFailureCountBeforeFactoryReset")

    @password_sign_in_failure_count_before_factory_reset.setter
    def password_sign_in_failure_count_before_factory_reset(self, val):
        self._prop_dict["passwordSignInFailureCountBeforeFactoryReset"] = val

    @property
    def storage_require_device_encryption(self):
        """Gets and sets the storageRequireDeviceEncryption (bool)."""
        return self._prop_dict.get("storageRequireDeviceEncryption")

    @storage_require_device_encryption.setter
    def storage_require_device_encryption(self, val):
        self._prop_dict["storageRequireDeviceEncryption"] = val

    @property
    def updates_require_automatic_updates(self):
        """Gets and sets the updatesRequireAutomaticUpdates (bool)."""
        return self._prop_dict.get("updatesRequireAutomaticUpdates")

    @updates_require_automatic_updates.setter
    def updates_require_automatic_updates(self, val):
        self._prop_dict["updatesRequireAutomaticUpdates"] = val

    @property
    def user_account_control_settings(self):
        """Gets and sets the userAccountControlSettings (WindowsUserAccountControlSettings)."""
        if "userAccountControlSettings" in self._prop_dict:
            if not isinstance(self._prop_dict["userAccountControlSettings"], OneDriveObjectBase):
                self._prop_dict["userAccountControlSettings"] = \
                    WindowsUserAccountControlSettings(self._prop_dict["userAccountControlSettings"])
            return self._prop_dict["userAccountControlSettings"]
        return None

    @user_account_control_settings.setter
    def user_account_control_settings(self, val):
        self._prop_dict["userAccountControlSettings"] = val

    @property
    def work_folders_url(self):
        """Gets and sets the workFoldersUrl (str)."""
        return self._prop_dict.get("workFoldersUrl")

    @work_folders_url.setter
    def work_folders_url(self, val):
        self._prop_dict["workFoldersUrl"] = val
"else: return None @browser_block_automatic_detection_of_intranet_sites.setter def browser_block_automatic_detection_of_intranet_sites(self, val): self._prop_dict[\"browserBlockAutomaticDetectionOfIntranetSites\"] = val @property def browser_block_enterprise_mode_access(self):", "self._prop_dict[\"browserInternetSecurityLevel\"] = val @property def browser_intranet_security_level(self): \"\"\" Gets and sets the browserIntranetSecurityLevel Returns:", "the project root for license information. # # This file was generated and", "= val @property def password_block_picture_password_and_pin(self): \"\"\" Gets and sets the passwordBlockPicturePasswordAndPin Returns: bool:", "val): self._prop_dict[\"browserInternetSecurityLevel\"] = val @property def browser_intranet_security_level(self): \"\"\" Gets and sets the browserIntranetSecurityLevel", "def browser_internet_security_level(self, val): self._prop_dict[\"browserInternetSecurityLevel\"] = val @property def browser_intranet_security_level(self): \"\"\" Gets and sets", "Gets and sets the browserIntranetSecurityLevel Returns: :class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`: The browserIntranetSecurityLevel \"\"\" if \"browserIntranetSecurityLevel\" in", "int: The passwordMinimumLength \"\"\" if \"passwordMinimumLength\" in self._prop_dict: return self._prop_dict[\"passwordMinimumLength\"] else: return None", "return None @password_minimum_length.setter def password_minimum_length(self, val): self._prop_dict[\"passwordMinimumLength\"] = val @property def password_minutes_of_inactivity_before_screen_timeout(self): \"\"\"", "\"\"\" if \"accountsBlockAddingNonMicrosoftAccountEmail\" in self._prop_dict: return self._prop_dict[\"accountsBlockAddingNonMicrosoftAccountEmail\"] else: return None @accounts_block_adding_non_microsoft_account_email.setter def accounts_block_adding_non_microsoft_account_email(self,", "else: return None @browser_require_high_security_for_restricted_sites.setter def browser_require_high_security_for_restricted_sites(self, val): self._prop_dict[\"browserRequireHighSecurityForRestrictedSites\"] = val @property def browser_require_firewall(self):", "in self._prop_dict: return self._prop_dict[\"diagnosticsBlockDataSubmission\"] else: return None @diagnostics_block_data_submission.setter def diagnostics_block_data_submission(self, val): self._prop_dict[\"diagnosticsBlockDataSubmission\"] =" ]
[ "= json.dumps(data) if isinstance(proxy, bytes): proxy = proxy.decode() return proxy elif format ==", "elif format == \"toml\": return toml.dumps(data) elif format == \"yaml\": return yaml.dump(data) elif", "{group.upper()}\\n\") for extras, version in values.items(): result.append(f\"{extras}=={version}\\n\") return \"\".join(result) def formatter(data: dict, format:", "str: if format == \"json\": proxy = json.dumps(data) if isinstance(proxy, bytes): proxy =", "result.append(f\"### {group.upper()}\\n\") for extras, version in values.items(): result.append(f\"{extras}=={version}\\n\") return \"\".join(result) def formatter(data: dict,", "json except ImportError: import json import toml import yaml def format_requirements(data: dict) ->", "toml import yaml def format_requirements(data: dict) -> str: result = [] for group,", "for extras, version in values.items(): result.append(f\"{extras}=={version}\\n\") return \"\".join(result) def formatter(data: dict, format: str)", "def formatter(data: dict, format: str) -> str: if format == \"json\": proxy =", "return proxy elif format == \"toml\": return toml.dumps(data) elif format == \"yaml\": return", "values.items(): result.append(f\"{extras}=={version}\\n\") return \"\".join(result) def formatter(data: dict, format: str) -> str: if format", "format: str) -> str: if format == \"json\": proxy = json.dumps(data) if isinstance(proxy,", "for group, values in data.items(): result.append(f\"### {group.upper()}\\n\") for extras, version in values.items(): result.append(f\"{extras}=={version}\\n\")", "json.dumps(data) if isinstance(proxy, bytes): proxy = proxy.decode() return proxy elif format == \"toml\":", "in data.items(): result.append(f\"### {group.upper()}\\n\") for extras, version in values.items(): result.append(f\"{extras}=={version}\\n\") return \"\".join(result) def", "[] for group, values in data.items(): result.append(f\"### {group.upper()}\\n\") for extras, version in values.items():", "def format_requirements(data: dict) -> str: result = [] for group, values in data.items():", "import orjson as json except ImportError: import json import toml import yaml def", "import yaml def format_requirements(data: dict) -> str: result = [] for group, values", "group, values in data.items(): result.append(f\"### {group.upper()}\\n\") for extras, version in values.items(): result.append(f\"{extras}=={version}\\n\") return", "proxy elif format == \"toml\": return toml.dumps(data) elif format == \"yaml\": return yaml.dump(data)", "\"yaml\": return yaml.dump(data) elif format == \"requirements.txt\": return format_requirements(data) else: raise TypeError(f\"Invalid format", "str: result = [] for group, values in data.items(): result.append(f\"### {group.upper()}\\n\") for extras,", "str) -> str: if format == \"json\": proxy = json.dumps(data) if isinstance(proxy, bytes):", "except ImportError: import json import toml import yaml def format_requirements(data: dict) -> str:", "-> str: if format == \"json\": proxy = json.dumps(data) if isinstance(proxy, bytes): proxy", "if format == \"json\": proxy = json.dumps(data) if isinstance(proxy, bytes): proxy = proxy.decode()", "values in data.items(): result.append(f\"### {group.upper()}\\n\") for extras, version in values.items(): result.append(f\"{extras}=={version}\\n\") return \"\".join(result)", "json import toml import yaml def format_requirements(data: dict) -> str: result = []", "orjson as json except ImportError: import json import toml import yaml def format_requirements(data:", "format == \"json\": 
proxy = json.dumps(data) if isinstance(proxy, bytes): proxy = proxy.decode() return", "in values.items(): result.append(f\"{extras}=={version}\\n\") return \"\".join(result) def formatter(data: dict, format: str) -> str: if", "formatter(data: dict, format: str) -> str: if format == \"json\": proxy = json.dumps(data)", "bytes): proxy = proxy.decode() return proxy elif format == \"toml\": return toml.dumps(data) elif", "as json except ImportError: import json import toml import yaml def format_requirements(data: dict)", "result = [] for group, values in data.items(): result.append(f\"### {group.upper()}\\n\") for extras, version", "proxy = proxy.decode() return proxy elif format == \"toml\": return toml.dumps(data) elif format", "try: import orjson as json except ImportError: import json import toml import yaml", "= [] for group, values in data.items(): result.append(f\"### {group.upper()}\\n\") for extras, version in", "format == \"yaml\": return yaml.dump(data) elif format == \"requirements.txt\": return format_requirements(data) else: raise", "data.items(): result.append(f\"### {group.upper()}\\n\") for extras, version in values.items(): result.append(f\"{extras}=={version}\\n\") return \"\".join(result) def formatter(data:", "return yaml.dump(data) elif format == \"requirements.txt\": return format_requirements(data) else: raise TypeError(f\"Invalid format {format}\")", "import json import toml import yaml def format_requirements(data: dict) -> str: result =", "\"toml\": return toml.dumps(data) elif format == \"yaml\": return yaml.dump(data) elif format == \"requirements.txt\":", "format_requirements(data: dict) -> str: result = [] for group, values in data.items(): result.append(f\"###", "yaml def format_requirements(data: dict) -> str: result = [] for group, values in", "= proxy.decode() return proxy elif format == \"toml\": return toml.dumps(data) elif format ==", "proxy.decode() return proxy elif format == \"toml\": return toml.dumps(data) elif format == \"yaml\":", "-> str: result = [] for group, values in data.items(): result.append(f\"### {group.upper()}\\n\") for", "format == \"toml\": return toml.dumps(data) elif format == \"yaml\": return yaml.dump(data) elif format", "version in values.items(): result.append(f\"{extras}=={version}\\n\") return \"\".join(result) def formatter(data: dict, format: str) -> str:", "== \"yaml\": return yaml.dump(data) elif format == \"requirements.txt\": return format_requirements(data) else: raise TypeError(f\"Invalid", "isinstance(proxy, bytes): proxy = proxy.decode() return proxy elif format == \"toml\": return toml.dumps(data)", "toml.dumps(data) elif format == \"yaml\": return yaml.dump(data) elif format == \"requirements.txt\": return format_requirements(data)", "dict) -> str: result = [] for group, values in data.items(): result.append(f\"### {group.upper()}\\n\")", "if isinstance(proxy, bytes): proxy = proxy.decode() return proxy elif format == \"toml\": return", "result.append(f\"{extras}=={version}\\n\") return \"\".join(result) def formatter(data: dict, format: str) -> str: if format ==", "== \"toml\": return toml.dumps(data) elif format == \"yaml\": return yaml.dump(data) elif format ==", "extras, version in values.items(): result.append(f\"{extras}=={version}\\n\") return \"\".join(result) def formatter(data: dict, format: str) ->", "ImportError: import json import toml import yaml def format_requirements(data: dict) -> str: result", "import toml import yaml def format_requirements(data: dict) -> str: result = [] for", "proxy = 
json.dumps(data) if isinstance(proxy, bytes): proxy = proxy.decode() return proxy elif format", "\"\".join(result) def formatter(data: dict, format: str) -> str: if format == \"json\": proxy", "dict, format: str) -> str: if format == \"json\": proxy = json.dumps(data) if", "== \"json\": proxy = json.dumps(data) if isinstance(proxy, bytes): proxy = proxy.decode() return proxy", "return \"\".join(result) def formatter(data: dict, format: str) -> str: if format == \"json\":", "elif format == \"yaml\": return yaml.dump(data) elif format == \"requirements.txt\": return format_requirements(data) else:", "\"json\": proxy = json.dumps(data) if isinstance(proxy, bytes): proxy = proxy.decode() return proxy elif", "return toml.dumps(data) elif format == \"yaml\": return yaml.dump(data) elif format == \"requirements.txt\": return" ]
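formatter() dispatches purely on the format string, so each branch can be exercised with one nested {group: {package: version}} dict, the shape format_requirements() iterates over. A minimal usage sketch; the package pins are illustrative, and the toml/yaml branches assume those third-party packages are installed:

pins = {
    "main": {"requests": "2.31.0", "click": "8.1.7"},  # illustrative pins
    "dev": {"pytest": "8.0.0"},
}

print(formatter(pins, "requirements.txt"))
# ### MAIN
# requests==2.31.0
# click==8.1.7
# ### DEV
# pytest==8.0.0

print(formatter(pins, "json"))  # same data serialized as a JSON string
formatter(pins, "csv")          # raises TypeError: Invalid format csv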
[ "args.launchEuler: launchEuler(NUS[i], EPS[i], RUN[i]) #for nu in [0.002, 0.004, 0.008] : # for", "for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16) : for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16)", "python3 import os, numpy as np, argparse def relFit(nu, eps): return 7.33972668 *", "launchDaint(nCases, args.LES) if args.case < 0: cases = range(nCases) else: cases = [args.case]", "5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu); def runspec(nu, eps, run, cs): if cs is", "\\n') f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \\n') else: f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \\n')", "modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case', type = int, default = -1, help=\"Simulation case.\") args =", "HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # % (nu, eps, tke0, ext, scal, eps, nu)) #for nu in", "run, cs) else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ % (eps, nu, run) def getSettings(nu, eps,", "\\n' % (nCases-1)) #f.write('#SBATCH --partition=normal \\n') #f.write('#SBATCH --ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if les:", "1 -nprocsy 1 -nprocsz 1 ' \\ '-spectralForcing 1 -tend 100 -keepMomentumConstant 1", "case.\") args = parser.parse_args() if args.LES: rangeles = np.linspace(0.16, 0.24, 9) else: rangeles", "description = \"Compute a target file for RL agent from DNS data.\") parser.add_argument('--printName',", "0.32] : # tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) # for scal", ": # for eps in [0.02, 0.04, 0.08, 0.16, 0.32] : # tke0", "if cs is not None: options = '-sgs SSM -cs %f -bpdx 4", "return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ % (eps, nu, run) def getSettings(nu, eps, cs): if cs", "0.002, 0.004, 0.008, 0.016] : # for eps in [0.02, 0.04, 0.08, 0.16,", "4 -bpdy 4 -bpdz 4 -CFL 0.1 ' % cs else: options =", "np.power(eps, -0.25) * np.power(nu, 0.75) def lambdaFit(nu, eps): return 5.35507603 * np.power(eps,-1/6.0) *", "return 5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu); def runspec(nu, eps, run, cs): if cs", "0.1 ' % cs else: options = '-bpdx 12 -bpdy 12 -bpdz 12", "help=\"Only print run options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchEuler=False)", "as np, argparse def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu)", "continue for les in rangeles : for i in [0, 1, 2] :", "f.write('#!/bin/bash -l \\n') if les: f.write('#SBATCH --job-name=LES_HIT \\n') else: f.write('#SBATCH --job-name=DNS_HIT \\n') f.write('#SBATCH", "\\n') f.close() os.system('sbatch HIT_sbatch') if __name__ == '__main__': parser = argparse.ArgumentParser( description =", "ext, scal, eps, nu)) #for nu in [0.001, 0.002, 0.004, 0.008, 0.016] :", "f.write('#SBATCH --constraint=gpu \\n') f.write('#SBATCH --account=s929 \\n') f.write('#SBATCH --array=0-%d \\n' % (nCases-1)) #f.write('#SBATCH --partition=normal", "#for nu in [0.002, 0.004, 0.008] : # for eps in [0.02, 0.04,", "if lambdaFit(nu, eps) > 0.1 * 2 * np.pi: continue if etaFit(nu, eps)", "eps, nu)) #for nu in [0.001, 0.002, 0.004, 0.008, 0.016] : # for", "np.power(eps, (2.0/3.0) ) # for scal in [2, 3] : # tke0 =", "etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75) def lambdaFit(nu, eps): return 5.35507603", "/ eps) return options + '-extentx 6.2831853072 -dump2D 0 -dump3D 0 ' \\", "0.04, 0.08, 0.16, 0.32] : # tke0 = 2.77578963 * np.power(eps, 
(2.0/3.0) )", "h/8: continue for les in rangeles : for i in [0, 1, 2]", "cases: if args.printOptions: print( getSettings(NUS[i], EPS[i], CSS[i]) ) if args.printName: print( runspec(NUS[i], EPS[i],", "--ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \\n') f.close() os.system('sbatch HIT_sbatch') if __name__ == '__main__':", "SCRATCH = os.getenv('SCRATCH') HOME = os.getenv('HOME') f = open('HIT_sbatch','w') f.write('#!/bin/bash -l \\n') if", "-energyInjectionRate %f ' \\ % (tAnalysis, nu, eps) def launchEuler(nu, eps, run): runname", "20: continue if lambdaFit(nu, eps) > 0.1 * 2 * np.pi: continue if", "None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ % (eps, nu, run, cs) else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\", "# os.system(\"\\ # export NU=%f \\n\\ # export EPS=%f \\n\\ # export TKE0=%f", "f.write('#SBATCH --job-name=DNS_HIT \\n') f.write('#SBATCH --time=24:00:00 \\n') f.write('#SBATCH --output=out.%j.%a.txt \\n') f.write('#SBATCH --error=err.%j.%a.txt \\n') f.write('#SBATCH", "args.case < 0: cases = range(nCases) else: cases = [args.case] for i in", "--case ${ind} --printOptions` \\n') f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN} \\n'", "' \\ % (tAnalysis, nu, eps) def launchEuler(nu, eps, run): runname = runspec(nu,", "% SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation ./exec \\n' % HOME) f.write('export OMP_NUM_THREADS=12 \\n') f.write('export CRAY_CUDA_MPS=1", "scal, eps, nu)) #for nu in [0.001, 0.002, 0.004, 0.008, 0.016] : #", "'-analysis HIT -nu %f -energyInjectionRate %f ' \\ % (tAnalysis, nu, eps) def", "NU=%f \\n export EPS=%f \\n export TANALYSIS=%f \\n \" \\ \"echo $NU $EPS", "./launchEuler.sh settingsHIT_DNS.sh %s \" \\ % (nu, eps, tAnalysis, runname) ) def launchDaint(nCases,", "export EXT=%f \\n\\ # echo $NU $EPS $TKE0 $EXT \\n\\ # ./launchEuler.sh settingsHIT_DNS.sh", "SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation ./exec \\n' % HOME) f.write('export", "= argparse.ArgumentParser( description = \"Compute a target file for RL agent from DNS", "else: options = '-bpdx 12 -bpdy 12 -bpdz 12 -CFL 0.02 ' tAnalysis", "--printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \\n') f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH)", "NU=%f \\n\\ # export EPS=%f \\n\\ # export TKE0=%f \\n\\ # export EXT=%f", "% (nu, eps, tAnalysis, runname) ) def launchDaint(nCases, les): SCRATCH = os.getenv('SCRATCH') HOME", "help=\"Only print run options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES', action='store_true', help=\"Triggers LES modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case',", ") def launchDaint(nCases, les): SCRATCH = os.getenv('SCRATCH') HOME = os.getenv('HOME') f = open('HIT_sbatch','w')", "'-extentx 6.2831853072 -dump2D 0 -dump3D 0 ' \\ '-tdump 1 -BC_x periodic -BC_y", "cs else: options = '-bpdx 12 -bpdy 12 -bpdz 12 -CFL 0.02 '", "scal * np.pi # os.system(\"\\ # export NU=%f \\n\\ # export EPS=%f \\n\\", "in cases: if args.printOptions: print( getSettings(NUS[i], EPS[i], CSS[i]) ) if args.printName: print( runspec(NUS[i],", "\" \\ % (nu, eps, tAnalysis, runname) ) def launchDaint(nCases, les): SCRATCH =", "if args.launchDaint: launchDaint(nCases, args.LES) if args.case < 0: cases = range(nCases) else: cases", "[2, 3] : # tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) # for", "nCases = len(NUS) #print('Defined %d cases' % nCases) if 
args.launchDaint: launchDaint(nCases, args.LES) if", "in [2, 3] : # tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) #", "* np.power(eps, (2.0/3.0) ) # for scal in [2] : # ext =", "--ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if les: f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py", "# tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) # for scal in [2]", "% (nCases-1)) #f.write('#SBATCH --partition=normal \\n') #f.write('#SBATCH --ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if les: f.write('RUNDIRN=`./launchLESHIT.py", "(eps, nu, run) def getSettings(nu, eps, cs): if cs is not None: options", "2] : NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les] nCases = len(NUS) #print('Defined %d", "f.write('export OMP_NUM_THREADS=12 \\n') f.write('export CRAY_CUDA_MPS=1 \\n') f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \\n')", "if les: f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions`", "np.power(nu, 0.75) def lambdaFit(nu, eps): return 5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu); def runspec(nu,", ") if args.printName: print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) ) if args.launchEuler: launchEuler(NUS[i], EPS[i],", "'-tdump 1 -BC_x periodic -BC_y periodic -BC_z periodic ' \\ '-spectralIC fromFit -initCond", "12 -CFL 0.02 ' tAnalysis = np.sqrt(nu / eps) return options + '-extentx", "0: cases = range(nCases) else: cases = [args.case] for i in cases: if", "rangeles = [None] NUS, EPS, RUN, CSS = [], [], [], [] h", "tAnalysis, runname) ) def launchDaint(nCases, les): SCRATCH = os.getenv('SCRATCH') HOME = os.getenv('HOME') f", "options = '-bpdx 12 -bpdy 12 -bpdz 12 -CFL 0.02 ' tAnalysis =", "relFit(nu, eps) > 100 or relFit(nu, eps) < 20: continue if lambdaFit(nu, eps)", "__name__ == '__main__': parser = argparse.ArgumentParser( description = \"Compute a target file for", "args.launchDaint: launchDaint(nCases, args.LES) if args.case < 0: cases = range(nCases) else: cases =", "/ 16 / 12 for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16) : for eps", "./exec ${OPTIONS} \\n') f.close() os.system('sbatch HIT_sbatch') if __name__ == '__main__': parser = argparse.ArgumentParser(", "def getSettings(nu, eps, cs): if cs is not None: options = '-sgs SSM", "type = int, default = -1, help=\"Simulation case.\") args = parser.parse_args() if args.LES:", "2.77578963 * np.power(eps, (2.0/3.0) ) # for scal in [2, 3] : #", "np.log10(2.0), 16) : if relFit(nu, eps) > 100 or relFit(nu, eps) < 20:", "CRAY_CUDA_MPS=1 \\n') f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \\n') f.close() os.system('sbatch HIT_sbatch') if", "% (eps, nu, run) def getSettings(nu, eps, cs): if cs is not None:", "RUN[i]) #for nu in [0.002, 0.004, 0.008] : # for eps in [0.02,", "in np.logspace(np.log10(0.01), np.log10(2.0), 16) : if relFit(nu, eps) > 100 or relFit(nu, eps)", "f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation", "run) print(runname) tAnalysis = np.sqrt(nu / eps) os.system(\"export NU=%f \\n export EPS=%f \\n", "= np.sqrt(nu / eps) os.system(\"export NU=%f \\n export EPS=%f \\n export TANALYSIS=%f \\n", "int, default = -1, help=\"Simulation case.\") args = parser.parse_args() if args.LES: rangeles =", "(2.0/3.0) ) # for scal in [2, 3] : # tke0 = 2.77578963", "nu in np.logspace(np.log10(0.002), 
np.log10(0.02), 16) : for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) :", "= runspec(nu, eps, run) print(runname) tAnalysis = np.sqrt(nu / eps) os.system(\"export NU=%f \\n", "if cs is not None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ % (eps, nu, run, cs)", "# export EPS=%f \\n\\ # export TKE0=%f \\n\\ # export EXT=%f \\n\\ #", "--case ${ind} --printOptions` \\n') else: f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --case ${ind}", "0 -dump3D 0 ' \\ '-tdump 1 -BC_x periodic -BC_y periodic -BC_z periodic", "settingsHIT_DNS.sh %s \" \\ % (nu, eps, tAnalysis, runname) ) def launchDaint(nCases, les):", "options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES', action='store_true',", "def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu) def etaFit(nu, eps):", "ext = scal * np.pi # os.system(\"\\ # export NU=%f \\n\\ # export", "[], [] h = 2 * np.pi / 16 / 12 for nu", "name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions', action='store_true', help=\"Only print run options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint', action='store_true',", "%f ' \\ % (tAnalysis, nu, eps) def launchEuler(nu, eps, run): runname =", "= scal * np.pi # os.system(\"\\ # export NU=%f \\n\\ # export EPS=%f", "np.sqrt(nu); def runspec(nu, eps, run, cs): if cs is not None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\"", "# for scal in [2] : # ext = scal * np.pi #", "' \\ '-tdump 1 -BC_x periodic -BC_y periodic -BC_z periodic ' \\ '-spectralIC", "-bpdy 12 -bpdz 12 -CFL 0.02 ' tAnalysis = np.sqrt(nu / eps) return", "runspec(nu, eps, run, cs): if cs is not None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ %", "is not None: options = '-sgs SSM -cs %f -bpdx 4 -bpdy 4", "= NUS+[nu], EPS+[eps], RUN+[i], CSS+[les] nCases = len(NUS) #print('Defined %d cases' % nCases)", "for scal in [2] : # ext = scal * np.pi # os.system(\"\\", "etaFit(nu, eps) < h/8: continue for les in rangeles : for i in", "tAnalysis = np.sqrt(nu / eps) os.system(\"export NU=%f \\n export EPS=%f \\n export TANALYSIS=%f", "print run options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES', action='store_true', help=\"Triggers LES modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case', type", "\\n') f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cp", "np.log10(0.02), 16) : for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) : if relFit(nu, eps)", ": for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) : if relFit(nu, eps) > 100", "argparse.ArgumentParser( description = \"Compute a target file for RL agent from DNS data.\")", "launchEuler(nu, eps, run): runname = runspec(nu, eps, run) print(runname) tAnalysis = np.sqrt(nu /", "numpy as np, argparse def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) /", "0.75) def lambdaFit(nu, eps): return 5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu); def runspec(nu, eps,", "np.power(eps,-1/6.0) * np.sqrt(nu); def runspec(nu, eps, run, cs): if cs is not None:", "\\ % (tAnalysis, nu, eps) def launchEuler(nu, eps, run): runname = runspec(nu, eps,", "0.02 
' tAnalysis = np.sqrt(nu / eps) return options + '-extentx 6.2831853072 -dump2D", "f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if les: f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --LES --case", ") # for scal in [2, 3] : # tke0 = 2.77578963 *", "%s/CubismUP_3D/bin/simulation ./exec \\n' % HOME) f.write('export OMP_NUM_THREADS=12 \\n') f.write('export CRAY_CUDA_MPS=1 \\n') f.write('srun --ntasks", "options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES', action='store_true', help=\"Triggers LES modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case', type = int,", "--job-name=DNS_HIT \\n') f.write('#SBATCH --time=24:00:00 \\n') f.write('#SBATCH --output=out.%j.%a.txt \\n') f.write('#SBATCH --error=err.%j.%a.txt \\n') f.write('#SBATCH --constraint=gpu", "= int, default = -1, help=\"Simulation case.\") args = parser.parse_args() if args.LES: rangeles", ") # for scal in [2] : # ext = scal * np.pi", "eps, run) print(runname) tAnalysis = np.sqrt(nu / eps) os.system(\"export NU=%f \\n export EPS=%f", "-CFL 0.1 ' % cs else: options = '-bpdx 12 -bpdy 12 -bpdz", "run options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES', action='store_true', help=\"Triggers LES modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case', type =", "--partition=normal \\n') #f.write('#SBATCH --ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if les: f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind}", "nu in [0.001, 0.002, 0.004, 0.008, 0.016] : # for eps in [0.02,", "\\n') f.write('#SBATCH --constraint=gpu \\n') f.write('#SBATCH --account=s929 \\n') f.write('#SBATCH --array=0-%d \\n' % (nCases-1)) #f.write('#SBATCH", "NUS+[nu], EPS+[eps], RUN+[i], CSS+[les] nCases = len(NUS) #print('Defined %d cases' % nCases) if", "% (nu, eps, tke0, ext, scal, eps, nu)) #for nu in [0.001, 0.002,", "cs) else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ % (eps, nu, run) def getSettings(nu, eps, cs):", "\\n ./launchEuler.sh settingsHIT_DNS.sh %s \" \\ % (nu, eps, tAnalysis, runname) ) def", "parser.add_argument('--case', type = int, default = -1, help=\"Simulation case.\") args = parser.parse_args() if", "0.24, 9) else: rangeles = [None] NUS, EPS, RUN, CSS = [], [],", "help=\"Triggers LES modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case', type = int, default = -1, help=\"Simulation case.\")", "NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les] nCases = len(NUS) #print('Defined %d cases' %", "if les: f.write('#SBATCH --job-name=LES_HIT \\n') else: f.write('#SBATCH --job-name=DNS_HIT \\n') f.write('#SBATCH --time=24:00:00 \\n') f.write('#SBATCH", "if relFit(nu, eps) > 100 or relFit(nu, eps) < 20: continue if lambdaFit(nu,", "# ext = scal * np.pi # os.system(\"\\ # export NU=%f \\n\\ #", "lambdaFit(nu, eps) > 0.1 * 2 * np.pi: continue if etaFit(nu, eps) >", "parser.add_argument('--LES', dest='LES', action='store_true', help=\"Triggers LES modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case', type = int, default =", "echo $NU $EPS $TKE0 $EXT \\n\\ # ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # % (nu,", "def launchEuler(nu, eps, run): runname = runspec(nu, eps, run) print(runname) tAnalysis = np.sqrt(nu", "= [], [], [], [] h = 2 * np.pi / 16 /", "= '-sgs SSM -cs %f -bpdx 4 -bpdy 4 -bpdz 4 -CFL 0.1", "if args.case < 0: cases = range(nCases) else: cases = [args.case] for i", 
"print run name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions', action='store_true', help=\"Only print run options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint',", "\\n\\ # export EXT=%f \\n\\ # echo $NU $EPS $TKE0 $EXT \\n\\ #", "nu)) #for nu in [0.001, 0.002, 0.004, 0.008, 0.016] : # for eps", "eps) < h/8: continue for les in rangeles : for i in [0,", "f.close() os.system('sbatch HIT_sbatch') if __name__ == '__main__': parser = argparse.ArgumentParser( description = \"Compute", "2.77578963 * np.power(eps, (2.0/3.0) ) # for scal in [2] : # ext", "%s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation ./exec \\n' % HOME) f.write('export OMP_NUM_THREADS=12 \\n')", "a target file for RL agent from DNS data.\") parser.add_argument('--printName', dest='printName', action='store_true', help=\"Only", "\\n') f.write('#SBATCH --error=err.%j.%a.txt \\n') f.write('#SBATCH --constraint=gpu \\n') f.write('#SBATCH --account=s929 \\n') f.write('#SBATCH --array=0-%d \\n'", "1 -BC_x periodic -BC_y periodic -BC_z periodic ' \\ '-spectralIC fromFit -initCond HITurbulence", "f.write('cp %s/CubismUP_3D/bin/simulation ./exec \\n' % HOME) f.write('export OMP_NUM_THREADS=12 \\n') f.write('export CRAY_CUDA_MPS=1 \\n') f.write('srun", "\\n') if les: f.write('#SBATCH --job-name=LES_HIT \\n') else: f.write('#SBATCH --job-name=DNS_HIT \\n') f.write('#SBATCH --time=24:00:00 \\n')", "f.write('#SBATCH --job-name=LES_HIT \\n') else: f.write('#SBATCH --job-name=DNS_HIT \\n') f.write('#SBATCH --time=24:00:00 \\n') f.write('#SBATCH --output=out.%j.%a.txt \\n')", "\\n export TANALYSIS=%f \\n \" \\ \"echo $NU $EPS \\n ./launchEuler.sh settingsHIT_DNS.sh %s", "parser.set_defaults(LES=False) parser.add_argument('--case', type = int, default = -1, help=\"Simulation case.\") args = parser.parse_args()", "def runspec(nu, eps, run, cs): if cs is not None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\", "f = open('HIT_sbatch','w') f.write('#!/bin/bash -l \\n') if les: f.write('#SBATCH --job-name=LES_HIT \\n') else: f.write('#SBATCH", ": # tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) # for scal in", "else: rangeles = [None] NUS, EPS, RUN, CSS = [], [], [], []", "# for scal in [2, 3] : # tke0 = 2.77578963 * np.power(eps,", "return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ % (eps, nu, run, cs) else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ %", "tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) # for scal in [2] :", "if args.printName: print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) ) if args.launchEuler: launchEuler(NUS[i], EPS[i], RUN[i])", "-l \\n') if les: f.write('#SBATCH --job-name=LES_HIT \\n') else: f.write('#SBATCH --job-name=DNS_HIT \\n') f.write('#SBATCH --time=24:00:00", "' \\ '-compute-dissipation 1 -nprocsx 1 -nprocsy 1 -nprocsz 1 ' \\ '-spectralForcing", "np.sqrt(nu / eps) os.system(\"export NU=%f \\n export EPS=%f \\n export TANALYSIS=%f \\n \"", "export EPS=%f \\n export TANALYSIS=%f \\n \" \\ \"echo $NU $EPS \\n ./launchEuler.sh", "* 2 * np.pi: continue if etaFit(nu, eps) > h or etaFit(nu, eps)", "'-compute-dissipation 1 -nprocsx 1 -nprocsy 1 -nprocsz 1 ' \\ '-spectralForcing 1 -tend", "np.logspace(np.log10(0.002), np.log10(0.02), 16) : for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) : if relFit(nu,", "(eps, nu, run, cs) else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ % (eps, nu, run) def", "options = '-sgs SSM -cs %f -bpdx 4 -bpdy 4 -bpdz 4 
-CFL", "0 ' \\ '-tdump 1 -BC_x periodic -BC_y periodic -BC_z periodic ' \\", "\"Compute a target file for RL agent from DNS data.\") parser.add_argument('--printName', dest='printName', action='store_true',", "os.getenv('SCRATCH') HOME = os.getenv('HOME') f = open('HIT_sbatch','w') f.write('#!/bin/bash -l \\n') if les: f.write('#SBATCH", "-1, help=\"Simulation case.\") args = parser.parse_args() if args.LES: rangeles = np.linspace(0.16, 0.24, 9)", "[0.001, 0.002, 0.004, 0.008, 0.016] : # for eps in [0.02, 0.04, 0.08,", "f.write('#SBATCH --time=24:00:00 \\n') f.write('#SBATCH --output=out.%j.%a.txt \\n') f.write('#SBATCH --error=err.%j.%a.txt \\n') f.write('#SBATCH --constraint=gpu \\n') f.write('#SBATCH", "--case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \\n') f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \\n'", "scal in [2, 3] : # tke0 = 2.77578963 * np.power(eps, (2.0/3.0) )", "SSM -cs %f -bpdx 4 -bpdy 4 -bpdz 4 -CFL 0.1 ' %", "-initCond HITurbulence -tAnalysis %f ' \\ '-compute-dissipation 1 -nprocsx 1 -nprocsy 1 -nprocsz", "action='store_true', help=\"Only print run options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES', action='store_true', help=\"Triggers LES modeling.\") parser.set_defaults(LES=False)", "np.sqrt(nu / eps) return options + '-extentx 6.2831853072 -dump2D 0 -dump3D 0 '", "* np.power(eps,-1/6.0) * np.sqrt(nu); def runspec(nu, eps, run, cs): if cs is not", "h or etaFit(nu, eps) < h/8: continue for les in rangeles : for", "-bpdx 4 -bpdy 4 -bpdz 4 -CFL 0.1 ' % cs else: options", "relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu) def etaFit(nu, eps): return", "(nu, eps, tAnalysis, runname) ) def launchDaint(nCases, les): SCRATCH = os.getenv('SCRATCH') HOME =", "for scal in [2, 3] : # tke0 = 2.77578963 * np.power(eps, (2.0/3.0)", "= open('HIT_sbatch','w') f.write('#!/bin/bash -l \\n') if les: f.write('#SBATCH --job-name=LES_HIT \\n') else: f.write('#SBATCH --job-name=DNS_HIT", "EPS, RUN, CSS = [], [], [], [] h = 2 * np.pi", "len(NUS) #print('Defined %d cases' % nCases) if args.launchDaint: launchDaint(nCases, args.LES) if args.case <", "runname) ) def launchDaint(nCases, les): SCRATCH = os.getenv('SCRATCH') HOME = os.getenv('HOME') f =", "* np.power(eps, 1/6.0) / np.sqrt(nu) def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu,", "eps) > 100 or relFit(nu, eps) < 20: continue if lambdaFit(nu, eps) >", "f.write('#SBATCH --array=0-%d \\n' % (nCases-1)) #f.write('#SBATCH --partition=normal \\n') #f.write('#SBATCH --ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n')", "options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler', action='store_true',", "[], [], [] h = 2 * np.pi / 16 / 12 for", "parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler', action='store_true', help=\"Only", "else: f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \\n') f.write('mkdir -p", "+ '-extentx 6.2831853072 -dump2D 0 -dump3D 0 ' \\ '-tdump 1 -BC_x periodic", "else: f.write('#SBATCH --job-name=DNS_HIT 
\\n') f.write('#SBATCH --time=24:00:00 \\n') f.write('#SBATCH --output=out.%j.%a.txt \\n') f.write('#SBATCH --error=err.%j.%a.txt \\n')", "periodic -BC_y periodic -BC_z periodic ' \\ '-spectralIC fromFit -initCond HITurbulence -tAnalysis %f", "\\ '-spectralForcing 1 -tend 100 -keepMomentumConstant 1 ' \\ '-analysis HIT -nu %f", "* np.power(nu, 0.75) def lambdaFit(nu, eps): return 5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu); def", "h = 2 * np.pi / 16 / 12 for nu in np.logspace(np.log10(0.002),", "eps, tAnalysis, runname) ) def launchDaint(nCases, les): SCRATCH = os.getenv('SCRATCH') HOME = os.getenv('HOME')", "(tAnalysis, nu, eps) def launchEuler(nu, eps, run): runname = runspec(nu, eps, run) print(runname)", "-p %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation ./exec", "0.1 * 2 * np.pi: continue if etaFit(nu, eps) > h or etaFit(nu,", "-dump2D 0 -dump3D 0 ' \\ '-tdump 1 -BC_x periodic -BC_y periodic -BC_z", "OMP_NUM_THREADS=12 \\n') f.write('export CRAY_CUDA_MPS=1 \\n') f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \\n') f.close()", "\\n') f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \\n') f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cd", "f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \\n') f.write('mkdir -p %s/CubismUP3D/${RUNDIRN}", "[args.case] for i in cases: if args.printOptions: print( getSettings(NUS[i], EPS[i], CSS[i]) ) if", "for eps in [0.02, 0.04, 0.08, 0.16, 0.32] : # tke0 = 2.77578963", "-BC_x periodic -BC_y periodic -BC_z periodic ' \\ '-spectralIC fromFit -initCond HITurbulence -tAnalysis", "\\n') else: f.write('#SBATCH --job-name=DNS_HIT \\n') f.write('#SBATCH --time=24:00:00 \\n') f.write('#SBATCH --output=out.%j.%a.txt \\n') f.write('#SBATCH --error=err.%j.%a.txt", "run options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler',", "os.system('sbatch HIT_sbatch') if __name__ == '__main__': parser = argparse.ArgumentParser( description = \"Compute a", "cases' % nCases) if args.launchDaint: launchDaint(nCases, args.LES) if args.case < 0: cases =", "--case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \\n') else: f.write('RUNDIRN=`./launchLESHIT.py --case", "np.power(eps, (2.0/3.0) ) # for scal in [2] : # ext = scal", "getSettings(NUS[i], EPS[i], CSS[i]) ) if args.printName: print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) ) if", "nu, run, cs) else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ % (eps, nu, run) def getSettings(nu,", "eps, cs): if cs is not None: options = '-sgs SSM -cs %f", "CSS[i]) ) if args.launchEuler: launchEuler(NUS[i], EPS[i], RUN[i]) #for nu in [0.002, 0.004, 0.008]", "4 -CFL 0.1 ' % cs else: options = '-bpdx 12 -bpdy 12", "> h or etaFit(nu, eps) < h/8: continue for les in rangeles :", "parser.add_argument('--launchDaint', dest='launchDaint', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler', action='store_true', help=\"Only print", "' \\ '-analysis HIT -nu %f -energyInjectionRate %f ' \\ % (tAnalysis, nu,", "\\n') f.write('#SBATCH --output=out.%j.%a.txt \\n') 
f.write('#SBATCH --error=err.%j.%a.txt \\n') f.write('#SBATCH --constraint=gpu \\n') f.write('#SBATCH --account=s929 \\n')", "args = parser.parse_args() if args.LES: rangeles = np.linspace(0.16, 0.24, 9) else: rangeles =", "0.16, 0.32] : # tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) # for", "eps in [0.02, 0.04, 0.08, 0.16, 0.32] : # tke0 = 2.77578963 *", "LES modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case', type = int, default = -1, help=\"Simulation case.\") args", "cs): if cs is not None: options = '-sgs SSM -cs %f -bpdx", "= np.linspace(0.16, 0.24, 9) else: rangeles = [None] NUS, EPS, RUN, CSS =", "TKE0=%f \\n\\ # export EXT=%f \\n\\ # echo $NU $EPS $TKE0 $EXT \\n\\", "EPS=%f \\n\\ # export TKE0=%f \\n\\ # export EXT=%f \\n\\ # echo $NU", "print(runname) tAnalysis = np.sqrt(nu / eps) os.system(\"export NU=%f \\n export EPS=%f \\n export", "\"echo $NU $EPS \\n ./launchEuler.sh settingsHIT_DNS.sh %s \" \\ % (nu, eps, tAnalysis,", "${OPTIONS} \\n') f.close() os.system('sbatch HIT_sbatch') if __name__ == '__main__': parser = argparse.ArgumentParser( description", "help=\"Only print run options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchDaint=False)", "\\n\\ # export EPS=%f \\n\\ # export TKE0=%f \\n\\ # export EXT=%f \\n\\", "#f.write('#SBATCH --partition=normal \\n') #f.write('#SBATCH --ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if les: f.write('RUNDIRN=`./launchLESHIT.py --LES --case", "np.pi # os.system(\"\\ # export NU=%f \\n\\ # export EPS=%f \\n\\ # export", "range(nCases) else: cases = [args.case] for i in cases: if args.printOptions: print( getSettings(NUS[i],", "os.getenv('HOME') f = open('HIT_sbatch','w') f.write('#!/bin/bash -l \\n') if les: f.write('#SBATCH --job-name=LES_HIT \\n') else:", "options + '-extentx 6.2831853072 -dump2D 0 -dump3D 0 ' \\ '-tdump 1 -BC_x", "action='store_true', help=\"Only print run options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler', action='store_true', help=\"Only print run options.\")", "cs is not None: options = '-sgs SSM -cs %f -bpdx 4 -bpdy", ": for i in [0, 1, 2] : NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i],", "runname = runspec(nu, eps, run) print(runname) tAnalysis = np.sqrt(nu / eps) os.system(\"export NU=%f", "--printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \\n') else: f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName`", "\\ '-compute-dissipation 1 -nprocsx 1 -nprocsy 1 -nprocsz 1 ' \\ '-spectralForcing 1", "(nCases-1)) #f.write('#SBATCH --partition=normal \\n') #f.write('#SBATCH --ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if les: f.write('RUNDIRN=`./launchLESHIT.py --LES", "data.\") parser.add_argument('--printName', dest='printName', action='store_true', help=\"Only print run name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions', action='store_true', help=\"Only", "os, numpy as np, argparse def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0)", "HOME) f.write('export OMP_NUM_THREADS=12 \\n') f.write('export CRAY_CUDA_MPS=1 \\n') f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS}", "lambdaFit(nu, eps): return 5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu); def runspec(nu, eps, run, cs):", "args.printName: print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) ) 
if args.launchEuler: launchEuler(NUS[i], EPS[i], RUN[i]) #for", "dest='launchDaint', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler', action='store_true', help=\"Only print run", "action='store_true', help=\"Triggers LES modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case', type = int, default = -1, help=\"Simulation", "= len(NUS) #print('Defined %d cases' % nCases) if args.launchDaint: launchDaint(nCases, args.LES) if args.case", "export TANALYSIS=%f \\n \" \\ \"echo $NU $EPS \\n ./launchEuler.sh settingsHIT_DNS.sh %s \"", "\\n') else: f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \\n') f.write('mkdir", "f.write('#SBATCH --account=s929 \\n') f.write('#SBATCH --array=0-%d \\n' % (nCases-1)) #f.write('#SBATCH --partition=normal \\n') #f.write('#SBATCH --ntasks-per-node=1", ": # ext = scal * np.pi # os.system(\"\\ # export NU=%f \\n\\", "# export EXT=%f \\n\\ # echo $NU $EPS $TKE0 $EXT \\n\\ # ./launchEuler.sh", "run): runname = runspec(nu, eps, run) print(runname) tAnalysis = np.sqrt(nu / eps) os.system(\"export", "(2.0/3.0) ) # for scal in [2] : # ext = scal *", "= [args.case] for i in cases: if args.printOptions: print( getSettings(NUS[i], EPS[i], CSS[i]) )", "or etaFit(nu, eps) < h/8: continue for les in rangeles : for i", "return options + '-extentx 6.2831853072 -dump2D 0 -dump3D 0 ' \\ '-tdump 1", "EPS+[eps], RUN+[i], CSS+[les] nCases = len(NUS) #print('Defined %d cases' % nCases) if args.launchDaint:", "\\n' % SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation ./exec \\n' % HOME) f.write('export OMP_NUM_THREADS=12 \\n') f.write('export", "7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu) def etaFit(nu, eps): return np.power(eps, -0.25) *", "import os, numpy as np, argparse def relFit(nu, eps): return 7.33972668 * np.power(eps,", "np.sqrt(nu) def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75) def lambdaFit(nu, eps):", "HIT -nu %f -energyInjectionRate %f ' \\ % (tAnalysis, nu, eps) def launchEuler(nu,", "NUS, EPS, RUN, CSS = [], [], [], [] h = 2 *", "f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \\n') f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN}", "run, cs): if cs is not None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ % (eps, nu,", "eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) : if relFit(nu, eps) > 100 or relFit(nu,", "continue if etaFit(nu, eps) > h or etaFit(nu, eps) < h/8: continue for", "< 0: cases = range(nCases) else: cases = [args.case] for i in cases:", "$NU $EPS \\n ./launchEuler.sh settingsHIT_DNS.sh %s \" \\ % (nu, eps, tAnalysis, runname)", "CSS[i]) ) if args.printName: print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) ) if args.launchEuler: launchEuler(NUS[i],", "f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \\n') else:", "# export NU=%f \\n\\ # export EPS=%f \\n\\ # export TKE0=%f \\n\\ #", "'-spectralIC fromFit -initCond HITurbulence -tAnalysis %f ' \\ '-compute-dissipation 1 -nprocsx 1 -nprocsy", "runspec(nu, eps, run) print(runname) tAnalysis = np.sqrt(nu / eps) os.system(\"export NU=%f \\n export", "--LES --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \\n') else: 
f.write('RUNDIRN=`./launchLESHIT.py", "print run options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler',", "periodic -BC_z periodic ' \\ '-spectralIC fromFit -initCond HITurbulence -tAnalysis %f ' \\", "help=\"Simulation case.\") args = parser.parse_args() if args.LES: rangeles = np.linspace(0.16, 0.24, 9) else:", "DNS data.\") parser.add_argument('--printName', dest='printName', action='store_true', help=\"Only print run name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions', action='store_true',", "or relFit(nu, eps) < 20: continue if lambdaFit(nu, eps) > 0.1 * 2", "np, argparse def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu) def", "\\n\\ # ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # % (nu, eps, tke0, ext, scal, eps,", "export EPS=%f \\n\\ # export TKE0=%f \\n\\ # export EXT=%f \\n\\ # echo", "scal in [2] : # ext = scal * np.pi # os.system(\"\\ #", "= [None] NUS, EPS, RUN, CSS = [], [], [], [] h =", "args.printOptions: print( getSettings(NUS[i], EPS[i], CSS[i]) ) if args.printName: print( runspec(NUS[i], EPS[i], RUN[i], CSS[i])", "np.pi: continue if etaFit(nu, eps) > h or etaFit(nu, eps) < h/8: continue", "in [0, 1, 2] : NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les] nCases =", "= 2 * np.pi / 16 / 12 for nu in np.logspace(np.log10(0.002), np.log10(0.02),", "1 -nprocsx 1 -nprocsy 1 -nprocsz 1 ' \\ '-spectralForcing 1 -tend 100", "if args.LES: rangeles = np.linspace(0.16, 0.24, 9) else: rangeles = [None] NUS, EPS,", "#for nu in [0.001, 0.002, 0.004, 0.008, 0.016] : # for eps in", "return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu) def etaFit(nu, eps): return np.power(eps, -0.25)", "2 * np.pi: continue if etaFit(nu, eps) > h or etaFit(nu, eps) <", "%f -bpdx 4 -bpdy 4 -bpdz 4 -CFL 0.1 ' % cs else:", "-tAnalysis %f ' \\ '-compute-dissipation 1 -nprocsx 1 -nprocsy 1 -nprocsz 1 '", "TANALYSIS=%f \\n \" \\ \"echo $NU $EPS \\n ./launchEuler.sh settingsHIT_DNS.sh %s \" \\", "1 ' \\ '-analysis HIT -nu %f -energyInjectionRate %f ' \\ % (tAnalysis,", "2 * np.pi / 16 / 12 for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16)", "cs is not None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ % (eps, nu, run, cs) else:", "12 -bpdz 12 -CFL 0.02 ' tAnalysis = np.sqrt(nu / eps) return options", "cs): if cs is not None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ % (eps, nu, run,", "f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \\n') f.close() os.system('sbatch HIT_sbatch') if __name__ ==", "# echo $NU $EPS $TKE0 $EXT \\n\\ # ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # %", "== '__main__': parser = argparse.ArgumentParser( description = \"Compute a target file for RL", "-nu %f -energyInjectionRate %f ' \\ % (tAnalysis, nu, eps) def launchEuler(nu, eps,", "\\n') f.write('export CRAY_CUDA_MPS=1 \\n') f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \\n') f.close() os.system('sbatch", "settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # % (nu, eps, tke0, ext, scal, eps, nu)) #for nu", "EPS[i], CSS[i]) ) if args.printName: print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) ) if args.launchEuler:", "RUN[i], CSS[i]) ) if args.launchEuler: launchEuler(NUS[i], EPS[i], RUN[i]) #for nu in [0.002, 0.004,", 
"np.logspace(np.log10(0.01), np.log10(2.0), 16) : if relFit(nu, eps) > 100 or relFit(nu, eps) <", "in [2] : # ext = scal * np.pi # os.system(\"\\ # export", "\\n' % HOME) f.write('export OMP_NUM_THREADS=12 \\n') f.write('export CRAY_CUDA_MPS=1 \\n') f.write('srun --ntasks 1 --ntasks-per-node=1", "nu, eps) def launchEuler(nu, eps, run): runname = runspec(nu, eps, run) print(runname) tAnalysis", "argparse def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu) def etaFit(nu,", "HIT_sbatch') if __name__ == '__main__': parser = argparse.ArgumentParser( description = \"Compute a target", "f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \\n') else: f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py", "if etaFit(nu, eps) > h or etaFit(nu, eps) < h/8: continue for les", "[], [], [], [] h = 2 * np.pi / 16 / 12", "parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES', action='store_true', help=\"Triggers LES modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case', type = int, default", "./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # % (nu, eps, tke0, ext, scal, eps, nu)) #for", "agent from DNS data.\") parser.add_argument('--printName', dest='printName', action='store_true', help=\"Only print run name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions',", "in rangeles : for i in [0, 1, 2] : NUS,EPS,RUN,CSS = NUS+[nu],", "else: cases = [args.case] for i in cases: if args.printOptions: print( getSettings(NUS[i], EPS[i],", "[None] NUS, EPS, RUN, CSS = [], [], [], [] h = 2", "args.LES) if args.case < 0: cases = range(nCases) else: cases = [args.case] for", "--printOptions` \\n') f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH)", "\\n') f.write('#SBATCH --account=s929 \\n') f.write('#SBATCH --array=0-%d \\n' % (nCases-1)) #f.write('#SBATCH --partition=normal \\n') #f.write('#SBATCH", "np.pi / 16 / 12 for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16) : for", "\\ % (eps, nu, run) def getSettings(nu, eps, cs): if cs is not", "for les in rangeles : for i in [0, 1, 2] : NUS,EPS,RUN,CSS", "0.004, 0.008] : # for eps in [0.02, 0.04, 0.08, 0.16, 0.32] :", "4 -bpdz 4 -CFL 0.1 ' % cs else: options = '-bpdx 12", "export TKE0=%f \\n\\ # export EXT=%f \\n\\ # echo $NU $EPS $TKE0 $EXT", "HOME = os.getenv('HOME') f = open('HIT_sbatch','w') f.write('#!/bin/bash -l \\n') if les: f.write('#SBATCH --job-name=LES_HIT", "16) : if relFit(nu, eps) > 100 or relFit(nu, eps) < 20: continue", "#!/usr/bin/env python3 import os, numpy as np, argparse def relFit(nu, eps): return 7.33972668", "action='store_true', help=\"Only print run name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions', action='store_true', help=\"Only print run options.\")", "\\ % (eps, nu, run, cs) else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ % (eps, nu,", "os.system(\"\\ # export NU=%f \\n\\ # export EPS=%f \\n\\ # export TKE0=%f \\n\\", "export NU=%f \\n\\ # export EPS=%f \\n\\ # export TKE0=%f \\n\\ # export", "def lambdaFit(nu, eps): return 5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu); def runspec(nu, eps, run,", "CSS = [], [], [], [] h = 2 * np.pi / 16", "0.008] : # for eps in [0.02, 0.04, 0.08, 0.16, 0.32] : #", "* np.power(eps, (2.0/3.0) ) # for scal in [2, 3] : # tke0", "# export TKE0=%f \\n\\ # export EXT=%f \\n\\ # echo $NU 
$EPS $TKE0", "> 100 or relFit(nu, eps) < 20: continue if lambdaFit(nu, eps) > 0.1", "-bpdz 12 -CFL 0.02 ' tAnalysis = np.sqrt(nu / eps) return options +", "for i in [0, 1, 2] : NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les]", "--constraint=gpu \\n') f.write('#SBATCH --account=s929 \\n') f.write('#SBATCH --array=0-%d \\n' % (nCases-1)) #f.write('#SBATCH --partition=normal \\n')", "--printOptions` \\n') else: f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \\n')", "-bpdz 4 -CFL 0.1 ' % cs else: options = '-bpdx 12 -bpdy", "-CFL 0.02 ' tAnalysis = np.sqrt(nu / eps) return options + '-extentx 6.2831853072", "* np.pi / 16 / 12 for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16) :", "1 -tend 100 -keepMomentumConstant 1 ' \\ '-analysis HIT -nu %f -energyInjectionRate %f", "12 for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16) : for eps in np.logspace(np.log10(0.01), np.log10(2.0),", "--account=s929 \\n') f.write('#SBATCH --array=0-%d \\n' % (nCases-1)) #f.write('#SBATCH --partition=normal \\n') #f.write('#SBATCH --ntasks-per-node=1 \\n')", "action='store_true', help=\"Only print run options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint', action='store_true', help=\"Only print run options.\")", "1 ' \\ '-spectralForcing 1 -tend 100 -keepMomentumConstant 1 ' \\ '-analysis HIT", "-nprocsx 1 -nprocsy 1 -nprocsz 1 ' \\ '-spectralForcing 1 -tend 100 -keepMomentumConstant", "# ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # % (nu, eps, tke0, ext, scal, eps, nu))", "f.write('#SBATCH --output=out.%j.%a.txt \\n') f.write('#SBATCH --error=err.%j.%a.txt \\n') f.write('#SBATCH --constraint=gpu \\n') f.write('#SBATCH --account=s929 \\n') f.write('#SBATCH", "= \"Compute a target file for RL agent from DNS data.\") parser.add_argument('--printName', dest='printName',", "% (tAnalysis, nu, eps) def launchEuler(nu, eps, run): runname = runspec(nu, eps, run)", "$TKE0 $EXT \\n\\ # ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # % (nu, eps, tke0, ext,", "def launchDaint(nCases, les): SCRATCH = os.getenv('SCRATCH') HOME = os.getenv('HOME') f = open('HIT_sbatch','w') f.write('#!/bin/bash", "6.2831853072 -dump2D 0 -dump3D 0 ' \\ '-tdump 1 -BC_x periodic -BC_y periodic", "HITurbulence -tAnalysis %f ' \\ '-compute-dissipation 1 -nprocsx 1 -nprocsy 1 -nprocsz 1", "\\n' % SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation ./exec \\n' %", "'-bpdx 12 -bpdy 12 -bpdz 12 -CFL 0.02 ' tAnalysis = np.sqrt(nu /", "-tend 100 -keepMomentumConstant 1 ' \\ '-analysis HIT -nu %f -energyInjectionRate %f '", "f.write('export CRAY_CUDA_MPS=1 \\n') f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \\n') f.close() os.system('sbatch HIT_sbatch')", "f.write('cd %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation ./exec \\n' % HOME) f.write('export OMP_NUM_THREADS=12", "else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ % (eps, nu, run) def getSettings(nu, eps, cs): if", "= os.getenv('HOME') f = open('HIT_sbatch','w') f.write('#!/bin/bash -l \\n') if les: f.write('#SBATCH --job-name=LES_HIT \\n')", "--LES --case ${ind} --printOptions` \\n') else: f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --case", ") if args.launchEuler: launchEuler(NUS[i], EPS[i], RUN[i]) #for nu in 
[0.002, 0.004, 0.008] :", "= '-bpdx 12 -bpdy 12 -bpdz 12 -CFL 0.02 ' tAnalysis = np.sqrt(nu", "os.system(\"export NU=%f \\n export EPS=%f \\n export TANALYSIS=%f \\n \" \\ \"echo $NU", "-keepMomentumConstant 1 ' \\ '-analysis HIT -nu %f -energyInjectionRate %f ' \\ %", "dest='printOptions', action='store_true', help=\"Only print run options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint', action='store_true', help=\"Only print run", "/ eps) os.system(\"export NU=%f \\n export EPS=%f \\n export TANALYSIS=%f \\n \" \\", "EPS=%f \\n export TANALYSIS=%f \\n \" \\ \"echo $NU $EPS \\n ./launchEuler.sh settingsHIT_DNS.sh", "\\n\\ # echo $NU $EPS $TKE0 $EXT \\n\\ # ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" #", "[0.002, 0.004, 0.008] : # for eps in [0.02, 0.04, 0.08, 0.16, 0.32]", "default = -1, help=\"Simulation case.\") args = parser.parse_args() if args.LES: rangeles = np.linspace(0.16,", "np.linspace(0.16, 0.24, 9) else: rangeles = [None] NUS, EPS, RUN, CSS = [],", "cases = [args.case] for i in cases: if args.printOptions: print( getSettings(NUS[i], EPS[i], CSS[i])", "RL agent from DNS data.\") parser.add_argument('--printName', dest='printName', action='store_true', help=\"Only print run name.\") parser.set_defaults(printName=False)", "--error=err.%j.%a.txt \\n') f.write('#SBATCH --constraint=gpu \\n') f.write('#SBATCH --account=s929 \\n') f.write('#SBATCH --array=0-%d \\n' % (nCases-1))", "< 20: continue if lambdaFit(nu, eps) > 0.1 * 2 * np.pi: continue", "\\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if les: f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --LES", "help=\"Only print run name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions', action='store_true', help=\"Only print run options.\") parser.set_defaults(printOptions=False)", "args.LES: rangeles = np.linspace(0.16, 0.24, 9) else: rangeles = [None] NUS, EPS, RUN,", "\"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ % (eps, nu, run) def getSettings(nu, eps, cs): if cs is", "%s \" \\ % (nu, eps, tAnalysis, runname) ) def launchDaint(nCases, les): SCRATCH", "eps) > h or etaFit(nu, eps) < h/8: continue for les in rangeles", "%f ' \\ '-compute-dissipation 1 -nprocsx 1 -nprocsy 1 -nprocsz 1 ' \\", "1 --ntasks-per-node=1 ./exec ${OPTIONS} \\n') f.close() os.system('sbatch HIT_sbatch') if __name__ == '__main__': parser", "# for eps in [0.02, 0.04, 0.08, 0.16, 0.32] : # tke0 =", "eps): return 5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu); def runspec(nu, eps, run, cs): if", "parser = argparse.ArgumentParser( description = \"Compute a target file for RL agent from", "1, 2] : NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les] nCases = len(NUS) #print('Defined", "les: f.write('#SBATCH --job-name=LES_HIT \\n') else: f.write('#SBATCH --job-name=DNS_HIT \\n') f.write('#SBATCH --time=24:00:00 \\n') f.write('#SBATCH --output=out.%j.%a.txt", "parser.add_argument('--printOptions', dest='printOptions', action='store_true', help=\"Only print run options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint', action='store_true', help=\"Only print", "run) def getSettings(nu, eps, cs): if cs is not None: options = '-sgs", "= 2.77578963 * np.power(eps, (2.0/3.0) ) # for scal in [2] : #", "print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) ) if args.launchEuler: launchEuler(NUS[i], EPS[i], RUN[i]) #for nu", "-cs %f -bpdx 
4 -bpdy 4 -bpdz 4 -CFL 0.1 ' % cs", "eps) def launchEuler(nu, eps, run): runname = runspec(nu, eps, run) print(runname) tAnalysis =", "* np.pi: continue if etaFit(nu, eps) > h or etaFit(nu, eps) < h/8:", "fromFit -initCond HITurbulence -tAnalysis %f ' \\ '-compute-dissipation 1 -nprocsx 1 -nprocsy 1", "np.power(eps, 1/6.0) / np.sqrt(nu) def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75)", "#f.write('#SBATCH --ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if les: f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \\n')", "% (eps, nu, run, cs) else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ % (eps, nu, run)", "= os.getenv('SCRATCH') HOME = os.getenv('HOME') f = open('HIT_sbatch','w') f.write('#!/bin/bash -l \\n') if les:", "EPS[i], RUN[i], CSS[i]) ) if args.launchEuler: launchEuler(NUS[i], EPS[i], RUN[i]) #for nu in [0.002,", "getSettings(nu, eps, cs): if cs is not None: options = '-sgs SSM -cs", "(nu, eps, tke0, ext, scal, eps, nu)) #for nu in [0.001, 0.002, 0.004,", "les in rangeles : for i in [0, 1, 2] : NUS,EPS,RUN,CSS =", "-0.25) * np.power(nu, 0.75) def lambdaFit(nu, eps): return 5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu);", "tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) # for scal in [2, 3]", "100 -keepMomentumConstant 1 ' \\ '-analysis HIT -nu %f -energyInjectionRate %f ' \\", "from DNS data.\") parser.add_argument('--printName', dest='printName', action='store_true', help=\"Only print run name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions',", "= 2.77578963 * np.power(eps, (2.0/3.0) ) # for scal in [2, 3] :", "RUN+[i], CSS+[les] nCases = len(NUS) #print('Defined %d cases' % nCases) if args.launchDaint: launchDaint(nCases,", "/ 12 for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16) : for eps in np.logspace(np.log10(0.01),", "0.016] : # for eps in [0.02, 0.04, 0.08, 0.16, 0.32, 0.64] :", "# % (nu, eps, tke0, ext, scal, eps, nu)) #for nu in [0.001,", "\\ % (nu, eps, tAnalysis, runname) ) def launchDaint(nCases, les): SCRATCH = os.getenv('SCRATCH')", "[] h = 2 * np.pi / 16 / 12 for nu in", "in np.logspace(np.log10(0.002), np.log10(0.02), 16) : for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) : if", "eps, tke0, ext, scal, eps, nu)) #for nu in [0.001, 0.002, 0.004, 0.008,", "nu in [0.002, 0.004, 0.008] : # for eps in [0.02, 0.04, 0.08,", "\\ '-spectralIC fromFit -initCond HITurbulence -tAnalysis %f ' \\ '-compute-dissipation 1 -nprocsx 1", "for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) : if relFit(nu, eps) > 100 or", "SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation ./exec \\n' % HOME) f.write('export OMP_NUM_THREADS=12 \\n') f.write('export CRAY_CUDA_MPS=1 \\n')", "' % cs else: options = '-bpdx 12 -bpdy 12 -bpdz 12 -CFL", "${ind} --printOptions` \\n') else: f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions`", "les: f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \\n')", "if args.printOptions: print( getSettings(NUS[i], EPS[i], CSS[i]) ) if args.printName: print( runspec(NUS[i], EPS[i], RUN[i],", "${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \\n') else: f.write('RUNDIRN=`./launchLESHIT.py --case ${ind}", "-bpdy 4 -bpdz 4 -CFL 0.1 ' % cs else: options = '-bpdx", "eps): return 7.33972668 * np.power(eps, 
1/6.0) / np.sqrt(nu) def etaFit(nu, eps): return np.power(eps,", "--job-name=LES_HIT \\n') else: f.write('#SBATCH --job-name=DNS_HIT \\n') f.write('#SBATCH --time=24:00:00 \\n') f.write('#SBATCH --output=out.%j.%a.txt \\n') f.write('#SBATCH", "is not None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ % (eps, nu, run, cs) else: return", "-nprocsy 1 -nprocsz 1 ' \\ '-spectralForcing 1 -tend 100 -keepMomentumConstant 1 '", "< h/8: continue for les in rangeles : for i in [0, 1,", "'__main__': parser = argparse.ArgumentParser( description = \"Compute a target file for RL agent", "= range(nCases) else: cases = [args.case] for i in cases: if args.printOptions: print(", "\\n') #f.write('#SBATCH --ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if les: f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName`", "%f -energyInjectionRate %f ' \\ % (tAnalysis, nu, eps) def launchEuler(nu, eps, run):", "eps, run, cs): if cs is not None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ % (eps,", "in [0.02, 0.04, 0.08, 0.16, 0.32] : # tke0 = 2.77578963 * np.power(eps,", "3] : # tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) # for scal", "if __name__ == '__main__': parser = argparse.ArgumentParser( description = \"Compute a target file", "#print('Defined %d cases' % nCases) if args.launchDaint: launchDaint(nCases, args.LES) if args.case < 0:", "-BC_y periodic -BC_z periodic ' \\ '-spectralIC fromFit -initCond HITurbulence -tAnalysis %f '", "> 0.1 * 2 * np.pi: continue if etaFit(nu, eps) > h or", "9) else: rangeles = [None] NUS, EPS, RUN, CSS = [], [], [],", "'-spectralForcing 1 -tend 100 -keepMomentumConstant 1 ' \\ '-analysis HIT -nu %f -energyInjectionRate", "EPS[i], RUN[i]) #for nu in [0.002, 0.004, 0.008] : # for eps in", "parser.add_argument('--printName', dest='printName', action='store_true', help=\"Only print run name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions', action='store_true', help=\"Only print", "dest='LES', action='store_true', help=\"Triggers LES modeling.\") parser.set_defaults(LES=False) parser.add_argument('--case', type = int, default = -1,", "* np.sqrt(nu); def runspec(nu, eps, run, cs): if cs is not None: return", "--ntasks-per-node=1 ./exec ${OPTIONS} \\n') f.close() os.system('sbatch HIT_sbatch') if __name__ == '__main__': parser =", "cases = range(nCases) else: cases = [args.case] for i in cases: if args.printOptions:", "# tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) # for scal in [2,", "file for RL agent from DNS data.\") parser.add_argument('--printName', dest='printName', action='store_true', help=\"Only print run", "1 -nprocsz 1 ' \\ '-spectralForcing 1 -tend 100 -keepMomentumConstant 1 ' \\", "% cs else: options = '-bpdx 12 -bpdy 12 -bpdz 12 -CFL 0.02", "f.write('#SBATCH --error=err.%j.%a.txt \\n') f.write('#SBATCH --constraint=gpu \\n') f.write('#SBATCH --account=s929 \\n') f.write('#SBATCH --array=0-%d \\n' %", "\\n \" \\ \"echo $NU $EPS \\n ./launchEuler.sh settingsHIT_DNS.sh %s \" \\ %", "not None: options = '-sgs SSM -cs %f -bpdx 4 -bpdy 4 -bpdz", "* np.pi # os.system(\"\\ # export NU=%f \\n\\ # export EPS=%f \\n\\ #", "eps): return np.power(eps, -0.25) * np.power(nu, 0.75) def lambdaFit(nu, eps): return 5.35507603 *", "${ind} --printOptions` \\n') f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN} \\n' %", "-nprocsz 1 ' \\ '-spectralForcing 1 -tend 100 -keepMomentumConstant 1 ' \\ '-analysis", 
"\"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ % (eps, nu, run, cs) else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\" \\ % (eps,", "% HOME) f.write('export OMP_NUM_THREADS=12 \\n') f.write('export CRAY_CUDA_MPS=1 \\n') f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec", "\\ '-tdump 1 -BC_x periodic -BC_y periodic -BC_z periodic ' \\ '-spectralIC fromFit", "--time=24:00:00 \\n') f.write('#SBATCH --output=out.%j.%a.txt \\n') f.write('#SBATCH --error=err.%j.%a.txt \\n') f.write('#SBATCH --constraint=gpu \\n') f.write('#SBATCH --account=s929", "runspec(NUS[i], EPS[i], RUN[i], CSS[i]) ) if args.launchEuler: launchEuler(NUS[i], EPS[i], RUN[i]) #for nu in", "\\ '-analysis HIT -nu %f -energyInjectionRate %f ' \\ % (tAnalysis, nu, eps)", "--output=out.%j.%a.txt \\n') f.write('#SBATCH --error=err.%j.%a.txt \\n') f.write('#SBATCH --constraint=gpu \\n') f.write('#SBATCH --account=s929 \\n') f.write('#SBATCH --array=0-%d", "$EXT \\n\\ # ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # % (nu, eps, tke0, ext, scal,", "% SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation ./exec \\n' % HOME)", "print( getSettings(NUS[i], EPS[i], CSS[i]) ) if args.printName: print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) )", "' \\ '-spectralIC fromFit -initCond HITurbulence -tAnalysis %f ' \\ '-compute-dissipation 1 -nprocsx", "'-sgs SSM -cs %f -bpdx 4 -bpdy 4 -bpdz 4 -CFL 0.1 '", "${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \\n') f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \\n' %", "= parser.parse_args() if args.LES: rangeles = np.linspace(0.16, 0.24, 9) else: rangeles = [None]", "--array=0-%d \\n' % (nCases-1)) #f.write('#SBATCH --partition=normal \\n') #f.write('#SBATCH --ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID \\n') if", "rangeles = np.linspace(0.16, 0.24, 9) else: rangeles = [None] NUS, EPS, RUN, CSS", "continue if lambdaFit(nu, eps) > 0.1 * 2 * np.pi: continue if etaFit(nu,", "-BC_z periodic ' \\ '-spectralIC fromFit -initCond HITurbulence -tAnalysis %f ' \\ '-compute-dissipation", ": if relFit(nu, eps) > 100 or relFit(nu, eps) < 20: continue if", "eps) < 20: continue if lambdaFit(nu, eps) > 0.1 * 2 * np.pi:", "\\n\\ # export TKE0=%f \\n\\ # export EXT=%f \\n\\ # echo $NU $EPS", "dest='launchEuler', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES', action='store_true', help=\"Triggers LES modeling.\")", "\\n') f.write('#SBATCH --time=24:00:00 \\n') f.write('#SBATCH --output=out.%j.%a.txt \\n') f.write('#SBATCH --error=err.%j.%a.txt \\n') f.write('#SBATCH --constraint=gpu \\n')", "launchEuler(NUS[i], EPS[i], RUN[i]) #for nu in [0.002, 0.004, 0.008] : # for eps", "target file for RL agent from DNS data.\") parser.add_argument('--printName', dest='printName', action='store_true', help=\"Only print", "\\ \"echo $NU $EPS \\n ./launchEuler.sh settingsHIT_DNS.sh %s \" \\ % (nu, eps,", ": NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les] nCases = len(NUS) #print('Defined %d cases'", "nCases) if args.launchDaint: launchDaint(nCases, args.LES) if args.case < 0: cases = range(nCases) else:", "' \\ '-spectralForcing 1 -tend 100 -keepMomentumConstant 1 ' \\ '-analysis HIT -nu", "0.08, 0.16, 0.32] : # tke0 = 2.77578963 * np.power(eps, (2.0/3.0) ) #", "\\n') f.write('#SBATCH --array=0-%d \\n' % (nCases-1)) #f.write('#SBATCH --partition=normal \\n') #f.write('#SBATCH 
--ntasks-per-node=1 \\n') f.write('ind=$SLURM_ARRAY_TASK_ID", "[2] : # ext = scal * np.pi # os.system(\"\\ # export NU=%f", "parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions', action='store_true', help=\"Only print run options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint', action='store_true', help=\"Only", "16) : for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) : if relFit(nu, eps) >", "tAnalysis = np.sqrt(nu / eps) return options + '-extentx 6.2831853072 -dump2D 0 -dump3D", "run options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES',", "16 / 12 for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16) : for eps in", "return np.power(eps, -0.25) * np.power(nu, 0.75) def lambdaFit(nu, eps): return 5.35507603 * np.power(eps,-1/6.0)", "[0, 1, 2] : NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les] nCases = len(NUS)", "relFit(nu, eps) < 20: continue if lambdaFit(nu, eps) > 0.1 * 2 *", "not None: return \"HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d\" \\ % (eps, nu, run, cs) else: return \"HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d\"", "% nCases) if args.launchDaint: launchDaint(nCases, args.LES) if args.case < 0: cases = range(nCases)", "if args.launchEuler: launchEuler(NUS[i], EPS[i], RUN[i]) #for nu in [0.002, 0.004, 0.008] : #", "print run options.\") parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES',", "for i in cases: if args.printOptions: print( getSettings(NUS[i], EPS[i], CSS[i]) ) if args.printName:", "eps) > 0.1 * 2 * np.pi: continue if etaFit(nu, eps) > h", "\\n') if les: f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \\n') f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind}", "parser.add_argument('--launchEuler', dest='launchEuler', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES', action='store_true', help=\"Triggers LES", "\" \\ \"echo $NU $EPS \\n ./launchEuler.sh settingsHIT_DNS.sh %s \" \\ % (nu,", "EXT=%f \\n\\ # echo $NU $EPS $TKE0 $EXT \\n\\ # ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\"", "parser.set_defaults(launchDaint=False) parser.add_argument('--launchEuler', dest='launchEuler', action='store_true', help=\"Only print run options.\") parser.set_defaults(launchEuler=False) parser.add_argument('--LES', dest='LES', action='store_true', help=\"Triggers", "0.008, 0.016] : # for eps in [0.02, 0.04, 0.08, 0.16, 0.32, 0.64]", "rangeles : for i in [0, 1, 2] : NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps],", "eps, run): runname = runspec(nu, eps, run) print(runname) tAnalysis = np.sqrt(nu / eps)", "12 -bpdy 12 -bpdz 12 -CFL 0.02 ' tAnalysis = np.sqrt(nu / eps)", "\\n') f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \\n') f.close() os.system('sbatch HIT_sbatch') if __name__", "les): SCRATCH = os.getenv('SCRATCH') HOME = os.getenv('HOME') f = open('HIT_sbatch','w') f.write('#!/bin/bash -l \\n')", "1/6.0) / np.sqrt(nu) def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75) def", "$EPS \\n ./launchEuler.sh settingsHIT_DNS.sh %s \" \\ % (nu, eps, tAnalysis, 
runname) )", "eps) return options + '-extentx 6.2831853072 -dump2D 0 -dump3D 0 ' \\ '-tdump", "\\n export EPS=%f \\n export TANALYSIS=%f \\n \" \\ \"echo $NU $EPS \\n", "tke0, ext, scal, eps, nu)) #for nu in [0.001, 0.002, 0.004, 0.008, 0.016]", "eps) os.system(\"export NU=%f \\n export EPS=%f \\n export TANALYSIS=%f \\n \" \\ \"echo", "-dump3D 0 ' \\ '-tdump 1 -BC_x periodic -BC_y periodic -BC_z periodic '", "= np.sqrt(nu / eps) return options + '-extentx 6.2831853072 -dump2D 0 -dump3D 0", "None: options = '-sgs SSM -cs %f -bpdx 4 -bpdy 4 -bpdz 4", "periodic ' \\ '-spectralIC fromFit -initCond HITurbulence -tAnalysis %f ' \\ '-compute-dissipation 1", "CSS+[les] nCases = len(NUS) #print('Defined %d cases' % nCases) if args.launchDaint: launchDaint(nCases, args.LES)", "nu, run) def getSettings(nu, eps, cs): if cs is not None: options =", "in [0.002, 0.004, 0.008] : # for eps in [0.02, 0.04, 0.08, 0.16,", "%d cases' % nCases) if args.launchDaint: launchDaint(nCases, args.LES) if args.case < 0: cases", "100 or relFit(nu, eps) < 20: continue if lambdaFit(nu, eps) > 0.1 *", "i in [0, 1, 2] : NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les] nCases", "$EPS $TKE0 $EXT \\n\\ # ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # % (nu, eps, tke0,", "in [0.001, 0.002, 0.004, 0.008, 0.016] : # for eps in [0.02, 0.04,", "i in cases: if args.printOptions: print( getSettings(NUS[i], EPS[i], CSS[i]) ) if args.printName: print(", "dest='printName', action='store_true', help=\"Only print run name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions', action='store_true', help=\"Only print run", "= -1, help=\"Simulation case.\") args = parser.parse_args() if args.LES: rangeles = np.linspace(0.16, 0.24,", "run name.\") parser.set_defaults(printName=False) parser.add_argument('--printOptions', dest='printOptions', action='store_true', help=\"Only print run options.\") parser.set_defaults(printOptions=False) parser.add_argument('--launchDaint', dest='launchDaint',", "open('HIT_sbatch','w') f.write('#!/bin/bash -l \\n') if les: f.write('#SBATCH --job-name=LES_HIT \\n') else: f.write('#SBATCH --job-name=DNS_HIT \\n')", "for RL agent from DNS data.\") parser.add_argument('--printName', dest='printName', action='store_true', help=\"Only print run name.\")", "/ np.sqrt(nu) def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75) def lambdaFit(nu,", "parser.parse_args() if args.LES: rangeles = np.linspace(0.16, 0.24, 9) else: rangeles = [None] NUS,", "./exec \\n' % HOME) f.write('export OMP_NUM_THREADS=12 \\n') f.write('export CRAY_CUDA_MPS=1 \\n') f.write('srun --ntasks 1", "launchDaint(nCases, les): SCRATCH = os.getenv('SCRATCH') HOME = os.getenv('HOME') f = open('HIT_sbatch','w') f.write('#!/bin/bash -l", "%s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cd %s/CubismUP3D/${RUNDIRN} \\n' % SCRATCH) f.write('cp %s/CubismUP_3D/bin/simulation ./exec \\n'", "def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75) def lambdaFit(nu, eps): return", "RUN, CSS = [], [], [], [] h = 2 * np.pi /", "$NU $EPS $TKE0 $EXT \\n\\ # ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f\" # % (nu, eps,", "0.004, 0.008, 0.016] : # for eps in [0.02, 0.04, 0.08, 0.16, 0.32,", "[0.02, 0.04, 0.08, 0.16, 0.32] : # tke0 = 2.77578963 * np.power(eps, (2.0/3.0)", "etaFit(nu, eps) > h or etaFit(nu, eps) < h/8: continue for les in", "' tAnalysis = np.sqrt(nu / eps) return options + '-extentx 6.2831853072 -dump2D 0" ]
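A small sanity-check sketch for the sweep above, not from the source: assuming the file is saved as launchLESHIT.py (importing it does not trigger the CLI block, which is guarded by __main__), this counts the (nu, eps) pairs that pass the same admissibility filters used there.

import numpy as np
from launchLESHIT import relFit, etaFit, lambdaFit  # assumed module name

h = 2 * np.pi / 16 / 12  # grid spacing implied by 12 blocks of 16 cells over 2 pi
pairs = [(nu, eps)
         for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16)
         for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16)
         if 20 <= relFit(nu, eps) <= 100             # fitted Re_lambda in range
         and lambdaFit(nu, eps) <= 0.1 * 2 * np.pi   # Taylor microscale resolved
         and h / 8 <= etaFit(nu, eps) <= h]          # Kolmogorov scale between h/8 and h
print('%d admissible (nu, eps) pairs' % len(pairs))

Each pair then spawns three runs (and, with --LES, one per Smagorinsky coefficient), which is what --launchDaint and --launchEuler submit.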
<filename>iterated_maximum_subarray.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import defaultdict

import numpy as np


def find_smallest_positive(alist):
    # find first positive value
    minpos = -1
    for x in alist:
        if x > 0:
            minpos = x
            break
    if minpos > 0:
        # find smallest positive value
        for x in alist:
            if x > 0 and x < minpos:
                minpos = x
    return minpos


def rebase_to_smallest_positive(alist):
    base = find_smallest_positive(alist)
    if base == -1:
        return None
    else:
        return [x - base for x in alist]


def compute_maximum_subarray(score_vector=None):
    # Kadane's algorithm: track the best-scoring run ending at each position
    begin_temp = begin = end = 0
    start_val = score_vector[0]
    max_ending_here = max_so_far = start_val
    for pos, x in enumerate(score_vector[1:], 1):
        if max_ending_here < 0:
            max_ending_here = x
            begin_temp = pos
        else:
            max_ending_here = max_ending_here + x
        if max_ending_here > max_so_far:
            max_so_far = max_ending_here
            begin = begin_temp
            end = pos
    return begin, end


def compute_iterated_maximum_subarray(seq=None, score=None, min_subarray_size=None,
                                      max_subarray_size=None, output='minimal',
                                      margin=1):
    original_score = score
    while True:
        # find (begin, end) of the maximum subarray of the current scores
        begin, end = compute_maximum_subarray(score_vector=score)
        # check that the retrieved subarray is larger than min_subarray_size
        if end - begin < min_subarray_size - 1:
            break
        else:
            # extract maximum subarray
            # NOTE: in order to account for border effects we expand it on the
            # left and on the right by 'margin'
            first = max(0, begin - margin)
            # NOTE: we use end + 1 for the rightmost position to be compliant
            # with the 'one after the end' slicing semantics
            last = min(len(seq), end + margin + 1)
            subarray = seq[first: last]
            subarray_size = len(subarray)
            if max_subarray_size == -1 or subarray_size <= max_subarray_size:
                # store data
                acc = 0
                for x in original_score[begin: end + 1]:
                    acc += x
                if output == 'minimal':
                    subarray = {'subarray_string': ''.join(subarray)}
                else:
                    subarray = {'subarray_string': ''.join(subarray),
                                'subarray': subarray,
                                'begin': first,
                                'end': last,
                                'size': subarray_size,
                                'seq': seq,
                                'score': acc}
                yield subarray
            if subarray_size > max_subarray_size:
                # if the subarray is too large then rebase the score list,
                # i.e. offset it by its smallest positive value
                score = rebase_to_smallest_positive(score)
                if score is None:
                    break
            else:
                # remove the current subarray by zeroing its importance values,
                # then iterate on the remainder
                score[first: last] = [0.0] * subarray_size


def extract_sequence_and_score(graph=None):
    # make a dict with positions as keys and lists of node ids as values
    pos_to_ids = defaultdict(list)
    for u in graph.nodes():
        if 'position' not in graph.node[u]:
            # a 'position' attribute is required on every node; fail loudly
            raise Exception('Missing "position" attribute in node:%s %s'
                            % (u, graph.node[u]))
        else:
            pos = graph.node[u]['position']
        # accumulate all node ids
        pos_to_ids[pos] += [u]

    # extract the sequence of labels and importances
    seq = [None] * len(pos_to_ids)
    score = [0] * len(pos_to_ids)
    for pos in sorted(pos_to_ids):
        ids = pos_to_ids[pos]
        labels = [graph.node[u].get('label', 'N/A') for u in ids]
        # check that all labels for the same position are identical
        assert(sum([1 for label in labels if label == labels[0]]) == len(labels)), \
            'ERROR: non identical labels referring to same position: %s %s' \
            % (pos, labels)
        seq[pos] = labels[0]
        # average all importance scores for the same position
        importances = [graph.node[u].get('importance', 0) for u in ids]
        score[pos] = np.mean(importances)
    return seq, score


def compute_max_subarrays_sequence(seq=None, score=None, min_subarray_size=None,
                                   max_subarray_size=None, output='minimal',
                                   margin=1):
    # extract subarrays
    for subarray in compute_iterated_maximum_subarray(
            seq=seq, score=score,
            min_subarray_size=min_subarray_size,
            max_subarray_size=max_subarray_size,
            output=output, margin=margin):
        yield subarray


def compute_max_subarrays(graph=None, min_subarray_size=None, max_subarray_size=None,
                          output='minimal', margin=1):
    seq, score = extract_sequence_and_score(graph)
    for subarray in compute_max_subarrays_sequence(
            seq=seq, score=score,
            min_subarray_size=min_subarray_size,
            max_subarray_size=max_subarray_size,
            output=output, margin=margin):
        yield subarray
''.join(subarray), 'subarray': subarray, 'begin': first, 'end': last, 'size': subarray_size, 'seq': seq, 'score':", "margin=1): original_score = score while True: # find (begin,end) of subarray in each", "if max_subarray_size == -1 or subarray_size <= max_subarray_size: # store data acc =", "accumulate all node ids pos_to_ids[pos] += [u] # extract sequence of labels and", "extract maximum subarray # NOTE: in order to account for border effects we", "= [graph.node[u].get('label', 'N/A') for u in ids] # check that all labels for", "labels referring to same position: %s %s' % (pos, labels) seq[pos] = labels[0]", "+ 1) subarray = seq[first: last] subarray_size = len(subarray) if max_subarray_size == -1", "labels and importances seq = [None] * len(pos_to_ids) score = [0] * len(pos_to_ids)", "base for x in alist] def compute_maximum_subarray(score_vector=None): begin_temp = begin = end =", "# check that the retrieved subarray is larger than min_subarray_size if end -", "return None else: return [x - base for x in alist] def compute_maximum_subarray(score_vector=None):", "end' semantics last = min(len(seq), end + margin + 1) subarray = seq[first:", "alist: if x > 0: minpos = x break if minpos > 0:", "find_smallest_positive(alist): # find first positive value minpos = -1 for x in alist:", "score = rebase_to_smallest_positive(score) if score is None: break else: # remove current subarray", "importances seq = [None] * len(pos_to_ids) score = [0] * len(pos_to_ids) for pos", "minpos > 0: # find smallest positive value for x in alist: if", "in ids] score[pos] = np.mean(importances) return seq, score def compute_max_subarrays_sequence(seq=None, score=None, min_subarray_size=None, max_subarray_size=None,", "after removal of current subarray def extract_sequence_and_score(graph=None): # make dict with positions as", "same position importances = [graph.node[u].get('importance', 0) for u in ids] score[pos] = np.mean(importances)", "min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1): seq, score = extract_sequence_and_score(graph) for subarray in compute_max_subarrays_sequence(seq=seq, score=score,", "of ids as values pos_to_ids = defaultdict(list) for u in graph.nodes(): if 'position'", "= pos else: max_ending_here = max_ending_here + x if max_ending_here > max_so_far: max_so_far", "subarray_size <= max_subarray_size: # store data acc = 0 for x in original_score[begin:", "the same position are identical assert(sum([1 for label in labels if label ==", "max_so_far = max_ending_here begin = begin_temp end = pos return begin, end def", "def compute_maximum_subarray(score_vector=None): begin_temp = begin = end = 0 start_val = score_vector[0] max_ending_here", "u in graph.nodes(): if 'position' not in graph.node[u]: # no position attributes in", "order to account for border effects we expand on the left and on", "labels for the same position are identical assert(sum([1 for label in labels if", "(pos, labels) seq[pos] = labels[0] # average all importance score for the same", "minpos def rebase_to_smallest_positive(alist): base = find_smallest_positive(alist) if base == -1: return None else:", "% (u, graph.node[u])) else: pos = graph.node[u]['position'] # accumulate all node ids pos_to_ids[pos]", "== -1: return None else: return [x - base for x in alist]", "# average all importance score for the same position importances = [graph.node[u].get('importance', 0)", "after the end' semantics last = min(len(seq), end + margin + 1) subarray", "max_so_far: max_so_far = max_ending_here begin = 
begin_temp end = pos return begin, end", "and lists of ids as values pos_to_ids = defaultdict(list) for u in graph.nodes():", "= {'subarray_string': ''.join(subarray)} else: subarray = {'subarray_string': ''.join(subarray), 'subarray': subarray, 'begin': first, 'end':", "positive value for x in alist: if x > 0 and x <", "+ x if max_ending_here > max_so_far: max_so_far = max_ending_here begin = begin_temp end", "== labels[0]]) == len(labels) ), 'ERROR: non identical labels referring to same position:", "node:%s %s' % (u, graph.node[u])) else: pos = graph.node[u]['position'] # accumulate all node", "else: return [x - base for x in alist] def compute_maximum_subarray(score_vector=None): begin_temp =", "margin + 1) subarray = seq[first: last] subarray_size = len(subarray) if max_subarray_size ==", "lists of ids as values pos_to_ids = defaultdict(list) for u in graph.nodes(): if", "if the subarray is too large then rebase the score list, i.e. offset", "score_vector[0] max_ending_here = max_so_far = start_val for pos, x in enumerate(score_vector[1:], 1): if", "for x in alist: if x > 0: minpos = x break if", "* len(pos_to_ids) score = [0] * len(pos_to_ids) for pos in sorted(pos_to_ids): ids =", "collections import defaultdict import numpy as np def find_smallest_positive(alist): # find first positive", "subarray # NOTE: in order to account for border effects we expand on", "account for border effects we expand on the left and on the right", "subarray def extract_sequence_and_score(graph=None): # make dict with positions as keys and lists of", "margin) # NOTE: we return + 1 for the rightmost postition to be", "subarray is too large then rebase the score list, i.e. offset by the", "pos return begin, end def compute_iterated_maximum_subarray(seq=None, score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1): original_score =", "if max_ending_here > max_so_far: max_so_far = max_ending_here begin = begin_temp end = pos", "same position: %s %s' % (pos, labels) seq[pos] = labels[0] # average all", "check that the retrieved subarray is larger than min_subarray_size if end - begin", "'size': subarray_size, 'seq': seq, 'score': acc} yield subarray if subarray_size > max_subarray_size: #", "score[pos] = np.mean(importances) return seq, score def compute_max_subarrays_sequence(seq=None, score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):", "-1 for x in alist: if x > 0: minpos = x break", "postition to be compliant with the 'one after the end' semantics last =", "base = find_smallest_positive(alist) if base == -1: return None else: return [x -", "for x in alist: if x > 0 and x < minpos: minpos", "NOTE: we return + 1 for the rightmost postition to be compliant with", "[graph.node[u].get('label', 'N/A') for u in ids] # check that all labels for the", "for the same position are identical assert(sum([1 for label in labels if label", "smallest positive value score = rebase_to_smallest_positive(score) if score is None: break else: #", "position importances = [graph.node[u].get('importance', 0) for u in ids] score[pos] = np.mean(importances) return", "division from __future__ import print_function from collections import defaultdict import numpy as np", "len(subarray) if max_subarray_size == -1 or subarray_size <= max_subarray_size: # store data acc", "if base == -1: return None else: return [x - base for x", "# extract sequence of labels and importances seq = [None] * len(pos_to_ids) score", "in compute_iterated_maximum_subarray(seq=seq, 
score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, output=output, margin=margin): yield subarray def compute_max_subarrays(graph=None, min_subarray_size=None, max_subarray_size=None,", "for u in ids] # check that all labels for the same position", "to same position: %s %s' % (pos, labels) seq[pos] = labels[0] # average", "break else: # extract maximum subarray # NOTE: in order to account for", "element begin, end = compute_maximum_subarray(score_vector=score) # check that the retrieved subarray is larger", "pos in sorted(pos_to_ids): ids = pos_to_ids[pos] labels = [graph.node[u].get('label', 'N/A') for u in", "x break if minpos > 0: # find smallest positive value for x", "0 and x < minpos: minpos = x return minpos def rebase_to_smallest_positive(alist): base", "__future__ import print_function from collections import defaultdict import numpy as np def find_smallest_positive(alist):", "= begin = end = 0 start_val = score_vector[0] max_ending_here = max_so_far =", "id instead raise Exception('Missing \"position\" attribute in node:%s %s' % (u, graph.node[u])) else:", "in original_score[begin: end + 1]: acc += x if output == 'minimal': subarray", "> max_so_far: max_so_far = max_ending_here begin = begin_temp end = pos return begin,", "# check that all labels for the same position are identical assert(sum([1 for", "output=output, margin=margin): yield subarray def compute_max_subarrays(graph=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1): seq, score =", "''.join(subarray), 'subarray': subarray, 'begin': first, 'end': last, 'size': subarray_size, 'seq': seq, 'score': acc}", "last] subarray_size = len(subarray) if max_subarray_size == -1 or subarray_size <= max_subarray_size: #", "score[first: last] = [0.0] * subarray_size # iterate after removal of current subarray", "yield subarray def compute_max_subarrays(graph=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1): seq, score = extract_sequence_and_score(graph) for", "ids pos_to_ids[pos] += [u] # extract sequence of labels and importances seq =", "score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1): # extract subarrays for subarray in compute_iterated_maximum_subarray(seq=seq, score=score,", "# find smallest positive value for x in alist: if x > 0", "= min(len(seq), end + margin + 1) subarray = seq[first: last] subarray_size =", "for pos, x in enumerate(score_vector[1:], 1): if max_ending_here < 0: max_ending_here = x", "subarray = seq[first: last] subarray_size = len(subarray) if max_subarray_size == -1 or subarray_size", "-1: return None else: return [x - base for x in alist] def", "ids = pos_to_ids[pos] labels = [graph.node[u].get('label', 'N/A') for u in ids] # check" ]
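A quick sanity check of the pipeline above (not part of the source): the sketch below builds a toy five-node path graph carrying the 'position', 'label' and 'importance' attributes the code expects, then asks for maximal subarrays. It assumes the functions above are in scope and networkx 1.x, where node attributes are reached through graph.node[u] as in the code; under networkx 2.x the accessor is graph.nodes[u] instead.

import networkx as nx

graph = nx.path_graph(5)
# one high-importance stretch ('BCD') inside the toy sequence 'ABCDE'
for u, (label, imp) in enumerate(zip('ABCDE', [0.1, 0.9, 0.8, 0.7, 0.1])):
    graph.node[u]['position'] = u
    graph.node[u]['label'] = label
    graph.node[u]['importance'] = imp

for sub in compute_max_subarrays(graph=graph, min_subarray_size=2,
                                 max_subarray_size=4):
    print(sub)  # expected here: {'subarray_string': 'ABCD'} (margin=1 widens the hit)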
[ "is not None) # pylint: disable=E1101 tr1 = self.get_model(model) tr2 = self.get_model(new_model) X", "skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs from", "Binarizer(threshold=0.5)) X = numpy.array([[0.1, 1.1], [0.2, 2.2]]) model.fit(X) model_onnx = convert_sklearn( model, 'binarizer',", "(2, 2)) def test_onnx_helper_load_save_init(self): model = make_pipeline(Binarizer(), OneHotEncoder( sparse=False), StandardScaler()) X = numpy.array([[0.1,", "model = make_pipeline(Binarizer(), OneHotEncoder( sparse=False), StandardScaler()) X = numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4,", "import numpy from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder from", "getLogger('skl2onnx') logger.disabled = True def get_model(self, model): try: import onnxruntime assert onnxruntime is", "= InferenceSession(save_onnx_model(model)) return lambda X: session.run(None, {'input': X})[0] def test_onnx_helper_load_save(self): model = make_pipeline(StandardScaler(),", "numpy.array([[0.1, 1.1], [0.2, 2.2]]) model.fit(X) model_onnx = convert_sklearn( model, 'binarizer', [('input', FloatTensorType([1, 2]))])", "session.run(None, {'input': X})[0] def test_onnx_helper_load_save(self): model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X = numpy.array([[0.1, 1.1],", "sparse=False), StandardScaler()) X = numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4, 2.2], [0.2, 2.4]]) model.fit(X)", "logging import getLogger import numpy from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Binarizer,", "from skl2onnx import convert_sklearn from skl2onnx.common.data_types import FloatTensorType from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model", "'variable') self.assertTrue(new_model.graph is not None) # pylint: disable=E1101 tr1 = self.get_model(model) tr2 =", "skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs from pyquickhelper.pycode import ExtTestCase class TestOnnxHelper(ExtTestCase): def setUp(self): logger =", "from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder from skl2onnx import", "get_model(self, model): try: import onnxruntime assert onnxruntime is not None except ImportError: return", "X = X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (2, 2)) self.assertEqual(X2.shape,", "X2 = tr2(X) self.assertEqual(X1.shape, (2, 2)) self.assertEqual(X2.shape, (2, 2)) def test_onnx_helper_load_save_init(self): model =", "[('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model = load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model", "try: import onnxruntime assert onnxruntime is not None except ImportError: return None from", "not None) # pylint: disable=E1101 tr1 = self.get_model(model) tr2 = self.get_model(new_model) X =", "X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (2, 2)) self.assertEqual(X2.shape, (2, 2))", "InferenceSession session = InferenceSession(save_onnx_model(model)) return lambda X: session.run(None, {'input': X})[0] def test_onnx_helper_load_save(self): model", "make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X = numpy.array([[0.1, 1.1], [0.2, 2.2]]) model.fit(X) model_onnx = convert_sklearn( model,", 
"= numpy.array([[0.1, 1.1], [0.2, 2.2]]) model.fit(X) model_onnx = convert_sklearn( model, 'binarizer', [('input', FloatTensorType([1,", "except ImportError: return None from onnxruntime import InferenceSession session = InferenceSession(save_onnx_model(model)) return lambda", "None from onnxruntime import InferenceSession session = InferenceSession(save_onnx_model(model)) return lambda X: session.run(None, {'input':", "[0.2, 2.2], [0.4, 2.2], [0.2, 2.4]]) model.fit(X) model_onnx = convert_sklearn( model, 'pipe3', [('input',", "self.assertEqual(X2.shape, (2, 2)) def test_onnx_helper_load_save_init(self): model = make_pipeline(Binarizer(), OneHotEncoder( sparse=False), StandardScaler()) X =", "StandardScaler, OneHotEncoder from skl2onnx import convert_sklearn from skl2onnx.common.data_types import FloatTensorType from skl2onnx.helpers.onnx_helper import", "model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X = numpy.array([[0.1, 1.1], [0.2, 2.2]]) model.fit(X) model_onnx =", "import InferenceSession session = InferenceSession(save_onnx_model(model)) return lambda X: session.run(None, {'input': X})[0] def test_onnx_helper_load_save(self):", "self.assertEqual(X1.shape, (2, 2)) self.assertEqual(X2.shape, (2, 2)) def test_onnx_helper_load_save_init(self): model = make_pipeline(Binarizer(), OneHotEncoder( sparse=False),", "= True def get_model(self, model): try: import onnxruntime assert onnxruntime is not None", "onnxruntime import InferenceSession session = InferenceSession(save_onnx_model(model)) return lambda X: session.run(None, {'input': X})[0] def", "sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder from skl2onnx import convert_sklearn from skl2onnx.common.data_types import FloatTensorType", "= tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (4, 2)) self.assertEqual(X2.shape, (4, 2)) if __name__", "@brief test log(time=2s) \"\"\" import unittest from logging import getLogger import numpy from", "= make_pipeline(Binarizer(), OneHotEncoder( sparse=False), StandardScaler()) X = numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4, 2.2],", "\"\"\" import unittest from logging import getLogger import numpy from sklearn.pipeline import make_pipeline", "[0.2, 2.2]]) model.fit(X) model_onnx = convert_sklearn( model, 'binarizer', [('input', FloatTensorType([1, 2]))]) filename =", "FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model = load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model =", "session = InferenceSession(save_onnx_model(model)) return lambda X: session.run(None, {'input': X})[0] def test_onnx_helper_load_save(self): model =", "filename) model = load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model = select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph is not None)", "model_onnx = convert_sklearn( model, 'pipe3', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename)", "X: session.run(None, {'input': X})[0] def test_onnx_helper_load_save(self): model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X = numpy.array([[0.1,", "model = load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model = select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph is not None) #", "disable=E1101 tr1 = self.get_model(model) tr2 = self.get_model(new_model) X = X.astype(numpy.float32) X1 = tr1(X)", "= X.astype(numpy.float32) X1 
= tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (2, 2)) self.assertEqual(X2.shape, (2,", "= self.get_model(model) tr2 = self.get_model(new_model) X = X.astype(numpy.float32) X1 = tr1(X) X2 =", "model, 'binarizer', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model = load_onnx_model(filename)", "2.2], [0.4, 2.2], [0.2, 2.4]]) model.fit(X) model_onnx = convert_sklearn( model, 'pipe3', [('input', FloatTensorType([1,", "= tr2(X) self.assertEqual(X1.shape, (4, 2)) self.assertEqual(X2.shape, (4, 2)) if __name__ == \"__main__\": unittest.main()", "X2 = tr2(X) self.assertEqual(X1.shape, (4, 2)) self.assertEqual(X2.shape, (4, 2)) if __name__ == \"__main__\":", "import Binarizer, StandardScaler, OneHotEncoder from skl2onnx import convert_sklearn from skl2onnx.common.data_types import FloatTensorType from", "None) # pylint: disable=E1101 tr1 = self.get_model(model) tr2 = self.get_model(new_model) X = X.astype(numpy.float32)", "X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (4, 2)) self.assertEqual(X2.shape, (4, 2))", "None except ImportError: return None from onnxruntime import InferenceSession session = InferenceSession(save_onnx_model(model)) return", "new_model = select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph is not None) # pylint: disable=E1101 tr1 =", "tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (2, 2)) self.assertEqual(X2.shape, (2, 2)) def test_onnx_helper_load_save_init(self): model", "X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (2, 2)) self.assertEqual(X2.shape, (2, 2)) def", "unittest from logging import getLogger import numpy from sklearn.pipeline import make_pipeline from sklearn.preprocessing", "1.1], [0.2, 2.2]]) model.fit(X) model_onnx = convert_sklearn( model, 'binarizer', [('input', FloatTensorType([1, 2]))]) filename", "log(time=2s) \"\"\" import unittest from logging import getLogger import numpy from sklearn.pipeline import", "tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (4, 2)) self.assertEqual(X2.shape, (4, 2)) if __name__ ==", "numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4, 2.2], [0.2, 2.4]]) model.fit(X) model_onnx = convert_sklearn( model,", "model.fit(X) model_onnx = convert_sklearn( model, 'pipe3', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx,", "import convert_sklearn from skl2onnx.common.data_types import FloatTensorType from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model from skl2onnx.helpers.onnx_helper", "\"\"\" @brief test log(time=2s) \"\"\" import unittest from logging import getLogger import numpy", "from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs", "make_pipeline(Binarizer(), OneHotEncoder( sparse=False), StandardScaler()) X = numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4, 2.2], [0.2,", "model): try: import onnxruntime assert onnxruntime is not None except ImportError: return None", "save_onnx_model(model_onnx, filename) model = load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model = select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph is not", "convert_sklearn from skl2onnx.common.data_types import FloatTensorType from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model from skl2onnx.helpers.onnx_helper 
import", "return None from onnxruntime import InferenceSession session = InferenceSession(save_onnx_model(model)) return lambda X: session.run(None,", "ExtTestCase class TestOnnxHelper(ExtTestCase): def setUp(self): logger = getLogger('skl2onnx') logger.disabled = True def get_model(self,", "select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph is not None) # pylint: disable=E1101 tr1 = self.get_model(model) tr2", "model_onnx = convert_sklearn( model, 'binarizer', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename)", "tr1 = self.get_model(model) tr2 = self.get_model(new_model) X = X.astype(numpy.float32) X1 = tr1(X) X2", "= \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model = load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model = select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph", "self.assertTrue(new_model.graph is not None) # pylint: disable=E1101 tr1 = self.get_model(model) tr2 = self.get_model(new_model)", "from pyquickhelper.pycode import ExtTestCase class TestOnnxHelper(ExtTestCase): def setUp(self): logger = getLogger('skl2onnx') logger.disabled =", "= getLogger('skl2onnx') logger.disabled = True def get_model(self, model): try: import onnxruntime assert onnxruntime", "\"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model = load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model = select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph is", "= convert_sklearn( model, 'pipe3', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model", "2)) self.assertEqual(X2.shape, (2, 2)) def test_onnx_helper_load_save_init(self): model = make_pipeline(Binarizer(), OneHotEncoder( sparse=False), StandardScaler()) X", "sklearn.pipeline import make_pipeline from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder from skl2onnx import convert_sklearn", "convert_sklearn( model, 'pipe3', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model =", "X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (4, 2)) self.assertEqual(X2.shape, (4, 2)) if", "from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs from pyquickhelper.pycode import ExtTestCase class", "from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder from skl2onnx import convert_sklearn from skl2onnx.common.data_types import", "(2, 2)) self.assertEqual(X2.shape, (2, 2)) def test_onnx_helper_load_save_init(self): model = make_pipeline(Binarizer(), OneHotEncoder( sparse=False), StandardScaler())", "make_pipeline from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder from skl2onnx import convert_sklearn from skl2onnx.common.data_types", "save_onnx_model from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs from pyquickhelper.pycode import ExtTestCase", "= select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph is not None) # pylint: disable=E1101 tr1 = self.get_model(model)", "TestOnnxHelper(ExtTestCase): def setUp(self): logger = getLogger('skl2onnx') logger.disabled = True def get_model(self, model): try:", "import 
ExtTestCase class TestOnnxHelper(ExtTestCase): def setUp(self): logger = getLogger('skl2onnx') logger.disabled = True def", "= X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (4, 2)) self.assertEqual(X2.shape, (4,", "2.4]]) model.fit(X) model_onnx = convert_sklearn( model, 'pipe3', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\"", "import select_model_inputs_outputs from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs from pyquickhelper.pycode import ExtTestCase class TestOnnxHelper(ExtTestCase): def", "self.get_model(model) tr2 = self.get_model(new_model) X = X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X)", "assert onnxruntime is not None except ImportError: return None from onnxruntime import InferenceSession", "from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs from pyquickhelper.pycode import ExtTestCase class TestOnnxHelper(ExtTestCase): def setUp(self): logger", "select_model_inputs_outputs from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs from pyquickhelper.pycode import ExtTestCase class TestOnnxHelper(ExtTestCase): def setUp(self):", "= make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X = numpy.array([[0.1, 1.1], [0.2, 2.2]]) model.fit(X) model_onnx = convert_sklearn(", "tr2 = self.get_model(new_model) X = X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape,", "logger = getLogger('skl2onnx') logger.disabled = True def get_model(self, model): try: import onnxruntime assert", "Binarizer, StandardScaler, OneHotEncoder from skl2onnx import convert_sklearn from skl2onnx.common.data_types import FloatTensorType from skl2onnx.helpers.onnx_helper", "X = numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4, 2.2], [0.2, 2.4]]) model.fit(X) model_onnx =", "= numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4, 2.2], [0.2, 2.4]]) model.fit(X) model_onnx = convert_sklearn(", "True def get_model(self, model): try: import onnxruntime assert onnxruntime is not None except", "from onnxruntime import InferenceSession session = InferenceSession(save_onnx_model(model)) return lambda X: session.run(None, {'input': X})[0]", "ImportError: return None from onnxruntime import InferenceSession session = InferenceSession(save_onnx_model(model)) return lambda X:", "2)) def test_onnx_helper_load_save_init(self): model = make_pipeline(Binarizer(), OneHotEncoder( sparse=False), StandardScaler()) X = numpy.array([[0.1, 1.1],", "= tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (2, 2)) self.assertEqual(X2.shape, (2, 2)) def test_onnx_helper_load_save_init(self):", "onnxruntime assert onnxruntime is not None except ImportError: return None from onnxruntime import", "tr2(X) self.assertEqual(X1.shape, (2, 2)) self.assertEqual(X2.shape, (2, 2)) def test_onnx_helper_load_save_init(self): model = make_pipeline(Binarizer(), OneHotEncoder(", "import enumerate_model_node_outputs from pyquickhelper.pycode import ExtTestCase class TestOnnxHelper(ExtTestCase): def setUp(self): logger = getLogger('skl2onnx')", "load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model = select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph is not None) # pylint: disable=E1101", "def get_model(self, model): try: import onnxruntime assert onnxruntime is not None except ImportError:", "{'input': X})[0] def test_onnx_helper_load_save(self): model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X = numpy.array([[0.1, 1.1], [0.2,", "skl2onnx.helpers.onnx_helper 
import select_model_inputs_outputs from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs from pyquickhelper.pycode import ExtTestCase class TestOnnxHelper(ExtTestCase):", "getLogger import numpy from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder", "self.get_model(new_model) X = X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (4, 2))", "2.2], [0.2, 2.4]]) model.fit(X) model_onnx = convert_sklearn( model, 'pipe3', [('input', FloatTensorType([1, 2]))]) filename", "= load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model = select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph is not None) # pylint:", "= self.get_model(new_model) X = X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (4,", "X = X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (4, 2)) self.assertEqual(X2.shape,", "X = numpy.array([[0.1, 1.1], [0.2, 2.2]]) model.fit(X) model_onnx = convert_sklearn( model, 'binarizer', [('input',", "= tr2(X) self.assertEqual(X1.shape, (2, 2)) self.assertEqual(X2.shape, (2, 2)) def test_onnx_helper_load_save_init(self): model = make_pipeline(Binarizer(),", "'binarizer', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model = load_onnx_model(filename) list(enumerate_model_node_outputs(model))", "import unittest from logging import getLogger import numpy from sklearn.pipeline import make_pipeline from", "return lambda X: session.run(None, {'input': X})[0] def test_onnx_helper_load_save(self): model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X", "[0.2, 2.4]]) model.fit(X) model_onnx = convert_sklearn( model, 'pipe3', [('input', FloatTensorType([1, 2]))]) filename =", "enumerate_model_node_outputs from pyquickhelper.pycode import ExtTestCase class TestOnnxHelper(ExtTestCase): def setUp(self): logger = getLogger('skl2onnx') logger.disabled", "X})[0] def test_onnx_helper_load_save(self): model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X = numpy.array([[0.1, 1.1], [0.2, 2.2]])", "InferenceSession(save_onnx_model(model)) return lambda X: session.run(None, {'input': X})[0] def test_onnx_helper_load_save(self): model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5))", "def test_onnx_helper_load_save(self): model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X = numpy.array([[0.1, 1.1], [0.2, 2.2]]) model.fit(X)", "2.2]]) model.fit(X) model_onnx = convert_sklearn( model, 'binarizer', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\"", "import onnxruntime assert onnxruntime is not None except ImportError: return None from onnxruntime", "test_onnx_helper_load_save_init(self): model = make_pipeline(Binarizer(), OneHotEncoder( sparse=False), StandardScaler()) X = numpy.array([[0.1, 1.1], [0.2, 2.2],", "is not None except ImportError: return None from onnxruntime import InferenceSession session =", "test log(time=2s) \"\"\" import unittest from logging import getLogger import numpy from sklearn.pipeline", "test_onnx_helper_load_save(self): model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X = numpy.array([[0.1, 1.1], [0.2, 2.2]]) model.fit(X) model_onnx", "not None except ImportError: return None from onnxruntime import InferenceSession session = InferenceSession(save_onnx_model(model))", "import load_onnx_model, save_onnx_model 
from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs from pyquickhelper.pycode", "# pylint: disable=E1101 tr1 = self.get_model(model) tr2 = self.get_model(new_model) X = X.astype(numpy.float32) X1", "skl2onnx import convert_sklearn from skl2onnx.common.data_types import FloatTensorType from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model from", "import make_pipeline from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder from skl2onnx import convert_sklearn from", "'pipe3', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model = load_onnx_model(filename) list(enumerate_model_node_outputs(model))", "[0.4, 2.2], [0.2, 2.4]]) model.fit(X) model_onnx = convert_sklearn( model, 'pipe3', [('input', FloatTensorType([1, 2]))])", "import FloatTensorType from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs from skl2onnx.helpers.onnx_helper", "pylint: disable=E1101 tr1 = self.get_model(model) tr2 = self.get_model(new_model) X = X.astype(numpy.float32) X1 =", "import getLogger import numpy from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Binarizer, StandardScaler,", "from skl2onnx.common.data_types import FloatTensorType from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs", "load_onnx_model, save_onnx_model from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs from pyquickhelper.pycode import", "logger.disabled = True def get_model(self, model): try: import onnxruntime assert onnxruntime is not", "skl2onnx.common.data_types import FloatTensorType from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs from", "OneHotEncoder from skl2onnx import convert_sklearn from skl2onnx.common.data_types import FloatTensorType from skl2onnx.helpers.onnx_helper import load_onnx_model,", "numpy from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder from skl2onnx", "pyquickhelper.pycode import ExtTestCase class TestOnnxHelper(ExtTestCase): def setUp(self): logger = getLogger('skl2onnx') logger.disabled = True", "setUp(self): logger = getLogger('skl2onnx') logger.disabled = True def get_model(self, model): try: import onnxruntime", "convert_sklearn( model, 'binarizer', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model =", "list(enumerate_model_node_outputs(model)) new_model = select_model_inputs_outputs(model, 'variable') self.assertTrue(new_model.graph is not None) # pylint: disable=E1101 tr1", "onnxruntime is not None except ImportError: return None from onnxruntime import InferenceSession session", "self.get_model(new_model) X = X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (2, 2))", "FloatTensorType from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs from skl2onnx.helpers.onnx_helper import", "2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) 
model = load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model = select_model_inputs_outputs(model,", "OneHotEncoder( sparse=False), StandardScaler()) X = numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4, 2.2], [0.2, 2.4]])", "from logging import getLogger import numpy from sklearn.pipeline import make_pipeline from sklearn.preprocessing import", "model.fit(X) model_onnx = convert_sklearn( model, 'binarizer', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx,", "lambda X: session.run(None, {'input': X})[0] def test_onnx_helper_load_save(self): model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5)) X =", "1.1], [0.2, 2.2], [0.4, 2.2], [0.2, 2.4]]) model.fit(X) model_onnx = convert_sklearn( model, 'pipe3',", "model, 'pipe3', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model = load_onnx_model(filename)", "def setUp(self): logger = getLogger('skl2onnx') logger.disabled = True def get_model(self, model): try: import", "StandardScaler()) X = numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4, 2.2], [0.2, 2.4]]) model.fit(X) model_onnx", "class TestOnnxHelper(ExtTestCase): def setUp(self): logger = getLogger('skl2onnx') logger.disabled = True def get_model(self, model):", "filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model = load_onnx_model(filename) list(enumerate_model_node_outputs(model)) new_model = select_model_inputs_outputs(model, 'variable')", "= convert_sklearn( model, 'binarizer', [('input', FloatTensorType([1, 2]))]) filename = \"temp_onnx_helper_load_save.onnx\" save_onnx_model(model_onnx, filename) model", "<filename>_unittests/ut_sklapi/test_onnx_helper.py \"\"\" @brief test log(time=2s) \"\"\" import unittest from logging import getLogger import", "def test_onnx_helper_load_save_init(self): model = make_pipeline(Binarizer(), OneHotEncoder( sparse=False), StandardScaler()) X = numpy.array([[0.1, 1.1], [0.2,", "= self.get_model(new_model) X = X.astype(numpy.float32) X1 = tr1(X) X2 = tr2(X) self.assertEqual(X1.shape, (2," ]
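Outside the test harness, the same helpers compose into a short script: convert a fitted pipeline, list the intermediate output names, then cut the graph at one of them. This is a condensed sketch of what the tests exercise; the output name 'variable' is simply the one the converter happened to assign in these tests and is not guaranteed in general.

import numpy
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Binarizer, StandardScaler
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx.helpers.onnx_helper import (
    enumerate_model_node_outputs, select_model_inputs_outputs)

X = numpy.array([[0.1, 1.1], [0.2, 2.2]], dtype=numpy.float32)
pipe = make_pipeline(StandardScaler(), Binarizer(threshold=0.5)).fit(X)
onx = convert_sklearn(pipe, 'binarizer', [('input', FloatTensorType([1, 2]))])

for name in enumerate_model_node_outputs(onx):
    print(name)  # every intermediate output that can serve as a cut point
truncated = select_model_inputs_outputs(onx, 'variable')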
[ ": ... def handle_data( self, data ) : data = string.replace( data, '\\r',", "import string class Stripper( SGMLParser ) : ... def handle_data( self, data )", "Stripper( SGMLParser ) : ... def handle_data( self, data ) : data =", ") : ... def handle_data( self, data ) : data = string.replace( data,", "class Stripper( SGMLParser ) : ... def handle_data( self, data ) : data", "<reponame>tdiprima/code import string class Stripper( SGMLParser ) : ... def handle_data( self, data", "handle_data( self, data ) : data = string.replace( data, '\\r', '' ) ...", "SGMLParser ) : ... def handle_data( self, data ) : data = string.replace(", "... def handle_data( self, data ) : data = string.replace( data, '\\r', ''", "def handle_data( self, data ) : data = string.replace( data, '\\r', '' )", "string class Stripper( SGMLParser ) : ... def handle_data( self, data ) :" ]
[ "# cls_reg_targets = bbox_target( # pos_proposals, # neg_proposals, # pos_gt_bboxes, # pos_gt_labels, #", "i in range(mask_conv): # conv_m = ConvModule(1, 1, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) #", "torch import torch.nn.functional as F import mmcv from mmdet.core import mask_target, mask_bg_target, force_fp32,", "assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs >", "branch self.reg_convs, self.reg_fcs, self.reg_last_dim = \\ self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if self.num_shared_fcs ==", "(is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i", "is not None: # pos_inds = labels > 0 # if self.reg_class_agnostic: #", "from ..builder import build_loss @HEADS.register_module class ConvFCBBoxHead_MH(BBoxHead): \"\"\"More general bbox head, with shared", "num_shared_convs self.using_mask = using_mask self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs", "in sampling_reuslt] pos_assigned_gt_inds = [ res.pos_gt_assigned_gt_inds for res in sampling_reuslt ] # bbox_overlaps()", "for res in sampling_results] # pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] #", "def get_iou_target(self, sampling_reuslt, bbox_pred, bbox_target): pos_proposals = [res.pos_bboxes for res in sampling_reuslt] pos_assigned_gt_inds", "in self.shared_fcs: x = self.relu(fc(x)) # separate branches x_cls = x x_reg =", "if self.with_cls else None bbox_pred = self.fc_reg(x_reg) if self.with_reg else None if self.with_IoU:", "mask_pred): # shared part if self.using_mask: # for conv in self.mask_conv: # mask_pred", "sampling_reuslt ] # bbox_overlaps() def get_mask_target(self, sampling_results, gt_masks, rcnn_train_cfg): # pos_proposals = [res.pos_bboxes", "only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if (is_shared or", "as nn from ..registry import HEADS from ..utils import ConvModule from .bbox_head import", "x + mask_pred x = self.combine(x) else: x = self.reduce_con(x) x = torch.cat([x,", "part if self.using_mask: # for conv in self.mask_conv: # mask_pred = conv(mask_pred) #", "bbox_targets, # bbox_weights, # reduction_override=None): # losses = dict() # if cls_score is", "res in sampling_results] assigned_gt_inds = [ res.inds for res in sampling_results ] mask_targets", "= \\ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim # add cls", "aquire and loss calculation def get_iou_target(self, sampling_reuslt, bbox_pred, bbox_target): pos_proposals = [res.pos_bboxes for", "ConvModule(self.mask_channels, self.mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # add shared convs and fcs if self.proto_combine", "/-> cls convs -> cls fcs -> cls shared convs -> shared fcs", "shared branch, only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if", "mask_pred = self.hint_conv(mask_pred) if self.proto_combine == 'con': x = torch.cat([x, mask_pred], dim=1) x", "> 0: assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs == 0", "'sum': x = x + mask_pred x = self.combine(x) else: x = self.reduce_con(x)", "losses = dict() # if cls_score is not None: # avg_factor = max(torch.sum(label_weights", ".bbox_head import BBoxHead import torch import torch.nn.functional as F import mmcv from mmdet.core", "x_reg = 
self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred = self.fc_reg(x_reg)", "nn.Linear(self.cls_last_dim, self.num_classes) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes)", "optional separated branches. /-> cls convs -> cls fcs -> cls shared convs", "== 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area self.relu", "isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) # @force_fp32(apply_to=('cls_score', 'bbox_pred')) # def loss(self, # cls_score,", "sampling_results, gt_masks, rcnn_train_cfg): # pos_proposals = [res.pos_bboxes for res in sampling_results] # pos_assigned_gt_inds", "0: # for shared branch, only consider self.with_avg_pool # for separated branches, also", "neg_proposals, # pos_gt_bboxes, # pos_gt_labels, # rcnn_train_cfg, # reg_classes, # target_means=self.target_means, # target_stds=self.target_stds)", "# losses['loss_bbox_refine'] = self.loss_bbox( # pos_bbox_pred, # bbox_targets[pos_inds], # bbox_weights[pos_inds], # avg_factor=bbox_targets.size(0), #", "SharedFCBBoxHead_MH(ConvFCBBoxHead_MH): def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs): assert num_fcs >= 1 super(SharedFCBBoxHead_MH, self).__init__(", "if num_branch_fcs > 0: # for shared branch, only consider self.with_avg_pool # for", "pos_gt_labels, # rcnn_train_cfg, # reg_classes, # target_means=self.target_means, # target_stds=self.target_stds) # return cls_reg_targets def", "**kwargs): super(ConvFCBBoxHead_MH, self).__init__(*args, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs +", "self.feature_reduce = feature_reduce if with_IoU: self.iou_loss = build_loss(loss_iou) # self.hint_conv = ConvModule(self.mask_channels, self.mask_channels,", "gt_masks, rcnn_train_cfg) mask_bg_targets = mask_bg_target(proposals, gt_masks, rcnn_train_cfg) return mask_targets, mask_bg_targets # def get_target(self,", "self.hint_conv = ConvModule(self.mask_channels, self.mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # add shared convs and fcs", "4 * self.num_classes) self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) if self.with_IoU: self.IoU_reg = nn.Linear(self.reg_last_dim, self.num_classes)", "self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area self.relu = nn.ReLU(inplace=True)", "self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg", "= feature_reduce if with_IoU: self.iou_loss = build_loss(loss_iou) # self.hint_conv = ConvModule(self.mask_channels, self.mask_channels, 1,", "for m in module_list.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) # @force_fp32(apply_to=('cls_score', 'bbox_pred'))", "self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area self.relu = nn.ReLU(inplace=True) # reconstruct fc_cls and", "self.mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # add shared convs and fcs if self.proto_combine ==", "in sampling_results] # neg_proposals = [torch.tensor([]) for res in sampling_results] # pos_gt_bboxes =", "assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs == 0 and num_cls_fcs", "bbox_pred @HEADS.register_module class SharedFCBBoxHead_MH(ConvFCBBoxHead_MH): def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs): 
assert num_fcs >=", "\"\"\"More general bbox head, with shared conv and fc layers and two optional", "conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.view(x_cls.size(0),", "# conv_m = ConvModule(1, 1, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv.append(conv_m) self.shared_convs, self.shared_fcs,", "self.cls_fcs, self.reg_fcs]: for m in module_list.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) #", "if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.view(x.size(0), -1)", "= with_IoU self.mask_channels = mask_channels self.proto_combine = proto_combine self.feature_reduce = feature_reduce if with_IoU:", "gt_masks, rcnn_train_cfg) proposals = [res.bboxes for res in sampling_results] assigned_gt_inds = [ res.inds", "self.hint_conv(mask_pred) if self.proto_combine == 'con': x = torch.cat([x, mask_pred], dim=1) x = self.combine(x)", "label_weights, # avg_factor=avg_factor, # reduction_override=reduction_override) # losses['acc_refine'] = accuracy(cls_score, labels) # if bbox_pred", "for res in sampling_results # ] # mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, # gt_masks,", "assigned_gt_inds = [ res.inds for res in sampling_results ] mask_targets = mask_target(proposals, assigned_gt_inds,", "for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool:", "= num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels =", "self.reg_class_agnostic else 4 * self.num_classes) self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) if self.with_IoU: self.IoU_reg =", "else: x = self.reduce_con(x) x = torch.cat([x, mask_pred], dim=1) if self.num_shared_convs > 0:", "for res in sampling_results] # neg_proposals = [torch.tensor([]) for res in sampling_results] #", "self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim # add cls specific branch self.cls_convs,", "self.reg_last_dim *= self.roi_feat_area self.relu = nn.ReLU(inplace=True) # reconstruct fc_cls and fc_reg since input", "convs -> avg pool (optional) -> fcs \"\"\" last_layer_dim = in_channels # add", "# label_weights, # bbox_targets, # bbox_weights, # reduction_override=None): # losses = dict() #", "= [res.pos_bboxes for res in sampling_results] # pos_assigned_gt_inds = [ # res.pos_assigned_gt_inds for", "+ self.mask_channels if proto_combine == 'con' else self.in_channels self.combine = ConvModule(combine_channels, conv_out_channels, 1,", "self.norm_cfg = norm_cfg self.using_bg = using_bg self.using_refine = using_refine self.with_IoU = with_IoU self.mask_channels", "labels) # if bbox_pred is not None: # pos_inds = labels > 0", "neg_proposals = [torch.tensor([]) for res in sampling_results] # pos_gt_bboxes = [res.pos_gt_bboxes for res", "self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs", "dict() # if cls_score is not None: # avg_factor = max(torch.sum(label_weights > 0).float().item(),", "in sampling_results] # pos_gt_labels = [res.pos_gt_labels for res in sampling_results] # reg_classes =", "if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) self.fc_reg =", "conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg 
self.using_bg = using_bg", "== 'con': x = torch.cat([x, mask_pred], dim=1) x = self.combine(x) elif self.proto_combine ==", "self.combine(x) else: x = self.reduce_con(x) x = torch.cat([x, mask_pred], dim=1) if self.num_shared_convs >", "self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.using_bg = using_bg self.using_refine", "conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels,", "norm_cfg=norm_cfg) else: combine_channels = self.in_channels + self.mask_channels if proto_combine == 'con' else self.in_channels", "last_layer_dim # add cls specific branch self.cls_convs, self.cls_fcs, self.cls_last_dim = \\ self._add_conv_fc_branch( self.num_cls_convs,", "# mask_pred = conv(mask_pred) # mask_pred = self.hint_conv(mask_pred) if self.proto_combine == 'con': x", "x = torch.cat([x, mask_pred], dim=1) if self.num_shared_convs > 0: for conv in self.shared_convs:", "self.proto_combine == 'sum': x = x + mask_pred x = self.combine(x) else: x", "self.num_shared_convs = num_shared_convs self.using_mask = using_mask self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs", "torch.nn as nn from ..registry import HEADS from ..utils import ConvModule from .bbox_head", "x_cls.view(x_cls.size(0), -1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs:", "losses['acc_refine'] = accuracy(cls_score, labels) # if bbox_pred is not None: # pos_inds =", "conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv = nn.ModuleList() # for i in range(mask_conv):", "sampling_results ] mask_targets = mask_target(proposals, assigned_gt_inds, gt_masks, rcnn_train_cfg) mask_bg_targets = mask_bg_target(proposals, gt_masks, rcnn_train_cfg)", "= [torch.tensor([]) for res in sampling_results] # pos_gt_bboxes = [res.pos_gt_bboxes for res in", "+ mask_pred x = self.combine(x) else: x = self.reduce_con(x) x = torch.cat([x, mask_pred],", "import torch.nn.functional as F import mmcv from mmdet.core import mask_target, mask_bg_target, force_fp32, bbox_target,", "# for conv in self.mask_conv: # mask_pred = conv(mask_pred) # mask_pred = self.hint_conv(mask_pred)", "F import mmcv from mmdet.core import mask_target, mask_bg_target, force_fp32, bbox_target, bbox_overlaps from ..losses", "> 0 # if self.reg_class_agnostic: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds] # else: #", "= [res.pos_gt_labels for res in sampling_results] # reg_classes = 1 if self.reg_class_agnostic else", "x = self.combine(x) else: x = self.reduce_con(x) x = torch.cat([x, mask_pred], dim=1) if", "# add branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0:", "conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.view(x_reg.size(0),", "# pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, # 4)[pos_inds, labels[pos_inds]] # losses['loss_bbox_refine'] = self.loss_bbox( #", "+ num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) if", "# bbox_overlaps() def get_mask_target(self, sampling_results, gt_masks, rcnn_train_cfg): # pos_proposals = [res.pos_bboxes for res", "if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.view(x_reg.size(0), -1)", "bbox_pred, bbox_target): pos_proposals = [res.pos_bboxes for res in sampling_reuslt] pos_assigned_gt_inds = [ res.pos_gt_assigned_gt_inds", "assert num_cls_convs == 0 and num_cls_fcs == 0 if 
not self.with_reg: assert num_reg_convs", "= mask_target(proposals, assigned_gt_inds, gt_masks, rcnn_train_cfg) mask_bg_targets = mask_bg_target(proposals, gt_masks, rcnn_train_cfg) return mask_targets, mask_bg_targets", "bbox head, with shared conv and fc layers and two optional separated branches.", "assert num_reg_convs == 0 and num_reg_fcs == 0 self.num_shared_convs = num_shared_convs self.using_mask =", "= num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg =", "range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule(", "= accuracy(cls_score, labels) # if bbox_pred is not None: # pos_inds = labels", "sampling_results] # reg_classes = 1 if self.reg_class_agnostic else self.num_classes # cls_reg_targets = bbox_target(", "# rcnn_train_cfg, # reg_classes, # target_means=self.target_means, # target_stds=self.target_stds) # return cls_reg_targets def forward(self,", "self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) if self.with_IoU: self.IoU_reg = nn.Linear(self.reg_last_dim, self.num_classes) def _add_conv_fc_branch(self, num_branch_convs,", "and fc layers and two optional separated branches. /-> cls convs -> cls", "self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0", "-1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs: x_reg", "= ConvModule(self.in_channels, conv_out_channels - mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) else: combine_channels = self.in_channels +", "= (4 if self.reg_class_agnostic else 4 * self.num_classes) self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) if", "num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not", "# bbox_weights[pos_inds], # avg_factor=bbox_targets.size(0), # reduction_override=reduction_override) # return losses #TODO: add IoU target", "\\-> reg convs -> reg fcs -> reg \"\"\" # noqa: W605 def", "res in sampling_results ] mask_targets = mask_target(proposals, assigned_gt_inds, gt_masks, rcnn_train_cfg) mask_bg_targets = mask_bg_target(proposals,", "rcnn_train_cfg) mask_bg_targets = mask_bg_target(proposals, gt_masks, rcnn_train_cfg) return mask_targets, mask_bg_targets # def get_target(self, sampling_results,", "0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) last_layer_dim =", "return branch_convs, branch_fcs, last_layer_dim def init_weights(self): super(ConvFCBBoxHead_MH, self).init_weights() for module_list in [self.shared_fcs, self.cls_fcs,", "res in sampling_results] # neg_proposals = [torch.tensor([]) for res in sampling_results] # pos_gt_bboxes", "self.with_avg_pool: x = self.avg_pool(x) x = x.view(x.size(0), -1) for fc in self.shared_fcs: x", "0 self.num_shared_convs = num_shared_convs self.using_mask = using_mask self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs", "branch_convs = nn.ModuleList() if num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels =", "mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, # gt_masks, rcnn_train_cfg) proposals = [res.bboxes for res in", "-1, # 4)[pos_inds, labels[pos_inds]] # losses['loss_bbox_refine'] = self.loss_bbox( # pos_bbox_pred, # bbox_targets[pos_inds], #", "ConvModule(self.in_channels, conv_out_channels - mask_channels, 1, conv_cfg=conv_cfg, 
norm_cfg=norm_cfg) else: combine_channels = self.in_channels + self.mask_channels", "if self.proto_combine == 'con': x = torch.cat([x, mask_pred], dim=1) x = self.combine(x) elif", "self.with_IoU = with_IoU self.mask_channels = mask_channels self.proto_combine = proto_combine self.feature_reduce = feature_reduce if", "self.with_cls: self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else", "rcnn_train_cfg, # reg_classes, # target_means=self.target_means, # target_stds=self.target_stds) # return cls_reg_targets def forward(self, x,", "= self.relu(fc(x_cls)) for conv in self.reg_convs: x_reg = conv(x_reg) if x_reg.dim() > 2:", "self.fc_cls(x_cls) if self.with_cls else None bbox_pred = self.fc_reg(x_reg) if self.with_reg else None if", "num_reg_convs == 0 and num_reg_fcs == 0 self.num_shared_convs = num_shared_convs self.using_mask = using_mask", "channels are changed if self.with_cls: self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes) if self.with_reg: out_dim_reg =", "res in sampling_results] # pos_gt_labels = [res.pos_gt_labels for res in sampling_results] # reg_classes", "reg \"\"\" # noqa: W605 def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0,", "1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) else: combine_channels = self.in_channels + self.mask_channels if proto_combine == 'con'", "= x + mask_pred x = self.combine(x) else: x = self.reduce_con(x) x =", "# add shared convs and fcs if self.proto_combine == 'None': if self.feature_reduce: self.reduce_con", "import accuracy from ..builder import build_loss @HEADS.register_module class ConvFCBBoxHead_MH(BBoxHead): \"\"\"More general bbox head,", "[res.pos_gt_bboxes for res in sampling_results] # pos_gt_labels = [res.pos_gt_labels for res in sampling_results]", "in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs: x_reg = conv(x_reg) if", "[res.pos_bboxes for res in sampling_results] # neg_proposals = [torch.tensor([]) for res in sampling_results]", "branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) last_layer_dim = self.conv_out_channels # add", "> 0) if num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs ==", "conv_cfg=conv_cfg, norm_cfg=norm_cfg) else: combine_channels = self.in_channels + self.mask_channels if proto_combine == 'con' else", "self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4", "in sampling_results ] mask_targets = mask_target(proposals, assigned_gt_inds, gt_masks, rcnn_train_cfg) mask_bg_targets = mask_bg_target(proposals, gt_masks,", "if self.reg_class_agnostic else self.num_classes # cls_reg_targets = bbox_target( # pos_proposals, # neg_proposals, #", "self.num_classes) def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False): \"\"\"Add shared or separable branch convs", "self.IoU_reg(x_reg) return cls_score, bbox_pred, IoU_pred return cls_score, bbox_pred @HEADS.register_module class SharedFCBBoxHead_MH(ConvFCBBoxHead_MH): def __init__(self,", "sampling_results] # pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] # pos_gt_labels = [res.pos_gt_labels", "0) # @force_fp32(apply_to=('cls_score', 'bbox_pred')) # def loss(self, # cls_score, # bbox_pred, # labels,", "fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) 
branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels))", "not None: # pos_inds = labels > 0 # if self.reg_class_agnostic: # pos_bbox_pred", "= self.in_channels + self.mask_channels if proto_combine == 'con' else self.in_channels self.combine = ConvModule(combine_channels,", "are changed if self.with_cls: self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes) if self.with_reg: out_dim_reg = (4", "1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv = nn.ModuleList() # for i in range(mask_conv): #", "# pos_gt_bboxes, # pos_gt_labels, # rcnn_train_cfg, # reg_classes, # target_means=self.target_means, # target_stds=self.target_stds) #", "self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs ==", "pos_assigned_gt_inds = [ res.pos_gt_assigned_gt_inds for res in sampling_reuslt ] # bbox_overlaps() def get_mask_target(self,", "# pos_inds = labels > 0 # if self.reg_class_agnostic: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0),", "pos_gt_bboxes, # pos_gt_labels, # rcnn_train_cfg, # reg_classes, # target_means=self.target_means, # target_stds=self.target_stds) # return", "conv in self.mask_conv: # mask_pred = conv(mask_pred) # mask_pred = self.hint_conv(mask_pred) if self.proto_combine", "reg convs -> reg fcs -> reg \"\"\" # noqa: W605 def __init__(self,", "> 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0:", "== 0 and num_reg_fcs == 0 self.num_shared_convs = num_shared_convs self.using_mask = using_mask self.num_shared_fcs", "reduction_override=reduction_override) # losses['acc_refine'] = accuracy(cls_score, labels) # if bbox_pred is not None: #", "self.iou_loss = build_loss(loss_iou) # self.hint_conv = ConvModule(self.mask_channels, self.mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # add", "nn.Linear(self.reg_last_dim, out_dim_reg) if self.with_IoU: self.IoU_reg = nn.Linear(self.reg_last_dim, self.num_classes) def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels,", "avg_factor=bbox_targets.size(0), # reduction_override=reduction_override) # return losses #TODO: add IoU target aquire and loss", "self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area", "shared conv and fc layers and two optional separated branches. 
/-> cls convs", "-> shared fcs \\-> reg convs -> reg fcs -> reg \"\"\" #", "bbox_target( # pos_proposals, # neg_proposals, # pos_gt_bboxes, # pos_gt_labels, # rcnn_train_cfg, # reg_classes,", "0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0", "self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.view(x_reg.size(0), -1) for fc in self.reg_fcs: x_reg", "+ num_reg_convs + num_reg_fcs > 0) if num_cls_convs > 0 or num_reg_convs >", "self.shared_out_channels) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim", "shared convs and fcs if self.proto_combine == 'None': if self.feature_reduce: self.reduce_con = ConvModule(self.in_channels,", "super(ConvFCBBoxHead_MH, self).init_weights() for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]: for m in module_list.modules(): if", "combine_channels = self.in_channels + self.mask_channels if proto_combine == 'con' else self.in_channels self.combine =", "bbox_pred.view(bbox_pred.size(0), -1, # 4)[pos_inds, labels[pos_inds]] # losses['loss_bbox_refine'] = self.loss_bbox( # pos_bbox_pred, # bbox_targets[pos_inds],", "= self.avg_pool(x_reg) x_reg = x_reg.view(x_reg.size(0), -1) for fc in self.reg_fcs: x_reg = self.relu(fc(x_reg))", "if self.with_IoU: self.IoU_reg = nn.Linear(self.reg_last_dim, self.num_classes) def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False): \"\"\"Add", "# bbox_targets[pos_inds], # bbox_weights[pos_inds], # avg_factor=bbox_targets.size(0), # reduction_override=reduction_override) # return losses #TODO: add", "for res in sampling_results] # pos_gt_labels = [res.pos_gt_labels for res in sampling_results] #", "cls convs -> cls fcs -> cls shared convs -> shared fcs \\->", "# labels, # label_weights, # bbox_targets, # bbox_weights, # reduction_override=None): # losses =", "if proto_combine == 'con' else self.in_channels self.combine = ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)", "conv_m = ConvModule(1, 1, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv.append(conv_m) self.shared_convs, self.shared_fcs, last_layer_dim", "= build_loss(loss_iou) # self.hint_conv = ConvModule(self.mask_channels, self.mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # add shared", "self.in_channels self.combine = ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv = nn.ModuleList() #", "num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs ==", "res in sampling_results] # pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] # pos_gt_labels", "avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) 
# losses['loss_cls_refine'] = self.loss_cls( # cls_score, #", "== 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if", "# losses['acc_refine'] = accuracy(cls_score, labels) # if bbox_pred is not None: # pos_inds", "[res.pos_gt_labels for res in sampling_results] # reg_classes = 1 if self.reg_class_agnostic else self.num_classes", "= self.conv_out_channels # add branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs", "(4 if self.reg_class_agnostic else 4 * self.num_classes) self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) if self.with_IoU:", "x for conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() > 2: if", "elif self.proto_combine == 'sum': x = x + mask_pred x = self.combine(x) else:", "self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if", "shared fcs \\-> reg convs -> reg fcs -> reg \"\"\" # noqa:", "reduction_override=reduction_override) # return losses #TODO: add IoU target aquire and loss calculation def", "self.feature_reduce: self.reduce_con = ConvModule(self.in_channels, conv_out_channels - mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) else: combine_channels =", "return cls_reg_targets def forward(self, x, mask_pred): # shared part if self.using_mask: # for", "# reconstruct fc_cls and fc_reg since input channels are changed if self.with_cls: self.fc_cls", "= [res.bboxes for res in sampling_results] assigned_gt_inds = [ res.inds for res in", "mask_channels self.proto_combine = proto_combine self.feature_reduce = feature_reduce if with_IoU: self.iou_loss = build_loss(loss_iou) #", "in self.reg_convs: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg =", "def forward(self, x, mask_pred): # shared part if self.using_mask: # for conv in", "specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared", "if bbox_pred is not None: # pos_inds = labels > 0 # if", "self.reduce_con = ConvModule(self.in_channels, conv_out_channels - mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) else: combine_channels = self.in_channels", "# pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds] # else: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, #", "[ # res.pos_assigned_gt_inds for res in sampling_results # ] # mask_targets = mask_target(pos_proposals,", "else: combine_channels = self.in_channels + self.mask_channels if proto_combine == 'con' else self.in_channels self.combine", "res in sampling_results] # pos_assigned_gt_inds = [ # res.pos_assigned_gt_inds for res in sampling_results", "[ res.pos_gt_assigned_gt_inds for res in sampling_reuslt ] # bbox_overlaps() def get_mask_target(self, sampling_results, gt_masks,", "# res.pos_assigned_gt_inds for res in sampling_results # ] # mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,", "norm_cfg=norm_cfg) # self.mask_conv = nn.ModuleList() # for i in range(mask_conv): # conv_m =", "rcnn_train_cfg) return mask_targets, mask_bg_targets # def get_target(self, sampling_results, gt_bboxes, gt_labels, # rcnn_train_cfg): #", "for conv in self.mask_conv: # mask_pred = conv(mask_pred) # mask_pred = self.hint_conv(mask_pred) if", "in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x =", "cls_score, bbox_pred @HEADS.register_module class SharedFCBBoxHead_MH(ConvFCBBoxHead_MH): def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs): assert num_fcs", "x = self.combine(x) elif 
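    # A brief channel-bookkeeping sketch (added for clarity, not part of the
    # original file), assuming the defaults in_channels=256, mask_channels=256
    # and conv_out_channels=256 from the constructor above:
    #
    #   x = torch.rand(2, 256, 7, 7)          # RoI feature
    #   mask_pred = torch.rand(2, 256, 7, 7)  # mask prototype, same spatial size
    #   # 'con': torch.cat([x, mask_pred], dim=1) -> (2, 512, 7, 7),
    #   #        then self.combine (a 1x1 ConvModule) -> (2, 256, 7, 7)
    #   # 'sum': x + mask_pred -> (2, 256, 7, 7), then self.combine
    #   # 'None' with feature_reduce: self.reduce_con shrinks x to
    #   #        conv_out_channels - mask_channels channels, so concatenating
    #   #        mask_pred lands back on exactly conv_out_channels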
    def _add_conv_fc_branch(self,
                            num_branch_convs,
                            num_branch_fcs,
                            in_channels,
                            is_shared=False):
        """Add shared or separable branch

        convs -> avg pool (optional) -> fcs
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if num_branch_convs > 0:
            for i in range(num_branch_convs):
                conv_in_channels = (
                    last_layer_dim if i == 0 else self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if num_branch_fcs > 0:
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if (is_shared
                    or self.num_shared_fcs == 0) and not self.with_avg_pool:
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (
                    last_layer_dim if i == 0 else self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return branch_convs, branch_fcs, last_layer_dim

    def init_weights(self):
        super(ConvFCBBoxHead_MH, self).init_weights()
        for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
            for m in module_list.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_uniform_(m.weight)
                    nn.init.constant_(m.bias, 0)
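    # Illustrative shape walk-through (an added sketch, not original code) of
    # _add_conv_fc_branch for a shared branch with num_branch_convs=0,
    # num_branch_fcs=2, in_channels=256, roi_feat_size=7 and with_avg_pool
    # disabled:
    #
    #   last_layer_dim = 256           # no convs, so unchanged
    #   last_layer_dim *= 7 * 7        # flattened to 12544 before the fcs
    #   branch_fcs[0] = nn.Linear(12544, 1024)
    #   branch_fcs[1] = nn.Linear(1024, 1024)
    #   # returned last_layer_dim == fc_out_channels == 1024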
    # @force_fp32(apply_to=('cls_score', 'bbox_pred'))
    # def loss(self,
    #          cls_score,
    #          bbox_pred,
    #          labels,
    #          label_weights,
    #          bbox_targets,
    #          bbox_weights,
    #          reduction_override=None):
    #     losses = dict()
    #     if cls_score is not None:
    #         avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
    #         losses['loss_cls_refine'] = self.loss_cls(
    #             cls_score,
    #             labels,
    #             label_weights,
    #             avg_factor=avg_factor,
    #             reduction_override=reduction_override)
    #         losses['acc_refine'] = accuracy(cls_score, labels)
    #     if bbox_pred is not None:
    #         pos_inds = labels > 0
    #         if self.reg_class_agnostic:
    #             pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds]
    #         else:
    #             pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1,
    #                                            4)[pos_inds, labels[pos_inds]]
    #         losses['loss_bbox_refine'] = self.loss_bbox(
    #             pos_bbox_pred,
    #             bbox_targets[pos_inds],
    #             bbox_weights[pos_inds],
    #             avg_factor=bbox_targets.size(0),
    #             reduction_override=reduction_override)
    #     return losses
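    # Construction sketch (illustrative values only, not taken from this
    # repo's configs); the keyword arguments besides the ones defined in this
    # class come from the base BBoxHead:
    #
    #   head = ConvFCBBoxHead_MH(
    #       num_shared_fcs=2,
    #       in_channels=256,
    #       roi_feat_size=7,
    #       num_classes=81,
    #       mask_channels=256,
    #       proto_combine='con')
    #   cls_score, bbox_pred = head(roi_feats, mask_pred)  # with_IoU=False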
    # TODO: add IoU target acquisition and loss calculation
    def get_iou_target(self, sampling_result, bbox_pred, bbox_target):
        pos_proposals = [res.pos_bboxes for res in sampling_result]
        pos_assigned_gt_inds = [
            res.pos_gt_assigned_gt_inds for res in sampling_result
        ]
        # bbox_overlaps()

    def get_mask_target(self, sampling_results, gt_masks, rcnn_train_cfg):
        # pos_proposals = [res.pos_bboxes for res in sampling_results]
        # pos_assigned_gt_inds = [
        #     res.pos_assigned_gt_inds for res in sampling_results
        # ]
        # mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
        #                            gt_masks, rcnn_train_cfg)
        # mask targets are computed for all sampled proposals (res.bboxes),
        # not only the positives as in the commented-out variant above
        proposals = [res.bboxes for res in sampling_results]
        assigned_gt_inds = [res.inds for res in sampling_results]
        mask_targets = mask_target(proposals, assigned_gt_inds, gt_masks,
                                   rcnn_train_cfg)
        mask_bg_targets = mask_bg_target(proposals, gt_masks, rcnn_train_cfg)
        return mask_targets, mask_bg_targets

    # def get_target(self, sampling_results, gt_bboxes, gt_labels,
    #                rcnn_train_cfg):
    #     pos_proposals = [res.pos_bboxes for res in sampling_results]
    #     neg_proposals = [torch.tensor([]) for res in sampling_results]
    #     pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
    #     pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
    #     reg_classes = 1 if self.reg_class_agnostic else self.num_classes
    #
cls_reg_targets = bbox_target( # pos_proposals, # neg_proposals, # pos_gt_bboxes, #", "num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) if num_cls_convs > 0", "m in module_list.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) # @force_fp32(apply_to=('cls_score', 'bbox_pred')) #", "else: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, # 4)[pos_inds, labels[pos_inds]] # losses['loss_bbox_refine'] = self.loss_bbox(", "for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else", "self.reg_last_dim = \\ self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if self.num_shared_fcs == 0 and not", "using_mask self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs", "# pos_proposals = [res.pos_bboxes for res in sampling_results] # pos_assigned_gt_inds = [ #", "mask_target(pos_proposals, pos_assigned_gt_inds, # gt_masks, rcnn_train_cfg) proposals = [res.bboxes for res in sampling_results] assigned_gt_inds", "= x for conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() > 2:", "self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls)", "# gt_masks, rcnn_train_cfg) proposals = [res.bboxes for res in sampling_results] assigned_gt_inds = [", "cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred = self.fc_reg(x_reg) if self.with_reg else", "pos_proposals = [res.pos_bboxes for res in sampling_results] # pos_assigned_gt_inds = [ # res.pos_assigned_gt_inds", "if self.with_reg else None if self.with_IoU: IoU_pred = self.IoU_reg(x_reg) return cls_score, bbox_pred, IoU_pred", "@HEADS.register_module class SharedFCBBoxHead_MH(ConvFCBBoxHead_MH): def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs): assert num_fcs >= 1", "self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.using_bg = using_bg self.using_refine = using_refine self.with_IoU", "= fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.using_bg = using_bg self.using_refine =", "x, mask_pred): # shared part if self.using_mask: # for conv in self.mask_conv: #", "x_cls = self.avg_pool(x_cls) x_cls = x_cls.view(x_cls.size(0), -1) for fc in self.cls_fcs: x_cls =", "# avg_factor=bbox_targets.size(0), # reduction_override=reduction_override) # return losses #TODO: add IoU target aquire and", "mask_targets = mask_target(proposals, assigned_gt_inds, gt_masks, rcnn_train_cfg) mask_bg_targets = mask_bg_target(proposals, gt_masks, rcnn_train_cfg) return mask_targets,", "IoU target aquire and loss calculation def get_iou_target(self, sampling_reuslt, bbox_pred, bbox_target): pos_proposals =", "nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) # @force_fp32(apply_to=('cls_score', 'bbox_pred')) # def loss(self, # cls_score, #", "# rcnn_train_cfg): # pos_proposals = [res.pos_bboxes for res in sampling_results] # neg_proposals =", "separable branch convs -> avg pool (optional) -> fcs \"\"\" last_layer_dim = in_channels", "if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.view(x_cls.size(0), -1) for fc in self.cls_fcs:", "x = torch.cat([x, mask_pred], dim=1) x = self.combine(x) elif self.proto_combine == 'sum': x", "== 0 if not self.with_cls: assert num_cls_convs == 0 and num_cls_fcs == 0", "[ res.inds for res in sampling_results ] mask_targets = mask_target(proposals, assigned_gt_inds, gt_masks, 
rcnn_train_cfg)", "self.cls_convs, self.cls_fcs, self.cls_last_dim = \\ self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) # add reg specific", "# if bbox_pred is not None: # pos_inds = labels > 0 #", "num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg", "bbox_target, bbox_overlaps from ..losses import accuracy from ..builder import build_loss @HEADS.register_module class ConvFCBBoxHead_MH(BBoxHead):", "self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) # add reg specific branch self.reg_convs, self.reg_fcs, self.reg_last_dim = \\", "conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.view(x.size(0),", "padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs", "fcs -> cls shared convs -> shared fcs \\-> reg convs -> reg", "fcs -> reg \"\"\" # noqa: W605 def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0,", "= num_shared_convs self.using_mask = using_mask self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs =", "losses #TODO: add IoU target aquire and loss calculation def get_iou_target(self, sampling_reuslt, bbox_pred,", "i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs,", "# reduction_override=reduction_override) # return losses #TODO: add IoU target aquire and loss calculation", "target_means=self.target_means, # target_stds=self.target_stds) # return cls_reg_targets def forward(self, x, mask_pred): # shared part", "or separable branch convs -> avg pool (optional) -> fcs \"\"\" last_layer_dim =", "convs and fcs if self.proto_combine == 'None': if self.feature_reduce: self.reduce_con = ConvModule(self.in_channels, conv_out_channels", "\"\"\"Add shared or separable branch convs -> avg pool (optional) -> fcs \"\"\"", "= norm_cfg self.using_bg = using_bg self.using_refine = using_refine self.with_IoU = with_IoU self.mask_channels =", "using_refine self.with_IoU = with_IoU self.mask_channels = mask_channels self.proto_combine = proto_combine self.feature_reduce = feature_reduce", "mmdet.core import mask_target, mask_bg_target, force_fp32, bbox_target, bbox_overlaps from ..losses import accuracy from ..builder", "last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1,", "ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) last_layer_dim = self.conv_out_channels # add branch", "separated branches, also consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and not", "in [self.shared_fcs, self.cls_fcs, self.reg_fcs]: for m in module_list.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias,", "# if cls_score is not None: # avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)", "pos_bbox_pred, # bbox_targets[pos_inds], # bbox_weights[pos_inds], # avg_factor=bbox_targets.size(0), # reduction_override=reduction_override) # return losses #TODO:", "-1) for fc in self.shared_fcs: x = self.relu(fc(x)) # separate branches x_cls =", "= num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = 
conv_out_channels self.fc_out_channels =", "self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) # add reg specific branch self.reg_convs, self.reg_fcs, self.reg_last_dim =", "mask_target(proposals, assigned_gt_inds, gt_masks, rcnn_train_cfg) mask_bg_targets = mask_bg_target(proposals, gt_masks, rcnn_train_cfg) return mask_targets, mask_bg_targets #", "= self.loss_bbox( # pos_bbox_pred, # bbox_targets[pos_inds], # bbox_weights[pos_inds], # avg_factor=bbox_targets.size(0), # reduction_override=reduction_override) #", "# @force_fp32(apply_to=('cls_score', 'bbox_pred')) # def loss(self, # cls_score, # bbox_pred, # labels, #", "# cls_score, # labels, # label_weights, # avg_factor=avg_factor, # reduction_override=reduction_override) # losses['acc_refine'] =", "in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs >", "if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *=", "num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels", "pos_inds = labels > 0 # if self.reg_class_agnostic: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds]", "self.mask_conv.append(conv_m) self.shared_convs, self.shared_fcs, last_layer_dim = \\ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels =", "x = x.view(x.size(0), -1) for fc in self.shared_fcs: x = self.relu(fc(x)) # separate", "mask_target, mask_bg_target, force_fp32, bbox_target, bbox_overlaps from ..losses import accuracy from ..builder import build_loss", "for separated branches, also consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and", "as F import mmcv from mmdet.core import mask_target, mask_bg_target, force_fp32, bbox_target, bbox_overlaps from", "= bbox_pred.view(bbox_pred.size(0), 4)[pos_inds] # else: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, # 4)[pos_inds, labels[pos_inds]]", "bbox_pred = self.fc_reg(x_reg) if self.with_reg else None if self.with_IoU: IoU_pred = self.IoU_reg(x_reg) return", "num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs", "accuracy from ..builder import build_loss @HEADS.register_module class ConvFCBBoxHead_MH(BBoxHead): \"\"\"More general bbox head, with", "0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs", "# mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, # gt_masks, rcnn_train_cfg) proposals = [res.bboxes for res", "convs -> reg fcs -> reg \"\"\" # noqa: W605 def __init__(self, num_shared_convs=0,", "if with_IoU: self.iou_loss = build_loss(loss_iou) # self.hint_conv = ConvModule(self.mask_channels, self.mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)", "== 0 if not self.with_reg: assert num_reg_convs == 0 and num_reg_fcs == 0", "if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for", "cls_score, # bbox_pred, # labels, # label_weights, # bbox_targets, # bbox_weights, # reduction_override=None):", "reduction_override=None): # losses = dict() # if cls_score is not None: # avg_factor", "def get_target(self, sampling_results, gt_bboxes, gt_labels, # rcnn_train_cfg): # pos_proposals = [res.pos_bboxes for res", "# pos_gt_bboxes = 
[res.pos_gt_bboxes for res in sampling_results] # pos_gt_labels = [res.pos_gt_labels for", "fcs \\-> reg convs -> reg fcs -> reg \"\"\" # noqa: W605", "assert num_fcs >= 1 super(SharedFCBBoxHead_MH, self).__init__( num_shared_convs=0, num_shared_fcs=num_fcs, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels,", "specific conv layers branch_convs = nn.ModuleList() if num_branch_convs > 0: for i in", "for res in sampling_results] assigned_gt_inds = [ res.inds for res in sampling_results ]", "range(mask_conv): # conv_m = ConvModule(1, 1, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv.append(conv_m) self.shared_convs,", "fc_cls and fc_reg since input channels are changed if self.with_cls: self.fc_cls = nn.Linear(self.cls_last_dim,", "separated branches. /-> cls convs -> cls fcs -> cls shared convs ->", "= using_bg self.using_refine = using_refine self.with_IoU = with_IoU self.mask_channels = mask_channels self.proto_combine =", "if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg,", "rcnn_train_cfg): # pos_proposals = [res.pos_bboxes for res in sampling_results] # neg_proposals = [torch.tensor([])", "since input channels are changed if self.with_cls: self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes) if self.with_reg:", "in self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred", "self.with_reg: assert num_reg_convs == 0 and num_reg_fcs == 0 self.num_shared_convs = num_shared_convs self.using_mask", "cls specific branch self.cls_convs, self.cls_fcs, self.cls_last_dim = \\ self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) #", "'con' else self.in_channels self.combine = ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv =", "[torch.tensor([]) for res in sampling_results] # pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]", "# self.mask_conv = nn.ModuleList() # for i in range(mask_conv): # conv_m = ConvModule(1,", "branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared branch, only consider", "num_fcs=2, fc_out_channels=1024, *args, **kwargs): assert num_fcs >= 1 super(SharedFCBBoxHead_MH, self).__init__( num_shared_convs=0, num_shared_fcs=num_fcs, num_cls_convs=0,", "= dict(type='MSELoss', loss_weight=0.5), *args, **kwargs): super(ConvFCBBoxHead_MH, self).__init__(*args, **kwargs) assert (num_shared_convs + num_shared_fcs +", "self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred = self.fc_reg(x_reg) if self.with_reg", "*args, **kwargs): assert num_fcs >= 1 super(SharedFCBBoxHead_MH, self).__init__( num_shared_convs=0, num_shared_fcs=num_fcs, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0,", "-> reg \"\"\" # noqa: W605 def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0,", "x_cls = x x_reg = x for conv in self.cls_convs: x_cls = conv(x_cls)", "# reduction_override=None): # losses = dict() # if cls_score is not None: #", "self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg", "> 0).float().item(), 1.) 
# losses['loss_cls_refine'] = self.loss_cls( # cls_score, # labels, # label_weights,", "self.fc_reg(x_reg) if self.with_reg else None if self.with_IoU: IoU_pred = self.IoU_reg(x_reg) return cls_score, bbox_pred,", "return cls_score, bbox_pred, IoU_pred return cls_score, bbox_pred @HEADS.register_module class SharedFCBBoxHead_MH(ConvFCBBoxHead_MH): def __init__(self, num_fcs=2,", "= labels > 0 # if self.reg_class_agnostic: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds] #", "= torch.cat([x, mask_pred], dim=1) if self.num_shared_convs > 0: for conv in self.shared_convs: x", "mask_targets, mask_bg_targets # def get_target(self, sampling_results, gt_bboxes, gt_labels, # rcnn_train_cfg): # pos_proposals =", "num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, mask_channels=256, using_mask = True, with_IoU = False,", "= self.relu(fc(x)) # separate branches x_cls = x x_reg = x for conv", "# separate branches x_cls = x x_reg = x for conv in self.cls_convs:", "padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv.append(conv_m) self.shared_convs, self.shared_fcs, last_layer_dim = \\ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs,", "else self.in_channels self.combine = ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv = nn.ModuleList()", "num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, mask_channels=256, using_mask = True, with_IoU = False, conv_out_channels=256,", "import torch.nn as nn from ..registry import HEADS from ..utils import ConvModule from", "self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred =", "self.reg_convs, self.reg_fcs, self.reg_last_dim = \\ self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if self.num_shared_fcs == 0", "fc_reg since input channels are changed if self.with_cls: self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes) if", "self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs", "> 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.view(x_cls.size(0), -1) for fc", "branch_fcs, last_layer_dim def init_weights(self): super(ConvFCBBoxHead_MH, self).init_weights() for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]: for", "\\ self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if self.num_shared_fcs == 0 and not self.with_avg_pool: if", "def loss(self, # cls_score, # bbox_pred, # labels, # label_weights, # bbox_targets, #", "last_layer_dim = in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if", "in module_list.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) # @force_fp32(apply_to=('cls_score', 'bbox_pred')) # def", "nn.ModuleList() # for i in range(mask_conv): # conv_m = ConvModule(1, 1, 3, padding=1,", "norm_cfg self.using_bg = using_bg self.using_refine = using_refine self.with_IoU = with_IoU self.mask_channels = mask_channels", "mask_channels=256, using_mask = True, with_IoU = False, conv_out_channels=256, fc_out_channels=1024, proto_combine='con', feature_reduce=False, # mask_conv=3,", "class ConvFCBBoxHead_MH(BBoxHead): \"\"\"More general bbox head, with shared conv and fc layers and", "or num_reg_convs > 0: 
assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs", "self.cls_fcs, self.cls_last_dim = \\ self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) # add reg specific branch", "def init_weights(self): super(ConvFCBBoxHead_MH, self).init_weights() for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]: for m in", "target aquire and loss calculation def get_iou_target(self, sampling_reuslt, bbox_pred, bbox_target): pos_proposals = [res.pos_bboxes", "conv in self.reg_convs: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg", "branches x_cls = x x_reg = x for conv in self.cls_convs: x_cls =", "convs -> cls fcs -> cls shared convs -> shared fcs \\-> reg", "(optional) -> fcs \"\"\" last_layer_dim = in_channels # add branch specific conv layers", "# shared part if self.using_mask: # for conv in self.mask_conv: # mask_pred =", "self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def init_weights(self): super(ConvFCBBoxHead_MH, self).init_weights() for module_list in [self.shared_fcs,", "**kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs", "dict(type='MSELoss', loss_weight=0.5), *args, **kwargs): super(ConvFCBBoxHead_MH, self).__init__(*args, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs", "nn from ..registry import HEADS from ..utils import ConvModule from .bbox_head import BBoxHead", "import mask_target, mask_bg_target, force_fp32, bbox_target, bbox_overlaps from ..losses import accuracy from ..builder import", "W605 def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, mask_channels=256, using_mask = True,", "self.shared_fcs, last_layer_dim = \\ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim #", "= self.IoU_reg(x_reg) return cls_score, bbox_pred, IoU_pred return cls_score, bbox_pred @HEADS.register_module class SharedFCBBoxHead_MH(ConvFCBBoxHead_MH): def", "= using_mask self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs =", "# noqa: W605 def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, mask_channels=256, using_mask", "branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for", "for res in sampling_reuslt] pos_assigned_gt_inds = [ res.pos_gt_assigned_gt_inds for res in sampling_reuslt ]", "loss(self, # cls_score, # bbox_pred, # labels, # label_weights, # bbox_targets, # bbox_weights,", "pos_assigned_gt_inds = [ # res.pos_assigned_gt_inds for res in sampling_results # ] # mask_targets", "= torch.cat([x, mask_pred], dim=1) x = self.combine(x) elif self.proto_combine == 'sum': x =", "0 and num_cls_fcs == 0 if not self.with_reg: assert num_reg_convs == 0 and", "= ConvModule(self.mask_channels, self.mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # add shared convs and fcs if", "= x_cls.view(x_cls.size(0), -1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in", "cls_reg_targets = bbox_target( # pos_proposals, # neg_proposals, # pos_gt_bboxes, # pos_gt_labels, # rcnn_train_cfg,", "# neg_proposals, # pos_gt_bboxes, # pos_gt_labels, # rcnn_train_cfg, # reg_classes, # target_means=self.target_means, #", "x_reg = x for conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() >", "= 
mask_channels self.proto_combine = proto_combine self.feature_reduce = feature_reduce if with_IoU: self.iou_loss = build_loss(loss_iou)", ">= 1 super(SharedFCBBoxHead_MH, self).__init__( num_shared_convs=0, num_shared_fcs=num_fcs, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels, *args, **kwargs)", "super(ConvFCBBoxHead_MH, self).__init__(*args, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs", "num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg", "= self.reduce_con(x) x = torch.cat([x, mask_pred], dim=1) if self.num_shared_convs > 0: for conv", "0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim", "# avg_factor=avg_factor, # reduction_override=reduction_override) # losses['acc_refine'] = accuracy(cls_score, labels) # if bbox_pred is", "3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) last_layer_dim = self.conv_out_channels # add branch specific fc layers", "x x_reg = x for conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim()", "# mask_conv=3, conv_cfg=None, norm_cfg=None, using_bg=False, using_refine=True, loss_iou = dict(type='MSELoss', loss_weight=0.5), *args, **kwargs): super(ConvFCBBoxHead_MH,", "'None': if self.feature_reduce: self.reduce_con = ConvModule(self.in_channels, conv_out_channels - mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) else:", "BBoxHead import torch import torch.nn.functional as F import mmcv from mmdet.core import mask_target,", "self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim", "last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def init_weights(self): super(ConvFCBBoxHead_MH, self).init_weights() for module_list", "if self.using_mask: # for conv in self.mask_conv: # mask_pred = conv(mask_pred) # mask_pred", "conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x", "None: # avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) 
# losses['loss_cls_refine'] = self.loss_cls( #", "[res.bboxes for res in sampling_results] assigned_gt_inds = [ res.inds for res in sampling_results", "return losses #TODO: add IoU target aquire and loss calculation def get_iou_target(self, sampling_reuslt,", "True) self.shared_out_channels = last_layer_dim # add cls specific branch self.cls_convs, self.cls_fcs, self.cls_last_dim =", "x_reg.view(x_reg.size(0), -1) for fc in self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if", "mask_bg_target, force_fp32, bbox_target, bbox_overlaps from ..losses import accuracy from ..builder import build_loss @HEADS.register_module", "\\ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim # add cls specific", "# target_means=self.target_means, # target_stds=self.target_stds) # return cls_reg_targets def forward(self, x, mask_pred): # shared", "get_iou_target(self, sampling_reuslt, bbox_pred, bbox_target): pos_proposals = [res.pos_bboxes for res in sampling_reuslt] pos_assigned_gt_inds =", "cls shared convs -> shared fcs \\-> reg convs -> reg fcs ->", "if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.view(x_reg.size(0), -1) for fc in self.reg_fcs:", "*= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i ==", "cls_reg_targets def forward(self, x, mask_pred): # shared part if self.using_mask: # for conv", "self.mask_channels if proto_combine == 'con' else self.in_channels self.combine = ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg,", "x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.view(x_reg.size(0), -1) for", "# def get_target(self, sampling_results, gt_bboxes, gt_labels, # rcnn_train_cfg): # pos_proposals = [res.pos_bboxes for", "is not None: # avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) # losses['loss_cls_refine'] =", "self.with_reg else None if self.with_IoU: IoU_pred = self.IoU_reg(x_reg) return cls_score, bbox_pred, IoU_pred return", "if cls_score is not None: # avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) 
#", "self.proto_combine == 'None': if self.feature_reduce: self.reduce_con = ConvModule(self.in_channels, conv_out_channels - mask_channels, 1, conv_cfg=conv_cfg,", "consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *=", "sampling_results, gt_bboxes, gt_labels, # rcnn_train_cfg): # pos_proposals = [res.pos_bboxes for res in sampling_results]", "class SharedFCBBoxHead_MH(ConvFCBBoxHead_MH): def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs): assert num_fcs >= 1 super(SharedFCBBoxHead_MH,", "== 'None': if self.feature_reduce: self.reduce_con = ConvModule(self.in_channels, conv_out_channels - mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)", "num_branch_convs, num_branch_fcs, in_channels, is_shared=False): \"\"\"Add shared or separable branch convs -> avg pool", "= ConvModule(1, 1, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv.append(conv_m) self.shared_convs, self.shared_fcs, last_layer_dim =", "= [res.pos_bboxes for res in sampling_results] # neg_proposals = [torch.tensor([]) for res in", "> 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i ==", "from .bbox_head import BBoxHead import torch import torch.nn.functional as F import mmcv from", "+ num_reg_fcs > 0) if num_cls_convs > 0 or num_reg_convs > 0: assert", "self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) last_layer_dim = self.conv_out_channels # add branch specific fc", "= x.view(x.size(0), -1) for fc in self.shared_fcs: x = self.relu(fc(x)) # separate branches", "specific branch self.reg_convs, self.reg_fcs, self.reg_last_dim = \\ self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if self.num_shared_fcs", "from ..losses import accuracy from ..builder import build_loss @HEADS.register_module class ConvFCBBoxHead_MH(BBoxHead): \"\"\"More general", "= num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs =", "not None: # avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) 
# losses['loss_cls_refine'] = self.loss_cls(", "# self.mask_conv.append(conv_m) self.shared_convs, self.shared_fcs, last_layer_dim = \\ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels", "mask_pred x = self.combine(x) else: x = self.reduce_con(x) x = torch.cat([x, mask_pred], dim=1)", "sampling_results # ] # mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, # gt_masks, rcnn_train_cfg) proposals =", "cls_score, # labels, # label_weights, # avg_factor=avg_factor, # reduction_override=reduction_override) # losses['acc_refine'] = accuracy(cls_score,", "self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) self.fc_reg = nn.Linear(self.reg_last_dim,", "x.view(x.size(0), -1) for fc in self.shared_fcs: x = self.relu(fc(x)) # separate branches x_cls", "labels[pos_inds]] # losses['loss_bbox_refine'] = self.loss_bbox( # pos_bbox_pred, # bbox_targets[pos_inds], # bbox_weights[pos_inds], # avg_factor=bbox_targets.size(0),", "res in sampling_results # ] # mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, # gt_masks, rcnn_train_cfg)", "1 if self.reg_class_agnostic else self.num_classes # cls_reg_targets = bbox_target( # pos_proposals, # neg_proposals,", "if self.with_cls: self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic", "num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, mask_channels=256, using_mask = True, with_IoU = False, conv_out_channels=256, fc_out_channels=1024, proto_combine='con',", "x = self.relu(fc(x)) # separate branches x_cls = x x_reg = x for", "[res.pos_bboxes for res in sampling_reuslt] pos_assigned_gt_inds = [ res.pos_gt_assigned_gt_inds for res in sampling_reuslt", "conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls", "import build_loss @HEADS.register_module class ConvFCBBoxHead_MH(BBoxHead): \"\"\"More general bbox head, with shared conv and", "else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def", "i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels)", "conv_cfg=conv_cfg, norm_cfg=norm_cfg) # add shared convs and fcs if self.proto_combine == 'None': if", "not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim", "specific branch self.cls_convs, self.cls_fcs, self.cls_last_dim = \\ self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) # add", "= nn.Linear(self.cls_last_dim, self.num_classes) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 *", "# bbox_weights, # reduction_override=None): # losses = dict() # if cls_score is not", "pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds] # else: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, # 4)[pos_inds,", "pos_proposals = [res.pos_bboxes for res in sampling_reuslt] pos_assigned_gt_inds = [ res.pos_gt_assigned_gt_inds for res", "= self.avg_pool(x_cls) x_cls = x_cls.view(x_cls.size(0), -1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls))", "else None if self.with_IoU: IoU_pred = self.IoU_reg(x_reg) return cls_score, bbox_pred, IoU_pred return cls_score,", "num_cls_fcs + num_reg_convs + num_reg_fcs > 0) if num_cls_convs > 0 or num_reg_convs", "= 
nn.Linear(self.reg_last_dim, out_dim_reg) if self.with_IoU: self.IoU_reg = nn.Linear(self.reg_last_dim, self.num_classes) def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs,", "- mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) else: combine_channels = self.in_channels + self.mask_channels if proto_combine", "num_reg_fcs > 0) if num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs", "== 'con' else self.in_channels self.combine = ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv", "nn.ModuleList() if num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim", "ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv = nn.ModuleList() # for i in", "nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def init_weights(self): super(ConvFCBBoxHead_MH, self).init_weights()", "self.shared_fcs: x = self.relu(fc(x)) # separate branches x_cls = x x_reg = x", "num_cls_fcs == 0 if not self.with_reg: assert num_reg_convs == 0 and num_reg_fcs ==", "add IoU target aquire and loss calculation def get_iou_target(self, sampling_reuslt, bbox_pred, bbox_target): pos_proposals", "for conv in self.reg_convs: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool:", "if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) # @force_fp32(apply_to=('cls_score', 'bbox_pred')) # def loss(self, #", "for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0 else", "0).float().item(), 1.) # losses['loss_cls_refine'] = self.loss_cls( # cls_score, # labels, # label_weights, #", "= in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs", "self.IoU_reg = nn.Linear(self.reg_last_dim, self.num_classes) def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False): \"\"\"Add shared or", "out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)", "pool (optional) -> fcs \"\"\" last_layer_dim = in_channels # add branch specific conv", "range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels,", "mask_pred = conv(mask_pred) # mask_pred = self.hint_conv(mask_pred) if self.proto_combine == 'con': x =", "self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x)", "4)[pos_inds] # else: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, # 4)[pos_inds, labels[pos_inds]] # losses['loss_bbox_refine']", "= bbox_pred.view(bbox_pred.size(0), -1, # 4)[pos_inds, labels[pos_inds]] # losses['loss_bbox_refine'] = self.loss_bbox( # pos_bbox_pred, #", "losses['loss_cls_refine'] = self.loss_cls( # cls_score, # labels, # label_weights, # avg_factor=avg_factor, # reduction_override=reduction_override)", "import torch import torch.nn.functional as F import mmcv from mmdet.core import mask_target, mask_bg_target,", "norm_cfg=norm_cfg) # self.mask_conv.append(conv_m) self.shared_convs, self.shared_fcs, last_layer_dim = \\ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True)", "# cls_score, # bbox_pred, # labels, # label_weights, # bbox_targets, # bbox_weights, #", "add reg specific branch self.reg_convs, self.reg_fcs, self.reg_last_dim = 
\\ self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)", "bbox_overlaps() def get_mask_target(self, sampling_results, gt_masks, rcnn_train_cfg): # pos_proposals = [res.pos_bboxes for res in", "branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs > 0: for i", "def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs): assert num_fcs >= 1 super(SharedFCBBoxHead_MH, self).__init__( num_shared_convs=0,", "noqa: W605 def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, mask_channels=256, using_mask =", "> 0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls:", "-> cls shared convs -> shared fcs \\-> reg convs -> reg fcs", "proto_combine == 'con' else self.in_channels self.combine = ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) #", "norm_cfg=self.norm_cfg)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs = nn.ModuleList()", "if self.reg_class_agnostic else 4 * self.num_classes) self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) if self.with_IoU: self.IoU_reg", "= ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv = nn.ModuleList() # for i", "num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i", "self.in_channels + self.mask_channels if proto_combine == 'con' else self.in_channels self.combine = ConvModule(combine_channels, conv_out_channels,", "conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs =", "mask_bg_targets # def get_target(self, sampling_results, gt_bboxes, gt_labels, # rcnn_train_cfg): # pos_proposals = [res.pos_bboxes", "branch, only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if (is_shared", "fc layers and two optional separated branches. 
/-> cls convs -> cls fcs", "> 0: # for shared branch, only consider self.with_avg_pool # for separated branches,", "dim=1) x = self.combine(x) elif self.proto_combine == 'sum': x = x + mask_pred", "= ( last_layer_dim if i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim", "self.using_bg = using_bg self.using_refine = using_refine self.with_IoU = with_IoU self.mask_channels = mask_channels self.proto_combine", "feature_reduce if with_IoU: self.iou_loss = build_loss(loss_iou) # self.hint_conv = ConvModule(self.mask_channels, self.mask_channels, 1, conv_cfg=conv_cfg,", "sampling_results] assigned_gt_inds = [ res.inds for res in sampling_results ] mask_targets = mask_target(proposals,", "0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls: assert", "reconstruct fc_cls and fc_reg since input channels are changed if self.with_cls: self.fc_cls =", "import BBoxHead import torch import torch.nn.functional as F import mmcv from mmdet.core import", "= [ res.pos_gt_assigned_gt_inds for res in sampling_reuslt ] # bbox_overlaps() def get_mask_target(self, sampling_results,", "fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared branch,", "ConvModule(1, 1, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # self.mask_conv.append(conv_m) self.shared_convs, self.shared_fcs, last_layer_dim = \\", "sampling_results] # pos_gt_labels = [res.pos_gt_labels for res in sampling_results] # reg_classes = 1", "= nn.ModuleList() if num_branch_fcs > 0: # for shared branch, only consider self.with_avg_pool", "two optional separated branches. /-> cls convs -> cls fcs -> cls shared", "mask_conv=3, conv_cfg=None, norm_cfg=None, using_bg=False, using_refine=True, loss_iou = dict(type='MSELoss', loss_weight=0.5), *args, **kwargs): super(ConvFCBBoxHead_MH, self).__init__(*args,", "self.cls_last_dim = \\ self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) # add reg specific branch self.reg_convs,", "#TODO: add IoU target aquire and loss calculation def get_iou_target(self, sampling_reuslt, bbox_pred, bbox_target):", "self.mask_conv: # mask_pred = conv(mask_pred) # mask_pred = self.hint_conv(mask_pred) if self.proto_combine == 'con':", "self.avg_pool(x_cls) x_cls = x_cls.view(x_cls.size(0), -1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for", "for fc in self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else", "( last_layer_dim if i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim =", "if self.reg_class_agnostic: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds] # else: # pos_bbox_pred = bbox_pred.view(bbox_pred.size(0),", "force_fp32, bbox_target, bbox_overlaps from ..losses import accuracy from ..builder import build_loss @HEADS.register_module class", "or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in", "reg specific branch self.reg_convs, self.reg_fcs, self.reg_last_dim = \\ self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if", "res in sampling_reuslt ] # bbox_overlaps() def get_mask_target(self, sampling_results, gt_masks, rcnn_train_cfg): # pos_proposals", "proto_combine='con', feature_reduce=False, # mask_conv=3, conv_cfg=None, norm_cfg=None, using_bg=False, using_refine=True, loss_iou = dict(type='MSELoss', loss_weight=0.5), *args,", "conv_cfg 
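For orientation, here is a minimal sketch of the tensor contract that forward() expects. Everything below is illustrative: the proto_combine='con' path mirrors the code above, but the batch size, channel widths, and spatial size are invented for the example.

import torch

# Hypothetical shapes: ROI features and mask prototypes share the RoI
# extractor's spatial size (e.g. 7x7) and the same batch of RoIs.
num_rois, in_channels, mask_channels, s = 8, 256, 256, 7
feats = torch.randn(num_rois, in_channels, s, s)     # `x` in forward()
protos = torch.randn(num_rois, mask_channels, s, s)  # `mask_pred` in forward()

# With proto_combine='con', forward() concatenates along the channel dim,
# and a 1x1 ConvModule (self.combine) projects 512 -> conv_out_channels
# before the shared convs/fcs and the cls/reg branches run.
fused = torch.cat([feats, protos], dim=1)            # shape (8, 512, 7, 7)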
@HEADS.register_module
class SharedFCBBoxHead_MH(ConvFCBBoxHead_MH):

    def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):
        assert num_fcs >= 1
        super(SharedFCBBoxHead_MH, self).__init__(
            num_shared_convs=0,
            num_shared_fcs=num_fcs,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
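A head like this would normally be built from a config rather than instantiated by hand. The snippet below is a sketch of how SharedFCBBoxHead_MH might be declared in an mmdet-1.x-style config; every value (channel widths, roi_feat_size, class count, target statistics) is an assumption for illustration, not taken from the original repository.

bbox_head = dict(
    type='SharedFCBBoxHead_MH',
    num_fcs=2,
    in_channels=256,          # assumed FPN channel width
    fc_out_channels=1024,
    roi_feat_size=7,          # assumed RoIAlign output size
    num_classes=81,           # assumed: COCO, 80 classes + background
    mask_channels=256,
    using_mask=True,
    proto_combine='con',      # concatenate mask prototypes with ROI features
    with_IoU=False,
    target_means=[0., 0., 0., 0.],
    target_stds=[0.1, 0.1, 0.2, 0.2],
    reg_class_agnostic=False)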
[ "match[\"score\"] = request.get(\"score\").replace(' ', '') return match def parse_ended_match_to_db(request): parsed_match = { \"home_team\":", "logger = LoggerService().logger def create_match(self, data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self, data): return self.client.FMT[\"matches\"].update_one(data,", "create_match(self, data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self, data): return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def find_match(self, data):", "self.client.FMT[\"matches\"].find_one(data) def parse_match_from_request(request): return { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\") } def", "parsed_match['away_team'] parsed_match['team_lose_score'] = away_team_score else: parsed_match['team_won'] = parsed_match['away_team'] parsed_match['team_won_score'] = away_team_score parsed_match['team_lost'] =", "str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\", None), \"score\": request.get(\"score\", None), \"is_draw\": request.get(\"is_draw\",", "home_team_score > away_team_score: parsed_match['team_won'] = parsed_match['home_team'] parsed_match['team_won_score'] = home_team_score parsed_match['team_lost'] = parsed_match['away_team'] parsed_match['team_lose_score']", "away_team_score = parsed_match[\"score\"].split('-')[1] # parse match result if home_team_score == away_team_score: parsed_match['is_draw'] =", "parsed_match['team_won_score'] = home_team_score parsed_match['team_lost'] = parsed_match['away_team'] parsed_match['team_lose_score'] = away_team_score else: parsed_match['team_won'] = parsed_match['away_team']", "\"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\"), \"score\": request.get(\"score\") } home_team_score = parsed_match[\"score\"].split('-')[0] away_team_score = parsed_match[\"score\"].split('-')[1]", "= parsed_match['home_team'] parsed_match['team_won_score'] = home_team_score parsed_match['team_lost'] = parsed_match['away_team'] parsed_match['team_lose_score'] = away_team_score else: parsed_match['team_won']", "LoggerService().logger def create_match(self, data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self, data): return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def", "return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def find_match(self, data): return self.client.FMT[\"matches\"].find_one(data) def parse_match_from_request(request): return { \"home_team\":", "parsed_match[\"score\"].split('-')[1] # parse match result if home_team_score == away_team_score: parsed_match['is_draw'] = True parsed_match['team_won_score']", "\"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\") } def parse_ended_match_from_request(request): match = parse_match_from_request(request) match[\"score\"] = request.get(\"score\").replace('", "data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self, data): return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def find_match(self, data): return", "home_team_score == away_team_score: parsed_match['is_draw'] = True parsed_match['team_won_score'] = home_team_score else: parsed_match['is_draw'] = False", "\"score\": request.get(\"score\") } home_team_score = 
parsed_match[\"score\"].split('-')[0] away_team_score = parsed_match[\"score\"].split('-')[1] # parse match result", "home_team_score else: parsed_match['is_draw'] = False if home_team_score > away_team_score: parsed_match['team_won'] = parsed_match['home_team'] parsed_match['team_won_score']", "parsed_match['home_team'] parsed_match['team_won_score'] = home_team_score parsed_match['team_lost'] = parsed_match['away_team'] parsed_match['team_lose_score'] = away_team_score else: parsed_match['team_won'] =", "parsed_match['team_won'] = parsed_match['home_team'] parsed_match['team_won_score'] = home_team_score parsed_match['team_lost'] = parsed_match['away_team'] parsed_match['team_lose_score'] = away_team_score else:", "= LoggerService().logger def create_match(self, data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self, data): return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id", "parse match result if home_team_score == away_team_score: parsed_match['is_draw'] = True parsed_match['team_won_score'] = home_team_score", "'') return match def parse_ended_match_to_db(request): parsed_match = { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\":", "parsed_match['team_lost'] = parsed_match['home_team'] parsed_match['team_lose_score'] = home_team_score return parsed_match def parse_match_from_db(request): return { \"id\":", "= home_team_score else: parsed_match['is_draw'] = False if home_team_score > away_team_score: parsed_match['team_won'] = parsed_match['home_team']", "request.get(\"date\", None), \"score\": request.get(\"score\", None), \"is_draw\": request.get(\"is_draw\", None), \"team_won\": request.get(\"team_won\", None), \"team_lost\": request.get(\"team_lost\",", "} home_team_score = parsed_match[\"score\"].split('-')[0] away_team_score = parsed_match[\"score\"].split('-')[1] # parse match result if home_team_score", "\"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\") } def parse_ended_match_from_request(request): match = parse_match_from_request(request) match[\"score\"]", "match = parse_match_from_request(request) match[\"score\"] = request.get(\"score\").replace(' ', '') return match def parse_ended_match_to_db(request): parsed_match", "data): return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def find_match(self, data): return self.client.FMT[\"matches\"].find_one(data) def parse_match_from_request(request): return {", "== away_team_score: parsed_match['is_draw'] = True parsed_match['team_won_score'] = home_team_score else: parsed_match['is_draw'] = False if", "if home_team_score == away_team_score: parsed_match['is_draw'] = True parsed_match['team_won_score'] = home_team_score else: parsed_match['is_draw'] =", "self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self, data): return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def find_match(self, data): return self.client.FMT[\"matches\"].find_one(data) def", "request.get(\"away_team\"), \"date\": request.get(\"date\"), \"score\": request.get(\"score\") } home_team_score = parsed_match[\"score\"].split('-')[0] away_team_score = parsed_match[\"score\"].split('-')[1] #", "True parsed_match['team_won_score'] = home_team_score else: parsed_match['is_draw'] = False if home_team_score > away_team_score: parsed_match['team_won']", "data): return self.client.FMT[\"matches\"].find_one(data) 
def parse_match_from_request(request): return { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\")", "return match def parse_ended_match_to_db(request): parsed_match = { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\"),", "= request.get(\"score\").replace(' ', '') return match def parse_ended_match_to_db(request): parsed_match = { \"home_team\": request.get(\"home_team\"),", "parsed_match['team_won'] = parsed_match['away_team'] parsed_match['team_won_score'] = away_team_score parsed_match['team_lost'] = parsed_match['home_team'] parsed_match['team_lose_score'] = home_team_score return", "parsed_match['team_won_score'] = away_team_score parsed_match['team_lost'] = parsed_match['home_team'] parsed_match['team_lose_score'] = home_team_score return parsed_match def parse_match_from_db(request):", "\"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\", None), \"score\": request.get(\"score\", None), \"is_draw\": request.get(\"is_draw\", None),", "None), \"score\": request.get(\"score\", None), \"is_draw\": request.get(\"is_draw\", None), \"team_won\": request.get(\"team_won\", None), \"team_lost\": request.get(\"team_lost\", None)", "parsed_match['team_lost'] = parsed_match['away_team'] parsed_match['team_lose_score'] = away_team_score else: parsed_match['team_won'] = parsed_match['away_team'] parsed_match['team_won_score'] = away_team_score", "away_team_score: parsed_match['team_won'] = parsed_match['home_team'] parsed_match['team_won_score'] = home_team_score parsed_match['team_lost'] = parsed_match['away_team'] parsed_match['team_lose_score'] = away_team_score", "else: parsed_match['is_draw'] = False if home_team_score > away_team_score: parsed_match['team_won'] = parsed_match['home_team'] parsed_match['team_won_score'] =", "parsed_match = { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\"), \"score\": request.get(\"score\") } home_team_score", "parsed_match['is_draw'] = False if home_team_score > away_team_score: parsed_match['team_won'] = parsed_match['home_team'] parsed_match['team_won_score'] = home_team_score", "else: parsed_match['team_won'] = parsed_match['away_team'] parsed_match['team_won_score'] = away_team_score parsed_match['team_lost'] = parsed_match['home_team'] parsed_match['team_lose_score'] = home_team_score", "request.get(\"date\"), \"score\": request.get(\"score\") } home_team_score = parsed_match[\"score\"].split('-')[0] away_team_score = parsed_match[\"score\"].split('-')[1] # parse match", "request.get(\"score\") } home_team_score = parsed_match[\"score\"].split('-')[0] away_team_score = parsed_match[\"score\"].split('-')[1] # parse match result if", "match def parse_ended_match_to_db(request): parsed_match = { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\"), \"score\":", "= { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\"), \"score\": request.get(\"score\") } home_team_score =", "parse_ended_match_from_request(request): match = parse_match_from_request(request) match[\"score\"] = request.get(\"score\").replace(' ', '') return match def parse_ended_match_to_db(request):", "def parse_ended_match_from_request(request): match = parse_match_from_request(request) 
match[\"score\"] = request.get(\"score\").replace(' ', '') return match def", "\"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\", None), \"score\": request.get(\"score\", None), \"is_draw\": request.get(\"is_draw\", None), \"team_won\": request.get(\"team_won\",", "parse_match_from_request(request) match[\"score\"] = request.get(\"score\").replace(' ', '') return match def parse_ended_match_to_db(request): parsed_match = {", "def parse_match_from_db(request): return { \"id\": str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\", None),", "LoggerService logger = LoggerService().logger def create_match(self, data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self, data): return", "request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\", None), \"score\": request.get(\"score\", None), \"is_draw\": request.get(\"is_draw\", None), \"team_won\":", "request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\"), \"score\": request.get(\"score\") } home_team_score = parsed_match[\"score\"].split('-')[0] away_team_score =", "match result if home_team_score == away_team_score: parsed_match['is_draw'] = True parsed_match['team_won_score'] = home_team_score else:", "\"date\": request.get(\"date\"), \"score\": request.get(\"score\") } home_team_score = parsed_match[\"score\"].split('-')[0] away_team_score = parsed_match[\"score\"].split('-')[1] # parse", "parsed_match['team_lose_score'] = away_team_score else: parsed_match['team_won'] = parsed_match['away_team'] parsed_match['team_won_score'] = away_team_score parsed_match['team_lost'] = parsed_match['home_team']", "\"score\": request.get(\"score\", None), \"is_draw\": request.get(\"is_draw\", None), \"team_won\": request.get(\"team_won\", None), \"team_lost\": request.get(\"team_lost\", None) }", "{ \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\"), \"score\": request.get(\"score\") } home_team_score = parsed_match[\"score\"].split('-')[0]", "= away_team_score parsed_match['team_lost'] = parsed_match['home_team'] parsed_match['team_lose_score'] = home_team_score return parsed_match def parse_match_from_db(request): return", "request.get(\"score\").replace(' ', '') return match def parse_ended_match_to_db(request): parsed_match = { \"home_team\": request.get(\"home_team\"), \"away_team\":", "parsed_match def parse_match_from_db(request): return { \"id\": str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\",", "def parse_ended_match_to_db(request): parsed_match = { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\"), \"score\": request.get(\"score\")", "= False if home_team_score > away_team_score: parsed_match['team_won'] = parsed_match['home_team'] parsed_match['team_won_score'] = home_team_score parsed_match['team_lost']", "', '') return match def parse_ended_match_to_db(request): parsed_match = { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"),", "= True parsed_match['team_won_score'] = home_team_score else: parsed_match['is_draw'] = False if home_team_score > away_team_score:", "if home_team_score > away_team_score: parsed_match['team_won'] = 
parsed_match['home_team'] parsed_match['team_won_score'] = home_team_score parsed_match['team_lost'] = parsed_match['away_team']", "parsed_match['away_team'] parsed_match['team_won_score'] = away_team_score parsed_match['team_lost'] = parsed_match['home_team'] parsed_match['team_lose_score'] = home_team_score return parsed_match def", "= parsed_match['home_team'] parsed_match['team_lose_score'] = home_team_score return parsed_match def parse_match_from_db(request): return { \"id\": str(request.get(\"_id\")),", "= parsed_match[\"score\"].split('-')[1] # parse match result if home_team_score == away_team_score: parsed_match['is_draw'] = True", "\"date\": request.get(\"date\", None), \"score\": request.get(\"score\", None), \"is_draw\": request.get(\"is_draw\", None), \"team_won\": request.get(\"team_won\", None), \"team_lost\":", "home_team_score parsed_match['team_lost'] = parsed_match['away_team'] parsed_match['team_lose_score'] = away_team_score else: parsed_match['team_won'] = parsed_match['away_team'] parsed_match['team_won_score'] =", "= parse_match_from_request(request) match[\"score\"] = request.get(\"score\").replace(' ', '') return match def parse_ended_match_to_db(request): parsed_match =", "return self.client.FMT[\"matches\"].find_one(data) def parse_match_from_request(request): return { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\") }", "away_team_score else: parsed_match['team_won'] = parsed_match['away_team'] parsed_match['team_won_score'] = away_team_score parsed_match['team_lost'] = parsed_match['home_team'] parsed_match['team_lose_score'] =", "parse_ended_match_to_db(request): parsed_match = { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\"), \"score\": request.get(\"score\") }", "> away_team_score: parsed_match['team_won'] = parsed_match['home_team'] parsed_match['team_won_score'] = home_team_score parsed_match['team_lost'] = parsed_match['away_team'] parsed_match['team_lose_score'] =", "False if home_team_score > away_team_score: parsed_match['team_won'] = parsed_match['home_team'] parsed_match['team_won_score'] = home_team_score parsed_match['team_lost'] =", "\"id\": str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\", None), \"score\": request.get(\"score\", None), \"is_draw\":", "parsed_match['team_won_score'] = home_team_score else: parsed_match['is_draw'] = False if home_team_score > away_team_score: parsed_match['team_won'] =", "services.loggerServices.loggerService import LoggerService logger = LoggerService().logger def create_match(self, data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self,", "self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def find_match(self, data): return self.client.FMT[\"matches\"].find_one(data) def parse_match_from_request(request): return { \"home_team\": request.get(\"home_team\"),", "result if home_team_score == away_team_score: parsed_match['is_draw'] = True parsed_match['team_won_score'] = home_team_score else: parsed_match['is_draw']", "upsert=True).inserted_id def find_match(self, data): return self.client.FMT[\"matches\"].find_one(data) def parse_match_from_request(request): return { \"home_team\": request.get(\"home_team\"), \"away_team\":", "find_match(self, data): return self.client.FMT[\"matches\"].find_one(data) def 
parse_match_from_request(request): return { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\":", "away_team_score parsed_match['team_lost'] = parsed_match['home_team'] parsed_match['team_lose_score'] = home_team_score return parsed_match def parse_match_from_db(request): return {", "{ \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\") } def parse_ended_match_from_request(request): match = parse_match_from_request(request)", "parse_match_from_request(request): return { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\") } def parse_ended_match_from_request(request): match", "def find_match(self, data): return self.client.FMT[\"matches\"].find_one(data) def parse_match_from_request(request): return { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"),", "} def parse_ended_match_from_request(request): match = parse_match_from_request(request) match[\"score\"] = request.get(\"score\").replace(' ', '') return match", "away_team_score: parsed_match['is_draw'] = True parsed_match['team_won_score'] = home_team_score else: parsed_match['is_draw'] = False if home_team_score", "#!/usr/bin/python3 from services.loggerServices.loggerService import LoggerService logger = LoggerService().logger def create_match(self, data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id", "return { \"id\": str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\", None), \"score\": request.get(\"score\",", "= home_team_score parsed_match['team_lost'] = parsed_match['away_team'] parsed_match['team_lose_score'] = away_team_score else: parsed_match['team_won'] = parsed_match['away_team'] parsed_match['team_won_score']", "def update_match(self, data): return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def find_match(self, data): return self.client.FMT[\"matches\"].find_one(data) def parse_match_from_request(request):", "return { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\") } def parse_ended_match_from_request(request): match =", "request.get(\"away_team\"), \"date\": request.get(\"date\", None), \"score\": request.get(\"score\", None), \"is_draw\": request.get(\"is_draw\", None), \"team_won\": request.get(\"team_won\", None),", "def create_match(self, data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self, data): return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def find_match(self,", "home_team_score = parsed_match[\"score\"].split('-')[0] away_team_score = parsed_match[\"score\"].split('-')[1] # parse match result if home_team_score ==", "request.get(\"date\") } def parse_ended_match_from_request(request): match = parse_match_from_request(request) match[\"score\"] = request.get(\"score\").replace(' ', '') return", "= parsed_match['away_team'] parsed_match['team_won_score'] = away_team_score parsed_match['team_lost'] = parsed_match['home_team'] parsed_match['team_lose_score'] = home_team_score return parsed_match", "request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\") } def parse_ended_match_from_request(request): match = parse_match_from_request(request) match[\"score\"] =", "# parse match result if 
home_team_score == away_team_score: parsed_match['is_draw'] = True parsed_match['team_won_score'] =", "= home_team_score return parsed_match def parse_match_from_db(request): return { \"id\": str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"), \"away_team\":", "def parse_match_from_request(request): return { \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\") } def parse_ended_match_from_request(request):", "update_match(self, data): return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def find_match(self, data): return self.client.FMT[\"matches\"].find_one(data) def parse_match_from_request(request): return", "parsed_match['home_team'] parsed_match['team_lose_score'] = home_team_score return parsed_match def parse_match_from_db(request): return { \"id\": str(request.get(\"_id\")), \"home_team\":", "return self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self, data): return self.client.FMT[\"matches\"].update_one(data, upsert=True).inserted_id def find_match(self, data): return self.client.FMT[\"matches\"].find_one(data)", "request.get(\"away_team\"), \"date\": request.get(\"date\") } def parse_ended_match_from_request(request): match = parse_match_from_request(request) match[\"score\"] = request.get(\"score\").replace(' ',", "{ \"id\": str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\", None), \"score\": request.get(\"score\", None),", "parsed_match['team_lose_score'] = home_team_score return parsed_match def parse_match_from_db(request): return { \"id\": str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"),", "= parsed_match['away_team'] parsed_match['team_lose_score'] = away_team_score else: parsed_match['team_won'] = parsed_match['away_team'] parsed_match['team_won_score'] = away_team_score parsed_match['team_lost']", "= away_team_score else: parsed_match['team_won'] = parsed_match['away_team'] parsed_match['team_won_score'] = away_team_score parsed_match['team_lost'] = parsed_match['home_team'] parsed_match['team_lose_score']", "\"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\"), \"score\": request.get(\"score\") } home_team_score = parsed_match[\"score\"].split('-')[0] away_team_score", "parse_match_from_db(request): return { \"id\": str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\": request.get(\"date\", None), \"score\":", "\"date\": request.get(\"date\") } def parse_ended_match_from_request(request): match = parse_match_from_request(request) match[\"score\"] = request.get(\"score\").replace(' ', '')", "import LoggerService logger = LoggerService().logger def create_match(self, data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id def update_match(self, data):", "= parsed_match[\"score\"].split('-')[0] away_team_score = parsed_match[\"score\"].split('-')[1] # parse match result if home_team_score == away_team_score:", "from services.loggerServices.loggerService import LoggerService logger = LoggerService().logger def create_match(self, data): return self.client.FMT[\"matches\"].insert_one(data).inserted_id def", "parsed_match[\"score\"].split('-')[0] away_team_score = parsed_match[\"score\"].split('-')[1] # parse match result if home_team_score == away_team_score: 
parsed_match['is_draw']", "return parsed_match def parse_match_from_db(request): return { \"id\": str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\"), \"date\":", "parsed_match['is_draw'] = True parsed_match['team_won_score'] = home_team_score else: parsed_match['is_draw'] = False if home_team_score >", "home_team_score return parsed_match def parse_match_from_db(request): return { \"id\": str(request.get(\"_id\")), \"home_team\": request.get(\"home_team\"), \"away_team\": request.get(\"away_team\")," ]
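# Usage sketch for the match parsers above (illustrative only; the request
# payload shape is an assumption inferred from the .get() calls in this
# module, and the team names are made up):
if __name__ == '__main__':
    sample = {
        "home_team": "Santos",
        "away_team": "Flamengo",
        "date": "2021-05-01",
        "score": "2 - 1",
    }
    print(parse_ended_match_from_request(sample))  # score normalised to "2-1"
    print(parse_ended_match_to_db(sample))         # winner/loser fields filled in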
[ "o preço do produto: R$')) calc = preco - ((5 / 100) *", "preço do produto: R$')) calc = preco - ((5 / 100) * preco)", "preco - ((5 / 100) * preco) print(f'Preço digitado R${preco}\\nDesconto: 5%\\nPreço com desconto:", "produto: R$')) calc = preco - ((5 / 100) * preco) print(f'Preço digitado", "- ((5 / 100) * preco) print(f'Preço digitado R${preco}\\nDesconto: 5%\\nPreço com desconto: R${calc}')", "print('========= Desconto =========') preco = float(input('Digite o preço do produto: R$')) calc =", "=========') preco = float(input('Digite o preço do produto: R$')) calc = preco -", "preco = float(input('Digite o preço do produto: R$')) calc = preco - ((5", "= float(input('Digite o preço do produto: R$')) calc = preco - ((5 /", "do produto: R$')) calc = preco - ((5 / 100) * preco) print(f'Preço", "R$')) calc = preco - ((5 / 100) * preco) print(f'Preço digitado R${preco}\\nDesconto:", "calc = preco - ((5 / 100) * preco) print(f'Preço digitado R${preco}\\nDesconto: 5%\\nPreço", "= preco - ((5 / 100) * preco) print(f'Preço digitado R${preco}\\nDesconto: 5%\\nPreço com", "float(input('Digite o preço do produto: R$')) calc = preco - ((5 / 100)", "Desconto =========') preco = float(input('Digite o preço do produto: R$')) calc = preco" ]
[ "= None, processes=1) -> None: \"\"\" Iterates over each ASFProduct and downloads them", "in self] } def __str__(self): return json.dumps(self.geojson(), indent=2, sort_keys=True) def download(self, path: str,", "product in self] pool.map(_download_product, args) pool.close() pool.join() def _download_product(args): product, path, session =", "= ASFSession() if processes == 1: for product in self: product.download(path=path, session=session) else:", "None: \"\"\" Iterates over each ASFProduct and downloads them to the specified path.", "None \"\"\" if session is None: session = ASFSession() if processes == 1:", "[(product, path, session) for product in self] pool.map(_download_product, args) pool.close() pool.join() def _download_product(args):", "asf_search import ASFSession class ASFSearchResults(UserList): def geojson(self): return { 'type': 'FeatureCollection', 'features': [product.geojson()", "The session to use, in most cases should be authenticated beforehand :param processes:", "Pool import json from asf_search import ASFSession class ASFSearchResults(UserList): def geojson(self): return {", "1 (i.e. sequential download) :return: None \"\"\" if session is None: session =", "session = ASFSession() if processes == 1: for product in self: product.download(path=path, session=session)", "else: pool = Pool(processes=processes) args = [(product, path, session) for product in self]", "return { 'type': 'FeatureCollection', 'features': [product.geojson() for product in self] } def __str__(self):", "path: str, session: ASFSession = None, processes=1) -> None: \"\"\" Iterates over each", "of download processes to use. Defaults to 1 (i.e. sequential download) :return: None", "args = [(product, path, session) for product in self] pool.map(_download_product, args) pool.close() pool.join()", "in self: product.download(path=path, session=session) else: pool = Pool(processes=processes) args = [(product, path, session)", "<gh_stars>10-100 from collections import UserList from multiprocessing import Pool import json from asf_search", "beforehand :param processes: Number of download processes to use. Defaults to 1 (i.e.", "to use, in most cases should be authenticated beforehand :param processes: Number of", "to the specified path. :param path: The directory into which the products should", "from multiprocessing import Pool import json from asf_search import ASFSession class ASFSearchResults(UserList): def", "be authenticated beforehand :param processes: Number of download processes to use. Defaults to", "session=session) else: pool = Pool(processes=processes) args = [(product, path, session) for product in", "authenticated beforehand :param processes: Number of download processes to use. Defaults to 1", "def __str__(self): return json.dumps(self.geojson(), indent=2, sort_keys=True) def download(self, path: str, session: ASFSession =", "-> None: \"\"\" Iterates over each ASFProduct and downloads them to the specified", "== 1: for product in self: product.download(path=path, session=session) else: pool = Pool(processes=processes) args", "\"\"\" if session is None: session = ASFSession() if processes == 1: for", "which the products should be downloaded. :param session: The session to use, in", "processes=1) -> None: \"\"\" Iterates over each ASFProduct and downloads them to the", "product.download(path=path, session=session) else: pool = Pool(processes=processes) args = [(product, path, session) for product", "directory into which the products should be downloaded. 
:param session: The session to", "Defaults to 1 (i.e. sequential download) :return: None \"\"\" if session is None:", "ASFProduct and downloads them to the specified path. :param path: The directory into", "sort_keys=True) def download(self, path: str, session: ASFSession = None, processes=1) -> None: \"\"\"", "__str__(self): return json.dumps(self.geojson(), indent=2, sort_keys=True) def download(self, path: str, session: ASFSession = None,", "and downloads them to the specified path. :param path: The directory into which", "to 1 (i.e. sequential download) :return: None \"\"\" if session is None: session", "'FeatureCollection', 'features': [product.geojson() for product in self] } def __str__(self): return json.dumps(self.geojson(), indent=2,", "ASFSearchResults(UserList): def geojson(self): return { 'type': 'FeatureCollection', 'features': [product.geojson() for product in self]", "from asf_search import ASFSession class ASFSearchResults(UserList): def geojson(self): return { 'type': 'FeatureCollection', 'features':", "(i.e. sequential download) :return: None \"\"\" if session is None: session = ASFSession()", "[product.geojson() for product in self] } def __str__(self): return json.dumps(self.geojson(), indent=2, sort_keys=True) def", "session: The session to use, in most cases should be authenticated beforehand :param", "product in self: product.download(path=path, session=session) else: pool = Pool(processes=processes) args = [(product, path,", "cases should be authenticated beforehand :param processes: Number of download processes to use.", "1: for product in self: product.download(path=path, session=session) else: pool = Pool(processes=processes) args =", ":param processes: Number of download processes to use. Defaults to 1 (i.e. sequential", "downloads them to the specified path. :param path: The directory into which the", "import UserList from multiprocessing import Pool import json from asf_search import ASFSession class", "import Pool import json from asf_search import ASFSession class ASFSearchResults(UserList): def geojson(self): return", "them to the specified path. :param path: The directory into which the products", "str, session: ASFSession = None, processes=1) -> None: \"\"\" Iterates over each ASFProduct", "\"\"\" Iterates over each ASFProduct and downloads them to the specified path. :param", "for product in self: product.download(path=path, session=session) else: pool = Pool(processes=processes) args = [(product,", "into which the products should be downloaded. :param session: The session to use,", "pool.map(_download_product, args) pool.close() pool.join() def _download_product(args): product, path, session = args product.download(path=path, session=session)", "for product in self] pool.map(_download_product, args) pool.close() pool.join() def _download_product(args): product, path, session", "processes: Number of download processes to use. Defaults to 1 (i.e. sequential download)", "import json from asf_search import ASFSession class ASFSearchResults(UserList): def geojson(self): return { 'type':", "session) for product in self] pool.map(_download_product, args) pool.close() pool.join() def _download_product(args): product, path,", ":return: None \"\"\" if session is None: session = ASFSession() if processes ==", "download(self, path: str, session: ASFSession = None, processes=1) -> None: \"\"\" Iterates over", "the specified path. 
:param path: The directory into which the products should be", "ASFSession class ASFSearchResults(UserList): def geojson(self): return { 'type': 'FeatureCollection', 'features': [product.geojson() for product", "product in self] } def __str__(self): return json.dumps(self.geojson(), indent=2, sort_keys=True) def download(self, path:", "download) :return: None \"\"\" if session is None: session = ASFSession() if processes", "The directory into which the products should be downloaded. :param session: The session", "be downloaded. :param session: The session to use, in most cases should be", "None, processes=1) -> None: \"\"\" Iterates over each ASFProduct and downloads them to", "session is None: session = ASFSession() if processes == 1: for product in", "for product in self] } def __str__(self): return json.dumps(self.geojson(), indent=2, sort_keys=True) def download(self,", "sequential download) :return: None \"\"\" if session is None: session = ASFSession() if", "None: session = ASFSession() if processes == 1: for product in self: product.download(path=path,", "if processes == 1: for product in self: product.download(path=path, session=session) else: pool =", "Iterates over each ASFProduct and downloads them to the specified path. :param path:", ":param session: The session to use, in most cases should be authenticated beforehand", "self: product.download(path=path, session=session) else: pool = Pool(processes=processes) args = [(product, path, session) for", "{ 'type': 'FeatureCollection', 'features': [product.geojson() for product in self] } def __str__(self): return", "processes == 1: for product in self: product.download(path=path, session=session) else: pool = Pool(processes=processes)", "session to use, in most cases should be authenticated beforehand :param processes: Number", "specified path. :param path: The directory into which the products should be downloaded.", "json from asf_search import ASFSession class ASFSearchResults(UserList): def geojson(self): return { 'type': 'FeatureCollection',", "should be authenticated beforehand :param processes: Number of download processes to use. Defaults", "geojson(self): return { 'type': 'FeatureCollection', 'features': [product.geojson() for product in self] } def", "to use. Defaults to 1 (i.e. sequential download) :return: None \"\"\" if session", ":param path: The directory into which the products should be downloaded. 
:param session:", "path, session) for product in self] pool.map(_download_product, args) pool.close() pool.join() def _download_product(args): product,", "Pool(processes=processes) args = [(product, path, session) for product in self] pool.map(_download_product, args) pool.close()", "most cases should be authenticated beforehand :param processes: Number of download processes to", "self] pool.map(_download_product, args) pool.close() pool.join() def _download_product(args): product, path, session = args product.download(path=path,", "import ASFSession class ASFSearchResults(UserList): def geojson(self): return { 'type': 'FeatureCollection', 'features': [product.geojson() for", "= [(product, path, session) for product in self] pool.map(_download_product, args) pool.close() pool.join() def", "collections import UserList from multiprocessing import Pool import json from asf_search import ASFSession", "def download(self, path: str, session: ASFSession = None, processes=1) -> None: \"\"\" Iterates", "self] } def __str__(self): return json.dumps(self.geojson(), indent=2, sort_keys=True) def download(self, path: str, session:", "processes to use. Defaults to 1 (i.e. sequential download) :return: None \"\"\" if", "Number of download processes to use. Defaults to 1 (i.e. sequential download) :return:", "} def __str__(self): return json.dumps(self.geojson(), indent=2, sort_keys=True) def download(self, path: str, session: ASFSession", "path. :param path: The directory into which the products should be downloaded. :param", "UserList from multiprocessing import Pool import json from asf_search import ASFSession class ASFSearchResults(UserList):", "use, in most cases should be authenticated beforehand :param processes: Number of download", "if session is None: session = ASFSession() if processes == 1: for product", "in self] pool.map(_download_product, args) pool.close() pool.join() def _download_product(args): product, path, session = args", "ASFSession() if processes == 1: for product in self: product.download(path=path, session=session) else: pool", "indent=2, sort_keys=True) def download(self, path: str, session: ASFSession = None, processes=1) -> None:", "session: ASFSession = None, processes=1) -> None: \"\"\" Iterates over each ASFProduct and", "is None: session = ASFSession() if processes == 1: for product in self:", "download processes to use. Defaults to 1 (i.e. sequential download) :return: None \"\"\"", "class ASFSearchResults(UserList): def geojson(self): return { 'type': 'FeatureCollection', 'features': [product.geojson() for product in", "json.dumps(self.geojson(), indent=2, sort_keys=True) def download(self, path: str, session: ASFSession = None, processes=1) ->", "return json.dumps(self.geojson(), indent=2, sort_keys=True) def download(self, path: str, session: ASFSession = None, processes=1)", "downloaded. :param session: The session to use, in most cases should be authenticated", "'features': [product.geojson() for product in self] } def __str__(self): return json.dumps(self.geojson(), indent=2, sort_keys=True)", "products should be downloaded. :param session: The session to use, in most cases", "= Pool(processes=processes) args = [(product, path, session) for product in self] pool.map(_download_product, args)", "path: The directory into which the products should be downloaded. :param session: The", "use. Defaults to 1 (i.e. 
sequential download) :return: None \"\"\" if session is", "pool = Pool(processes=processes) args = [(product, path, session) for product in self] pool.map(_download_product,", "in most cases should be authenticated beforehand :param processes: Number of download processes", "def geojson(self): return { 'type': 'FeatureCollection', 'features': [product.geojson() for product in self] }", "multiprocessing import Pool import json from asf_search import ASFSession class ASFSearchResults(UserList): def geojson(self):", "the products should be downloaded. :param session: The session to use, in most", "should be downloaded. :param session: The session to use, in most cases should", "'type': 'FeatureCollection', 'features': [product.geojson() for product in self] } def __str__(self): return json.dumps(self.geojson(),", "each ASFProduct and downloads them to the specified path. :param path: The directory", "ASFSession = None, processes=1) -> None: \"\"\" Iterates over each ASFProduct and downloads", "from collections import UserList from multiprocessing import Pool import json from asf_search import", "over each ASFProduct and downloads them to the specified path. :param path: The" ]
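# Usage sketch (hedged: assumes asf_search's public granule_search() helper,
# which returns an ASFSearchResults, and a session that has been
# authenticated beforehand; the granule name is only an example):
#
#   import asf_search as asf
#   results = asf.granule_search(['ALPSRP111041130'])
#   results.download(path='./downloads', session=asf.ASFSession(), processes=4)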
[ "and high is not None and low > high: raise InvalidSubsettingException( \"Invalid bounds:", "False return True def _check_subset(self, subset): if not isinstance(subset, Subset): raise ValueError(\"Supplied argument", "== \"contains\": if high is not None: qs = qs.filter( end_time__lte=high ) if", "InvalidSubsettingException( \"Invalid bounds: lower bound greater than upper bound.\" ) self.low = low", "return False elif low is not None and high is None: if end_time", "== \"contains\": qs = qs.filter(footprint__within=poly) return qs def matches(self, eo_object, containment=\"overlaps\"): if not", "(value, max_extent[3]) ) else: line = Line( (max_extent[0], value), (max_extent[2], value) ) line.srid", "self).append(subset) def append(self, subset): self._check_subset(subset) super(Subsets, self).append(subset) def insert(self, i, subset): self._check_subset(subset) super(Subsets,", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR", "Y-axis given.\" ) if self.has_t and subset.is_temporal: raise InvalidSubsettingException( \"Multiple subsets for time-axis", "high=None): super(Trim, self).__init__(axis) if low is not None and high is not None", "subset.low bbox[2] = subset.high else: bbox[1] = subset.low bbox[3] = subset.high if bbox", "temporal subsets. \"\"\" def __init__(self, iterable, crs=None, allowed_types=None): \"\"\" Constructor. Allows to add", "None: l = max(float(subset.high) / float(size_y), 0.0) bbox[3] = extent[3] - l *", "True def _check_subset(self, subset): if not isinstance(subset, Subset): raise ValueError(\"Supplied argument is not", "related stuff @property def has_x(self): return any(map(lambda s: s.is_x, self)) @property def has_y(self):", "if not len(self): return True bbox = [None, None, None, None] srid =", "= self.crs if crs is not None: srid = crss.parseEPSGCode(crs, (crss.fromURL, crss.fromURN, crss.fromShortCode)", "Polygon.from_bbox(bbox) poly.srid = subset_srid return poly class Subset(object): def __init__(self, axis): axis =", "Y subsets. \"\"\" bbox = [None, None, None, None] for subset in self:", "to permit persons to whom the Software is # furnished to do so,", "low: return False else: if is_slice: if subset.is_x: line = Line( (value, max_extent[1]),", "OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- import logging from django.contrib.gis.geos import Polygon,", "self.crs if crs is not None: srid = crss.parseEPSGCode(crs, (crss.fromURL, crss.fromURN, crss.fromShortCode) )", "coverage.size footprint = coverage.footprint subset_srid = self.srid if subset_srid is None: bbox =", "l * (extent[2] - extent[0]) elif subset.is_y: if subset.low is not None: l", "super(Slice, self).__init__(axis) self.value = value def __repr__(self): return \"Slice: %s[%s]\" % (self.axis, self.value)", "Line( (max_extent[0], value), (max_extent[2], value) ) line.srid = srid if srid != 4326:", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "s: s.is_temporal, self)) @property def crs(self): return self._crs @crs.setter def crs(self, value): self._crs", "__init__(self, iterable, crs=None, allowed_types=None): \"\"\" Constructor. Allows to add set the initial subsets", "bbox[2]) if subset.is_y: if subset.low is not None: bbox[1] = max(subset.low, bbox[1]) if", "IN # THE SOFTWARE. 
#------------------------------------------------------------------------------- import logging from django.contrib.gis.geos import Polygon, LineString from", "eo_object.end_time for subset in self: if isinstance(subset, Slice): is_slice = True value =", "None: # transform coordinates from imageCRS to coverages CRS if subset.is_x: if subset.low", "Line( (value, max_extent[1]), (value, max_extent[3]) ) else: line = Line( (max_extent[0], value), (max_extent[2],", "> high: raise InvalidSubsettingException( \"Invalid bounds: lower bound greater than upper bound.\" )", "= qs.filter( end_time__lte=high ) if low is not None: qs = qs.filter( begin_time__gte=low", ") else: line = LineString( (max_extent[0], value), (max_extent[2], value) ) line.srid = srid", "qs.filter( end_time__lte=high ) if low is not None: qs = qs.filter( begin_time__gte=low )", ") if self.has_y and subset.is_y: raise InvalidSubsettingException( \"Multiple subsets for Y-axis given.\" )", "crss.is_image_crs(crs): raise InvalidSubsettingException( \"Could not parse EPSG code from URI '%s'\" % crs", "subset_srid = self.srid if subset_srid is None: bbox = list(extent) else: bbox =", "= crs # List API def extend(self, iterable): for subset in iterable: self._check_subset(subset)", "(extent[2] - extent[0]) if subset.high is not None: l = max(float(subset.high) / float(size_x),", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "max_extent[1]), (value, max_extent[3]) ) else: line = Line( (max_extent[0], value), (max_extent[2], value) )", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "== \"overlaps\": if not footprint.intersects(poly): return False elif containment == \"contains\": if not", "crs(self, value): self._crs = value @property def srid(self): \"\"\" Tries to find the", "if isinstance(subset, Slice): is_slice = True value = subset.value elif isinstance(subset, Trim): is_slice", "is_temporal(self): return self.axis in temporal_axes @property def is_x(self): return self.axis in x_axes @property", "high is None: if end_time < low: return False else: if begin_time >", "if not footprint.within(poly): return False return True def _check_subset(self, subset): if not isinstance(subset,", "in all # copies of this Software or works derived from this Software.", "the correct integer SRID for the crs. \"\"\" crs = self.crs if crs", "Subset): raise ValueError(\"Supplied argument is not a subset.\") if not isinstance(subset, self.allowed_types): raise", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "= subset.high else: bbox[1] = bbox[3] = subset.value return bbox def bounding_polygon(self, coverage):", "= srid if srid != 4326: line.transform(4326) qs = qs.filter(footprint__intersects=line) else: if subset.is_x:", "if is_slice: if subset.is_x: line = LineString( (value, max_extent[1]), (value, max_extent[3]) ) else:", "= qs.filter( end_time__gte=low ) # check if the temporal bounds must be strictly", "NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "software and associated documentation files (the \"Software\"), to deal # in the Software", "None: bbox = list(extent) else: bbox = list(footprint.extent) for subset in self: if", "= extent[3] - l * (extent[3] - extent[1]) else: if subset.is_x: if subset.low", "\"height\") all_axes = temporal_axes + x_axes + y_axes + z_axes def is_temporal(axis): \"\"\"", "and to permit persons to whom the Software is # furnished to do", "False else: if is_slice: if subset.is_x: line = Line( (value, max_extent[1]), (value, max_extent[3])", "if subset_srid is None: # transform coordinates from imageCRS to coverages CRS if", "# furnished to do so, subject to the following conditions: # # The", "the Software, and to permit persons to whom the Software is # furnished", "None] srid = self.srid if srid is None: srid = 4326 max_extent =", "max(float(subset.high) / float(size_x), 0.0) bbox[2] = extent[0] + l * (extent[2] - extent[0])", "self.value) class Trim(Subset): def __init__(self, axis, low=None, high=None): super(Trim, self).__init__(axis) if low is", "line = LineString( (max_extent[0], value), (max_extent[2], value) ) line.srid = srid if srid", "containment '%s'.\" % (bbox, containment) ) poly = Polygon.from_bbox(bbox) poly.srid = srid if", "= map( lambda v: v[0] if v[0] is not None else v[1], zip(bbox,", "+ y_axes + z_axes def is_temporal(axis): \"\"\" Returns whether or not an axis", "self: if subset.is_x: if isinstance(subset, Trim): bbox[0] = subset.low bbox[2] = subset.high else:", "qs = queryset bbox = [None, None, None, None] srid = self.srid if", "from this Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "low: return False else: if begin_time > high or end_time < low: return", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "InvalidSubsettingException( \"Multiple subsets for X-axis given.\" ) if self.has_y and subset.is_y: raise InvalidSubsettingException(", "v[0] is not None else v[1], zip(bbox, max_extent) ) bbox[0] -= tolerance; bbox[1]", "qs.filter( begin_time__lte=value, end_time__gte=value ) else: if high is not None: qs = qs.filter(", "= subset.value elif isinstance(subset, Trim): is_slice = False low = subset.low high =", "containment=\"overlaps\"): if not len(self): return queryset qs = queryset bbox = [None, None,", "> value or end_time < value: return False elif low is None and", "is not None: qs = qs.filter( end_time__gte=low ) # check if the temporal", "<NAME> <<EMAIL>> # #------------------------------------------------------------------------------- # Copyright (C) 2013 EOX IT Services GmbH #", "/ float(size_x), 0.0) bbox[0] = extent[0] + l * (extent[2] - extent[0]) if", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "@property def has_y(self): return any(map(lambda s: s.is_y, self)) @property def has_t(self): return any(map(lambda", "low = subset.low high = subset.high if subset.is_temporal: if is_slice: qs = qs.filter(", "(\"x\", \"lon\", \"long\") y_axes = (\"y\", \"lat\") z_axes = (\"z\", \"height\") all_axes =", "= eo_object.end_time for subset in self: if isinstance(subset, Slice): is_slice = True value", "__init__(self, axis): axis = axis.lower() if axis not in all_axes: raise InvalidAxisLabelException(axis) self.axis", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "subset.low high = subset.high if subset.is_temporal: if is_slice: if begin_time > value or", "is not None: qs = qs.filter( end_time__lte=high ) if low is not None:", "= subset.high if subset.is_temporal: if is_slice: qs = qs.filter( begin_time__lte=value, end_time__gte=value ) else:", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", ") temporal_axes = (\"t\", \"time\", \"phenomenontime\") x_axes = (\"x\", \"lon\", \"long\") y_axes =", "__all__ = [\"Subsets\", \"Trim\", \"Slice\"] logger = logging.getLogger(__name__) class Subsets(list): \"\"\" Convenience class", "to deal # in the Software without restriction, including without limitation the rights", "to any person obtaining a copy # of this software and associated documentation", "Trim, Slice ) # Do a manual insertion here to assure integrity for", "queryset bbox = [None, None, None, None] srid = self.srid if srid is", "is None and not crss.is_image_crs(crs): raise InvalidSubsettingException( \"Could not parse EPSG code from", "srid if srid != 4326: line.transform(4326) qs = qs.filter(footprint__intersects=line) else: if subset.is_x: bbox[0]", "not in all_axes: raise InvalidAxisLabelException(axis) self.axis = axis @property def is_temporal(self): return self.axis", "subset.low bbox[3] = subset.high else: bbox[1] = bbox[3] = subset.value return bbox def", "self.has_t and subset.is_temporal: raise InvalidSubsettingException( \"Multiple subsets for time-axis given.\" ) @property def", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "4326 max_extent = crss.crs_bounds(srid) tolerance = crss.crs_tolerance(srid) footprint = eo_object.footprint begin_time = eo_object.begin_time", "max(subset.low, bbox[0]) if subset.high is not None: bbox[2] = min(subset.high, bbox[2]) if subset.is_y:", "None, None] for subset in self: if subset.is_x: if isinstance(subset, Trim): bbox[0] =", "self).insert(i, subset) # Subset related stuff @property def has_x(self): return any(map(lambda s: s.is_x,", "THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- import logging from", "_check_subset(self, subset): if not isinstance(subset, Subset): raise ValueError(\"Supplied argument is not a subset.\")", "poly.srid = subset_srid return poly class Subset(object): def __init__(self, axis): axis = axis.lower()", "4326: line.transform(4326) qs = qs.filter(footprint__intersects=line) else: if subset.is_x: bbox[0] = subset.low bbox[2] =", "Polygon.from_bbox(bbox) poly.srid = srid if srid != 4326: poly.transform(4326) if containment == \"overlaps\":", "if is_slice: if begin_time > value or end_time < value: return False elif", "in temporal_axes @property def is_x(self): return self.axis in x_axes @property def is_y(self): return", "and Y subsets. \"\"\" bbox = [None, None, None, None] for subset in", "subset.value elif subset.is_y: if isinstance(subset, Trim): bbox[1] = subset.low bbox[3] = subset.high else:", "this software and associated documentation files (the \"Software\"), to deal # in the", "l * (extent[3] - extent[1]) else: if subset.is_x: if subset.low is not None:", "else: bbox[1] = bbox[3] = subset.value return bbox def bounding_polygon(self, coverage): srid =", "else: if is_slice: if subset.is_x: line = LineString( (value, max_extent[1]), (value, max_extent[3]) )", "= max(float(subset.high) / float(size_y), 0.0) bbox[3] = extent[3] - l * (extent[3] -", "this Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", ") if low is not None: qs = qs.filter( end_time__gte=low ) # check", "float(size_y), 0.0) bbox[3] = extent[3] - l * (extent[3] - extent[1]) else: if", "None, None]: bbox = map( lambda v: v[0] if v[0] is not None", "X and Y subsets. \"\"\" bbox = [None, None, None, None] for subset", "return \"Slice: %s[%s]\" % (self.axis, self.value) class Trim(Subset): def __init__(self, axis, low=None, high=None):", "granted, free of charge, to any person obtaining a copy # of this", "else ( Trim, Slice ) # Do a manual insertion here to assure", "subset.high else: bbox[0] = bbox[2] = subset.value elif subset.is_y: if isinstance(subset, Trim): bbox[1]", "eo_object, containment=\"overlaps\"): if not len(self): return True bbox = [None, None, None, None]", "= low self.high = high def __repr__(self): return \"Trim: %s[%s:%s]\" % ( self.axis,", "# check if the temporal bounds must be strictly contained if containment ==", "containment == \"contains\": if high is not None: qs = qs.filter( end_time__lte=high )", "parse EPSG code from URI '%s'\" % crs ) return srid return None", "\"\"\" bbox = [None, None, None, None] for subset in self: if subset.is_x:", "# Permission is hereby granted, free of charge, to any person obtaining a", "def extend(self, iterable): for subset in iterable: self._check_subset(subset) super(Subsets, self).append(subset) def append(self, subset):", "max_extent[1]), (value, max_extent[3]) ) else: line = LineString( (max_extent[0], value), (max_extent[2], value) )", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "the initial subsets \"\"\" self.allowed_types = allowed_types if allowed_types is not None else", "to handle a variety of spatial and/or temporal subsets. \"\"\" def __init__(self, iterable,", "DEALINGS IN # THE SOFTWARE. 
#------------------------------------------------------------------------------- import logging from django.contrib.gis.geos import Polygon, LineString", "elif low is not None and high is None: if end_time < low:", "high is not None: qs = qs.filter( begin_time__lte=high ) if low is not", "subset.is_y: raise InvalidSubsettingException( \"Multiple subsets for Y-axis given.\" ) if self.has_t and subset.is_temporal:", "is not None: l = max(float(subset.low) / float(size_x), 0.0) bbox[0] = extent[0] +", "return any(map(lambda s: s.is_y, self)) @property def has_t(self): return any(map(lambda s: s.is_temporal, self))", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "without restriction, including without limitation the rights # to use, copy, modify, merge,", "end_time__gte=low ) # check if the temporal bounds must be strictly contained if", "elif containment == \"contains\": if not footprint.within(poly): return False return True def _check_subset(self,", "subset in self: if isinstance(subset, Slice): is_slice = True value = subset.value elif", "given.\" ) @property def xy_bbox(self): \"\"\" Returns the minimum bounding box for all", "subset) # Subset related stuff @property def has_x(self): return any(map(lambda s: s.is_x, self))", "- extent[1]) else: if subset.is_x: if subset.low is not None: bbox[0] = max(subset.low,", "not None: bbox[3] = min(subset.high, bbox[3]) if subset_srid is None: poly = Polygon.from_bbox(bbox)", "self.append(subset) self._crs = crs # List API def extend(self, iterable): for subset in", "(bbox, containment) ) poly = Polygon.from_bbox(bbox) poly.srid = srid if srid != 4326:", "tolerance logger.debug( \"Applying BBox %s with containment '%s'.\" % (bbox, containment) ) poly", "srid != 4326: poly.transform(4326) if containment == \"overlaps\": qs = qs.filter(footprint__intersects=poly) elif containment", "def is_temporal(axis): \"\"\" Returns whether or not an axis is a temporal one.", "copies of the Software, and to permit persons to whom the Software is", "is_slice: qs = qs.filter( begin_time__lte=value, end_time__gte=value ) else: if high is not None:", "if begin_time > value or end_time < value: return False elif low is", ") # Do a manual insertion here to assure integrity for subset in", "= temporal_axes + x_axes + y_axes + z_axes def is_temporal(axis): \"\"\" Returns whether", "not line.intersects(footprint): return False else: if subset.is_x: bbox[0] = subset.low bbox[2] = subset.high", "def is_x(self): return self.axis in x_axes @property def is_y(self): return self.axis in y_axes", "find the correct integer SRID for the crs. 
\"\"\" crs = self.crs if", "def insert(self, i, subset): self._check_subset(subset) super(Subsets, self).insert(i, subset) # Subset related stuff @property", "srid = coverage.srid extent = coverage.extent size_x, size_y = coverage.size footprint = coverage.footprint", "not None: qs = qs.filter( end_time__gte=low ) # check if the temporal bounds", "notice shall be included in all # copies of this Software or works", "\"Slice: %s[%s]\" % (self.axis, self.value) class Trim(Subset): def __init__(self, axis, low=None, high=None): super(Trim,", "obtaining a copy # of this software and associated documentation files (the \"Software\"),", "axis, low=None, high=None): super(Trim, self).__init__(axis) if low is not None and high is", "len(self): return queryset qs = queryset bbox = [None, None, None, None] srid", "bbox = map( lambda v: v[0] if v[0] is not None else v[1],", "list(extent) else: bbox = list(footprint.extent) for subset in self: if not isinstance(subset, Trim)", "isinstance(subset, Trim): is_slice = False low = subset.low high = subset.high if subset.is_temporal:", "l = max(float(subset.high) / float(size_y), 0.0) bbox[3] = extent[3] - l * (extent[3]", "super(Subsets, self).insert(i, subset) # Subset related stuff @property def has_x(self): return any(map(lambda s:", "crs. \"\"\" crs = self.crs if crs is not None: srid = crss.parseEPSGCode(crs,", ") poly = Polygon.from_bbox(bbox) poly.srid = srid if srid != 4326: poly.transform(4326) if", "bbox = [None, None, None, None] for subset in self: if subset.is_x: if", "False elif low is None and high is not None: if begin_time >", "crss.crs_tolerance(srid) for subset in self: if isinstance(subset, Slice): is_slice = True value =", "return True bbox = [None, None, None, None] srid = self.srid if srid", "value) ) line.srid = srid if srid != 4326: line.transform(4326) qs = qs.filter(footprint__intersects=line)", "not isinstance(subset, self.allowed_types): raise InvalidSubsettingException( \"Supplied subset is not allowed.\" ) if self.has_x", "line.intersects(footprint): return False else: if subset.is_x: bbox[0] = subset.low bbox[2] = subset.high else:", "Trim): bbox[0] = subset.low bbox[2] = subset.high else: bbox[0] = bbox[2] = subset.value", "raise InvalidSubsettingException( \"Supplied subset is not allowed.\" ) if self.has_x and subset.is_x: raise", "any person obtaining a copy # of this software and associated documentation files", "class Slice(Subset): def __init__(self, axis, value): super(Slice, self).__init__(axis) self.value = value def __repr__(self):", "is not None: bbox[2] = min(subset.high, bbox[2]) if subset.is_y: if subset.low is not", "= list(footprint.extent) for subset in self: if not isinstance(subset, Trim) or subset.is_temporal: continue", "#------------------------------------------------------------------------------- import logging from django.contrib.gis.geos import Polygon, LineString from eoxserver.resources.coverages import crss from", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "a copy # of this software and associated documentation files (the \"Software\"), to", "containment == \"contains\": if not footprint.within(poly): return False return True def _check_subset(self, subset):", "bbox[3] = min(subset.high, bbox[3]) if subset_srid is None: poly = Polygon.from_bbox(bbox) poly.srid =", "\"Supplied subset is not allowed.\" ) if self.has_x and subset.is_x: raise InvalidSubsettingException( \"Multiple", "!= 4326: line.transform(4326) qs = qs.filter(footprint__intersects=line) else: if 
subset.is_x: bbox[0] = subset.low bbox[2]", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "(the \"Software\"), to deal # in the Software without restriction, including without limitation", "low self.high = high def __repr__(self): return \"Trim: %s[%s:%s]\" % ( self.axis, self.low,", "all # copies of this Software or works derived from this Software. #", ") else: line = Line( (max_extent[0], value), (max_extent[2], value) ) line.srid = srid", "begin_time > high: return False elif low is not None and high is", "low is not None: qs = qs.filter( end_time__gte=low ) # check if the", "charge, to any person obtaining a copy # of this software and associated", "= max(subset.low, bbox[0]) if subset.high is not None: bbox[2] = min(subset.high, bbox[2]) if", "self.axis in y_axes class Slice(Subset): def __init__(self, axis, value): super(Slice, self).__init__(axis) self.value =", "a subset.\") if not isinstance(subset, self.allowed_types): raise InvalidSubsettingException( \"Supplied subset is not allowed.\"", "line.transform(4326) qs = qs.filter(footprint__intersects=line) else: if subset.is_x: bbox[0] = subset.low bbox[2] = subset.high", "subset.high is not None: l = max(float(subset.high) / float(size_y), 0.0) bbox[3] = extent[3]", "limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or", "raise ValueError(\"Supplied argument is not a subset.\") if not isinstance(subset, self.allowed_types): raise InvalidSubsettingException(", "end_time = eo_object.end_time for subset in self: if isinstance(subset, Slice): is_slice = True", "v[1], zip(bbox, max_extent) ) bbox[0] -= tolerance; bbox[1] -= tolerance bbox[2] += tolerance;", "self._check_subset(subset) super(Subsets, self).insert(i, subset) # Subset related stuff @property def has_x(self): return any(map(lambda", "tolerance = crss.crs_tolerance(srid) footprint = eo_object.footprint begin_time = eo_object.begin_time end_time = eo_object.end_time for", "# # Project: EOxServer <http://eoxserver.org> # Authors: <NAME> <<EMAIL>> # #------------------------------------------------------------------------------- # Copyright", "high: return False elif low is not None and high is None: if", "begin_time > high or end_time < low: return False else: if is_slice: if", "subset in self: if not isinstance(subset, Trim) or subset.is_temporal: continue if subset_srid is", "__init__(self, axis, value): super(Slice, self).__init__(axis) self.value = value def __repr__(self): return \"Slice: %s[%s]\"", "has_t(self): return any(map(lambda s: s.is_temporal, self)) @property def crs(self): return self._crs @crs.setter def", "\"overlaps\": if not footprint.intersects(poly): return False elif containment == \"contains\": if not footprint.within(poly):", "X-axis given.\" ) if self.has_y and subset.is_y: raise InvalidSubsettingException( \"Multiple subsets for Y-axis", "= value @property def srid(self): \"\"\" Tries to find the correct integer SRID", "None, None, None] srid = self.srid if srid is None: srid = 4326", ") if srid is None and not crss.is_image_crs(crs): raise InvalidSubsettingException( \"Could not parse", "iterable: self.append(subset) self._crs = crs # List API def extend(self, iterable): for subset", "(value, max_extent[1]), (value, max_extent[3]) ) else: line = Line( (max_extent[0], value), (max_extent[2], value)", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "here to assure integrity for subset in iterable: self.append(subset) self._crs = crs #", "EVENT SHALL 
THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "bbox = [None, None, None, None] srid = self.srid if srid is None:", "footprint = eo_object.footprint begin_time = eo_object.begin_time end_time = eo_object.end_time for subset in self:", "self.low = low self.high = high def __repr__(self): return \"Trim: %s[%s:%s]\" % (", "= 4326 max_extent = crss.crs_bounds(srid) tolerance = crss.crs_tolerance(srid) footprint = eo_object.footprint begin_time =", "all_axes = temporal_axes + x_axes + y_axes + z_axes def is_temporal(axis): \"\"\" Returns", "imageCRS to coverages CRS if subset.is_x: if subset.low is not None: l =", "$Id$ # # Project: EOxServer <http://eoxserver.org> # Authors: <NAME> <<EMAIL>> # #------------------------------------------------------------------------------- #", "not crss.is_image_crs(crs): raise InvalidSubsettingException( \"Could not parse EPSG code from URI '%s'\" %", "return bbox def bounding_polygon(self, coverage): srid = coverage.srid extent = coverage.extent size_x, size_y", "containment=\"overlaps\"): if not len(self): return True bbox = [None, None, None, None] srid", "above copyright notice and this permission notice shall be included in all #", "subset.is_x: bbox[0] = subset.low bbox[2] = subset.high else: bbox[1] = subset.low bbox[3] =", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE", "else: if is_slice: if subset.is_x: line = Line( (value, max_extent[1]), (value, max_extent[3]) )", "Returns the minimum bounding box for all X and Y subsets. \"\"\" bbox", "extent[0]) elif subset.is_y: if subset.low is not None: l = max(float(subset.low) / float(size_y),", "= eo_object.footprint begin_time = eo_object.begin_time end_time = eo_object.end_time for subset in self: if", "l = max(float(subset.high) / float(size_x), 0.0) bbox[2] = extent[0] + l * (extent[2]", "srid = 4326 max_extent = crss.crs_bounds(srid) tolerance = crss.crs_tolerance(srid) for subset in self:", "lambda v: v[0] if v[0] is not None else v[1], zip(bbox, max_extent) )", "and not crss.is_image_crs(crs): raise InvalidSubsettingException( \"Could not parse EPSG code from URI '%s'\"", "if subset.high is not None: l = max(float(subset.high) / float(size_x), 0.0) bbox[2] =", "if subset_srid is None: bbox = list(extent) else: bbox = list(footprint.extent) for subset", "# # Permission is hereby granted, free of charge, to any person obtaining", "self).__init__(axis) self.value = value def __repr__(self): return \"Slice: %s[%s]\" % (self.axis, self.value) class", "subset.value elif isinstance(subset, Trim): is_slice = False low = subset.low high = subset.high", "extent[3] - l * (extent[3] - extent[1]) if subset.high is not None: l", "\"Trim: %s[%s:%s]\" % ( self.axis, self.low, self.high ) temporal_axes = (\"t\", \"time\", \"phenomenontime\")", "is_temporal(axis): \"\"\" Returns whether or not an axis is a temporal one. 
\"\"\"", "iterable: self._check_subset(subset) super(Subsets, self).append(subset) def append(self, subset): self._check_subset(subset) super(Subsets, self).append(subset) def insert(self, i,", "subset in self: if subset.is_x: if isinstance(subset, Trim): bbox[0] = subset.low bbox[2] =", "begin_time__gte=low ) else: if is_slice: if subset.is_x: line = LineString( (value, max_extent[1]), (value,", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "#------------------------------------------------------------------------------- # $Id$ # # Project: EOxServer <http://eoxserver.org> # Authors: <NAME> <<EMAIL>> #", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "isinstance(subset, Trim): bbox[0] = subset.low bbox[2] = subset.high else: bbox[0] = bbox[2] =", "[None, None, None, None]: bbox = map( lambda v: v[0] if v[0] is", "return queryset qs = queryset bbox = [None, None, None, None] srid =", "z_axes = (\"z\", \"height\") all_axes = temporal_axes + x_axes + y_axes + z_axes", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "= max(subset.low, bbox[1]) if subset.high is not None: bbox[3] = min(subset.high, bbox[3]) if", "of the Software, and to permit persons to whom the Software is #", "manual insertion here to assure integrity for subset in iterable: self.append(subset) self._crs =", "srid return None def filter(self, queryset, containment=\"overlaps\"): if not len(self): return queryset qs", "qs = qs.filter(footprint__intersects=line) else: if subset.is_x: bbox[0] = subset.low bbox[2] = subset.high else:", "return \"Trim: %s[%s:%s]\" % ( self.axis, self.low, self.high ) temporal_axes = (\"t\", \"time\",", "crss.fromShortCode) ) if srid is None and not crss.is_image_crs(crs): raise InvalidSubsettingException( \"Could not", "is not None and low > high: raise InvalidSubsettingException( \"Invalid bounds: lower bound", "4326: line.transform(4326) if not line.intersects(footprint): return False else: if subset.is_x: bbox[0] = subset.low", "strictly contained if containment == \"contains\": if high is not None: qs =", "if self.has_t and subset.is_temporal: raise InvalidSubsettingException( \"Multiple subsets for time-axis given.\" ) @property", "- extent[0]) if subset.high is not None: l = max(float(subset.high) / float(size_x), 0.0)", "= list(extent) else: bbox = list(footprint.extent) for subset in self: if not isinstance(subset,", "low is not None and high is not None and low > high:", "continue if subset_srid is None: # transform coordinates from imageCRS to coverages CRS", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "\"\"\" Convenience class to handle a variety of spatial and/or temporal subsets. 
\"\"\"", "\"Applying BBox %s with containment '%s'.\" % (bbox, containment) ) poly = Polygon.from_bbox(bbox)", "max_extent = crss.crs_bounds(srid) tolerance = crss.crs_tolerance(srid) footprint = eo_object.footprint begin_time = eo_object.begin_time end_time", "any(map(lambda s: s.is_x, self)) @property def has_y(self): return any(map(lambda s: s.is_y, self)) @property", "if subset_srid is None: poly = Polygon.from_bbox(bbox) poly.srid = srid else: poly =", "subset.is_y: if isinstance(subset, Trim): bbox[1] = subset.low bbox[3] = subset.high else: bbox[1] =", "< low: return False else: if is_slice: if subset.is_x: line = Line( (value,", "if isinstance(subset, Trim): bbox[0] = subset.low bbox[2] = subset.high else: bbox[0] = bbox[2]", "@crs.setter def crs(self, value): self._crs = value @property def srid(self): \"\"\" Tries to", "line.srid = srid if srid != 4326: line.transform(4326) qs = qs.filter(footprint__intersects=line) else: if", "if low is not None: qs = qs.filter( end_time__gte=low ) # check if", "map( lambda v: v[0] if v[0] is not None else v[1], zip(bbox, max_extent)", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "subset.low is not None: bbox[1] = max(subset.low, bbox[1]) if subset.high is not None:", "(max_extent[2], value) ) line.srid = srid if srid != 4326: line.transform(4326) qs =", "def is_y(self): return self.axis in y_axes class Slice(Subset): def __init__(self, axis, value): super(Slice,", "bbox = list(extent) else: bbox = list(footprint.extent) for subset in self: if not", "(\"z\", \"height\") all_axes = temporal_axes + x_axes + y_axes + z_axes def is_temporal(axis):", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "s: s.is_y, self)) @property def has_t(self): return any(map(lambda s: s.is_temporal, self)) @property def", "to add set the initial subsets \"\"\" self.allowed_types = allowed_types if allowed_types is", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "self.axis = axis @property def is_temporal(self): return self.axis in temporal_axes @property def is_x(self):", "def crs(self, value): self._crs = value @property def srid(self): \"\"\" Tries to find", "Project: EOxServer <http://eoxserver.org> # Authors: <NAME> <<EMAIL>> # #------------------------------------------------------------------------------- # Copyright (C) 2013", "USE OR OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- import logging from django.contrib.gis.geos", "is not None else ( Trim, Slice ) # Do a manual insertion", "!= 4326: poly.transform(4326) if containment == \"overlaps\": if not footprint.intersects(poly): return False elif", "4326: poly.transform(4326) if containment == \"overlaps\": qs = qs.filter(footprint__intersects=poly) elif containment == \"contains\":", "SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
#------------------------------------------------------------------------------- import", "this permission notice shall be included in all # copies of this Software", "subset_srid is None: poly = Polygon.from_bbox(bbox) poly.srid = srid else: poly = Polygon.from_bbox(bbox)", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "qs.filter( begin_time__lte=high ) if low is not None: qs = qs.filter( end_time__gte=low )", "Authors: <NAME> <<EMAIL>> # #------------------------------------------------------------------------------- # Copyright (C) 2013 EOX IT Services GmbH", "subset.is_temporal: if is_slice: qs = qs.filter( begin_time__lte=value, end_time__gte=value ) else: if high is", "__repr__(self): return \"Slice: %s[%s]\" % (self.axis, self.value) class Trim(Subset): def __init__(self, axis, low=None,", "= srid if srid != 4326: poly.transform(4326) if containment == \"overlaps\": if not", ") if self.has_t and subset.is_temporal: raise InvalidSubsettingException( \"Multiple subsets for time-axis given.\" )", "of this Software or works derived from this Software. # # THE SOFTWARE", "subset.is_y: if subset.low is not None: bbox[1] = max(subset.low, bbox[1]) if subset.high is", "is hereby granted, free of charge, to any person obtaining a copy #", "raise InvalidSubsettingException( \"Multiple subsets for X-axis given.\" ) if self.has_y and subset.is_y: raise", "- extent[0]) elif subset.is_y: if subset.low is not None: l = max(float(subset.low) /", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "qs = qs.filter( begin_time__lte=high ) if low is not None: qs = qs.filter(", "%s[%s]\" % (self.axis, self.value) class Trim(Subset): def __init__(self, axis, low=None, high=None): super(Trim, self).__init__(axis)", "max(float(subset.low) / float(size_x), 0.0) bbox[0] = extent[0] + l * (extent[2] - extent[0])", "EOX IT Services GmbH # # Permission is hereby granted, free of charge,", "set the initial subsets \"\"\" self.allowed_types = allowed_types if allowed_types is not None", "@property def is_temporal(self): return self.axis in temporal_axes @property def is_x(self): return self.axis in", "insertion here to assure integrity for subset in iterable: self.append(subset) self._crs = crs", "not isinstance(subset, Trim) or subset.is_temporal: continue if subset_srid is None: # transform coordinates", "is not allowed.\" ) if self.has_x and subset.is_x: raise InvalidSubsettingException( \"Multiple subsets for", "any(map(lambda s: s.is_temporal, self)) @property def crs(self): return self._crs @crs.setter def crs(self, value):", "so, subject to the following conditions: # # The above copyright notice and", "if subset.is_x: if subset.low is not None: bbox[0] = max(subset.low, bbox[0]) if subset.high", "= coverage.size footprint = coverage.footprint subset_srid = self.srid if subset_srid is None: bbox", "= qs.filter( begin_time__gte=low ) else: if is_slice: if subset.is_x: line = LineString( (value,", "not None: bbox[1] = max(subset.low, bbox[1]) if subset.high is not None: bbox[3] =", "GmbH # # Permission is hereby granted, free of charge, to any person", "if allowed_types is not None else ( Trim, Slice ) # Do a", "subset_srid return poly class Subset(object): def __init__(self, axis): axis = axis.lower() if axis", "copy # of this software and associated documentation files (the \"Software\"), to deal", "subset.is_temporal: raise InvalidSubsettingException( 
\"Multiple subsets for time-axis given.\" ) @property def xy_bbox(self): \"\"\"", "poly = Polygon.from_bbox(bbox) poly.srid = subset_srid return poly class Subset(object): def __init__(self, axis):", "= logging.getLogger(__name__) class Subsets(list): \"\"\" Convenience class to handle a variety of spatial", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #-------------------------------------------------------------------------------", "#------------------------------------------------------------------------------- # Copyright (C) 2013 EOX IT Services GmbH # # Permission is", "@property def is_y(self): return self.axis in y_axes class Slice(Subset): def __init__(self, axis, value):", "if not isinstance(subset, Trim) or subset.is_temporal: continue if subset_srid is None: # transform", "footprint.intersects(poly): return False elif containment == \"contains\": if not footprint.within(poly): return False return", "len(self): return True bbox = [None, None, None, None] srid = self.srid if", "InvalidSubsettingException( \"Multiple subsets for Y-axis given.\" ) if self.has_t and subset.is_temporal: raise InvalidSubsettingException(", "sublicense, and/or sell # copies of the Software, and to permit persons to", "= (\"z\", \"height\") all_axes = temporal_axes + x_axes + y_axes + z_axes def", "# transform coordinates from imageCRS to coverages CRS if subset.is_x: if subset.low is", "False low = subset.low high = subset.high if subset.is_temporal: if is_slice: qs =", "= Polygon.from_bbox(bbox) poly.srid = srid if srid != 4326: poly.transform(4326) if containment ==", "and high is not None: if begin_time > high: return False elif low", "subset.is_x: line = LineString( (value, max_extent[1]), (value, max_extent[3]) ) else: line = LineString(", "x_axes = (\"x\", \"lon\", \"long\") y_axes = (\"y\", \"lat\") z_axes = (\"z\", \"height\")", "EOxServer <http://eoxserver.org> # Authors: <NAME> <<EMAIL>> # #------------------------------------------------------------------------------- # Copyright (C) 2013 EOX", "qs def matches(self, eo_object, containment=\"overlaps\"): if not len(self): return True bbox = [None,", "# copies of the Software, and to permit persons to whom the Software", "= LineString( (value, max_extent[1]), (value, max_extent[3]) ) else: line = LineString( (max_extent[0], value),", "value) ) line.srid = srid if srid != 4326: line.transform(4326) if not line.intersects(footprint):", "if subset.high is not None: l = max(float(subset.high) / float(size_y), 0.0) bbox[3] =", "self._check_subset(subset) super(Subsets, self).append(subset) def insert(self, i, subset): self._check_subset(subset) super(Subsets, self).insert(i, subset) # Subset", "min(subset.high, bbox[3]) if subset_srid is None: poly = Polygon.from_bbox(bbox) poly.srid = srid else:", "coverage.srid extent = coverage.extent size_x, size_y = coverage.size footprint = coverage.footprint subset_srid =", "copies of this Software or works derived from this Software. 
# # THE", "raise InvalidSubsettingException( \"Invalid bounds: lower bound greater than upper bound.\" ) self.low =", "import Polygon, LineString from eoxserver.resources.coverages import crss from eoxserver.services.exceptions import ( InvalidAxisLabelException, InvalidSubsettingException", "high is not None and low > high: raise InvalidSubsettingException( \"Invalid bounds: lower", "if low is not None: qs = qs.filter( begin_time__gte=low ) else: if is_slice:", "not None and low > high: raise InvalidSubsettingException( \"Invalid bounds: lower bound greater", "None: bbox[2] = min(subset.high, bbox[2]) if subset.is_y: if subset.low is not None: bbox[1]", "float(size_x), 0.0) bbox[2] = extent[0] + l * (extent[2] - extent[0]) elif subset.is_y:", "__init__(self, axis, low=None, high=None): super(Trim, self).__init__(axis) if low is not None and high", "bbox[2] = min(subset.high, bbox[2]) if subset.is_y: if subset.low is not None: bbox[1] =", "not isinstance(subset, Subset): raise ValueError(\"Supplied argument is not a subset.\") if not isinstance(subset,", "bbox[0] = subset.low bbox[2] = subset.high else: bbox[1] = subset.low bbox[3] = subset.high", "in all_axes: raise InvalidAxisLabelException(axis) self.axis = axis @property def is_temporal(self): return self.axis in", "to assure integrity for subset in iterable: self.append(subset) self._crs = crs # List", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "InvalidSubsettingException ) __all__ = [\"Subsets\", \"Trim\", \"Slice\"] logger = logging.getLogger(__name__) class Subsets(list): \"\"\"", "Trim): is_slice = False low = subset.low high = subset.high if subset.is_temporal: if", "extent = coverage.extent size_x, size_y = coverage.size footprint = coverage.footprint subset_srid = self.srid", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "Polygon.from_bbox(bbox) poly.srid = srid else: poly = Polygon.from_bbox(bbox) poly.srid = subset_srid return poly", "self).append(subset) def insert(self, i, subset): self._check_subset(subset) super(Subsets, self).insert(i, subset) # Subset related stuff", "None: qs = qs.filter( end_time__gte=low ) # check if the temporal bounds must", "is None: # transform coordinates from imageCRS to coverages CRS if subset.is_x: if", "value): super(Slice, self).__init__(axis) self.value = value def __repr__(self): return \"Slice: %s[%s]\" % (self.axis,", "subset.high is not None: bbox[3] = min(subset.high, bbox[3]) if subset_srid is None: poly", "or works derived from this Software. 
# # THE SOFTWARE IS PROVIDED \"AS", "= axis @property def is_temporal(self): return self.axis in temporal_axes @property def is_x(self): return", "is not None: bbox[0] = max(subset.low, bbox[0]) if subset.high is not None: bbox[2]", "if low is not None and high is not None and low >", "def __init__(self, axis, low=None, high=None): super(Trim, self).__init__(axis) if low is not None and", "z_axes def is_temporal(axis): \"\"\" Returns whether or not an axis is a temporal", "axis @property def is_temporal(self): return self.axis in temporal_axes @property def is_x(self): return self.axis", "whom the Software is # furnished to do so, subject to the following", "if subset.is_x: bbox[0] = subset.low bbox[2] = subset.high else: bbox[1] = subset.low bbox[3]", "= qs.filter(footprint__intersects=line) else: if subset.is_x: bbox[0] = subset.low bbox[2] = subset.high else: bbox[1]", "bbox[2] += tolerance; bbox[3] += tolerance logger.debug( \"Applying BBox %s with containment '%s'.\"", "SRID for the crs. \"\"\" crs = self.crs if crs is not None:", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "subset.low is not None: l = max(float(subset.low) / float(size_x), 0.0) bbox[0] = extent[0]", "temporal_axes @property def is_x(self): return self.axis in x_axes @property def is_y(self): return self.axis", "None: srid = 4326 max_extent = crss.crs_bounds(srid) tolerance = crss.crs_tolerance(srid) for subset in", "given.\" ) if self.has_t and subset.is_temporal: raise InvalidSubsettingException( \"Multiple subsets for time-axis given.\"", "'%s'.\" % (bbox, containment) ) poly = Polygon.from_bbox(bbox) poly.srid = srid if srid", "not len(self): return queryset qs = queryset bbox = [None, None, None, None]", "if high is not None: qs = qs.filter( begin_time__lte=high ) if low is", "is # furnished to do so, subject to the following conditions: # #", "bounding_polygon(self, coverage): srid = coverage.srid extent = coverage.extent size_x, size_y = coverage.size footprint", "None, None] srid = self.srid if srid is None: srid = 4326 max_extent", "max_extent) ) bbox[0] -= tolerance; bbox[1] -= tolerance bbox[2] += tolerance; bbox[3] +=", "Trim) or subset.is_temporal: continue if subset_srid is None: # transform coordinates from imageCRS", "self: if isinstance(subset, Slice): is_slice = True value = subset.value elif isinstance(subset, Trim):", "def _check_subset(self, subset): if not isinstance(subset, Subset): raise ValueError(\"Supplied argument is not a", "InvalidSubsettingException( \"Could not parse EPSG code from URI '%s'\" % crs ) return", "qs = qs.filter(footprint__intersects=poly) elif containment == \"contains\": qs = qs.filter(footprint__within=poly) return qs def", "subset.low bbox[3] = subset.high if bbox != [None, None, None, None]: bbox =", "return False elif low is None and high is not None: if begin_time", "subset): self._check_subset(subset) super(Subsets, self).insert(i, subset) # Subset related stuff @property def has_x(self): return", "else: bbox[1] = subset.low bbox[3] = subset.high if bbox != [None, None, None,", "( Trim, Slice ) # Do a manual insertion here to assure integrity", "= bbox[2] = subset.value elif subset.is_y: if isinstance(subset, Trim): bbox[1] = subset.low bbox[3]", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "or not an axis is a temporal one. 
\"\"\" return (axis.lower() in temporal_axes)", "is_slice = True value = subset.value elif isinstance(subset, Trim): is_slice = False low", "= subset.value return bbox def bounding_polygon(self, coverage): srid = coverage.srid extent = coverage.extent", ") @property def xy_bbox(self): \"\"\" Returns the minimum bounding box for all X", ") # check if the temporal bounds must be strictly contained if containment", "coverage.footprint subset_srid = self.srid if subset_srid is None: bbox = list(extent) else: bbox", "\"Invalid bounds: lower bound greater than upper bound.\" ) self.low = low self.high", "axis.lower() if axis not in all_axes: raise InvalidAxisLabelException(axis) self.axis = axis @property def", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE.", "if containment == \"overlaps\": if not footprint.intersects(poly): return False elif containment == \"contains\":", "poly = Polygon.from_bbox(bbox) poly.srid = srid if srid != 4326: poly.transform(4326) if containment", "Subset(object): def __init__(self, axis): axis = axis.lower() if axis not in all_axes: raise", "% (self.axis, self.value) class Trim(Subset): def __init__(self, axis, low=None, high=None): super(Trim, self).__init__(axis) if", "if self.has_y and subset.is_y: raise InvalidSubsettingException( \"Multiple subsets for Y-axis given.\" ) if", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "= min(subset.high, bbox[2]) if subset.is_y: if subset.low is not None: bbox[1] = max(subset.low,", "if srid != 4326: poly.transform(4326) if containment == \"overlaps\": if not footprint.intersects(poly): return", "= (\"y\", \"lat\") z_axes = (\"z\", \"height\") all_axes = temporal_axes + x_axes +", "bbox[0]) if subset.high is not None: bbox[2] = min(subset.high, bbox[2]) if subset.is_y: if", "Do a manual insertion here to assure integrity for subset in iterable: self.append(subset)", "subset.is_x: if subset.low is not None: l = max(float(subset.low) / float(size_x), 0.0) bbox[0]", "if srid != 4326: poly.transform(4326) if containment == \"overlaps\": qs = qs.filter(footprint__intersects=poly) elif", "furnished to do so, subject to the following conditions: # # The above", "if containment == \"contains\": if high is not None: qs = qs.filter( end_time__lte=high", "!= [None, None, None, None]: bbox = map( lambda v: v[0] if v[0]", "tolerance = crss.crs_tolerance(srid) for subset in self: if isinstance(subset, Slice): is_slice = True", "srid if srid != 4326: poly.transform(4326) if containment == \"overlaps\": qs = qs.filter(footprint__intersects=poly)", "temporal bounds must be strictly contained if containment == \"contains\": if high is", "(max_extent[0], value), (max_extent[2], value) ) line.srid = srid if srid != 4326: line.transform(4326)", "None and high is not None and low > high: raise InvalidSubsettingException( \"Invalid", "/ float(size_y), 0.0) bbox[1] = extent[3] - l * (extent[3] - extent[1]) if", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "contained if containment == \"contains\": if high is not None: qs = qs.filter(", "[None, None, None, None] for subset in self: if subset.is_x: if isinstance(subset, Trim):", "= queryset bbox = [None, None, None, None] srid = self.srid if srid", "self).__init__(axis) if low is not None and high is not None and low", "from eoxserver.services.exceptions import ( InvalidAxisLabelException, InvalidSubsettingException ) __all__ = [\"Subsets\", \"Trim\", \"Slice\"] logger", "axis, value): super(Slice, 
self).__init__(axis) self.value = value def __repr__(self): return \"Slice: %s[%s]\" %", "Trim): bbox[1] = subset.low bbox[3] = subset.high else: bbox[1] = bbox[3] = subset.value", "Tries to find the correct integer SRID for the crs. \"\"\" crs =", "django.contrib.gis.geos import Polygon, LineString from eoxserver.resources.coverages import crss from eoxserver.services.exceptions import ( InvalidAxisLabelException,", "iterable): for subset in iterable: self._check_subset(subset) super(Subsets, self).append(subset) def append(self, subset): self._check_subset(subset) super(Subsets,", "( InvalidAxisLabelException, InvalidSubsettingException ) __all__ = [\"Subsets\", \"Trim\", \"Slice\"] logger = logging.getLogger(__name__) class", "self._check_subset(subset) super(Subsets, self).append(subset) def append(self, subset): self._check_subset(subset) super(Subsets, self).append(subset) def insert(self, i, subset):", "return False return True def _check_subset(self, subset): if not isinstance(subset, Subset): raise ValueError(\"Supplied", "not None: l = max(float(subset.low) / float(size_y), 0.0) bbox[1] = extent[3] - l", "if v[0] is not None else v[1], zip(bbox, max_extent) ) bbox[0] -= tolerance;", "True bbox = [None, None, None, None] srid = self.srid if srid is", "srid else: poly = Polygon.from_bbox(bbox) poly.srid = subset_srid return poly class Subset(object): def", "max(float(subset.high) / float(size_y), 0.0) bbox[3] = extent[3] - l * (extent[3] - extent[1])", "self.value = value def __repr__(self): return \"Slice: %s[%s]\" % (self.axis, self.value) class Trim(Subset):", "Trim(Subset): def __init__(self, axis, low=None, high=None): super(Trim, self).__init__(axis) if low is not None", ") else: if is_slice: if subset.is_x: line = LineString( (value, max_extent[1]), (value, max_extent[3])", "notice and this permission notice shall be included in all # copies of", "0.0) bbox[1] = extent[3] - l * (extent[3] - extent[1]) if subset.high is", "= coverage.extent size_x, size_y = coverage.size footprint = coverage.footprint subset_srid = self.srid if", "= min(subset.high, bbox[3]) if subset_srid is None: poly = Polygon.from_bbox(bbox) poly.srid = srid", "Slice ) # Do a manual insertion here to assure integrity for subset", ") self.low = low self.high = high def __repr__(self): return \"Trim: %s[%s:%s]\" %", "bbox = list(footprint.extent) for subset in self: if not isinstance(subset, Trim) or subset.is_temporal:", "is not None: l = max(float(subset.high) / float(size_x), 0.0) bbox[2] = extent[0] +", "None: qs = qs.filter( end_time__lte=high ) if low is not None: qs =", "initial subsets \"\"\" self.allowed_types = allowed_types if allowed_types is not None else (", "is not None and high is None: if end_time < low: return False", "if subset.is_x: line = Line( (value, max_extent[1]), (value, max_extent[3]) ) else: line =", "qs = qs.filter( begin_time__lte=value, end_time__gte=value ) else: if high is not None: qs", "raise InvalidSubsettingException( \"Multiple subsets for Y-axis given.\" ) if self.has_t and subset.is_temporal: raise", "is not None: srid = crss.parseEPSGCode(crs, (crss.fromURL, crss.fromURN, crss.fromShortCode) ) if srid is", "to find the correct integer SRID for the crs. 
\"\"\" crs = self.crs", "to coverages CRS if subset.is_x: if subset.low is not None: l = max(float(subset.low)", "self.axis in temporal_axes @property def is_x(self): return self.axis in x_axes @property def is_y(self):", "crs is not None: srid = crss.parseEPSGCode(crs, (crss.fromURL, crss.fromURN, crss.fromShortCode) ) if srid", "and subset.is_temporal: raise InvalidSubsettingException( \"Multiple subsets for time-axis given.\" ) @property def xy_bbox(self):", "if subset.high is not None: bbox[2] = min(subset.high, bbox[2]) if subset.is_y: if subset.low", "subsets for Y-axis given.\" ) if self.has_t and subset.is_temporal: raise InvalidSubsettingException( \"Multiple subsets", "qs.filter(footprint__within=poly) return qs def matches(self, eo_object, containment=\"overlaps\"): if not len(self): return True bbox", "raise InvalidSubsettingException( \"Multiple subsets for time-axis given.\" ) @property def xy_bbox(self): \"\"\" Returns", "srid is None: srid = 4326 max_extent = crss.crs_bounds(srid) tolerance = crss.crs_tolerance(srid) for", "raise InvalidSubsettingException( \"Could not parse EPSG code from URI '%s'\" % crs )", "subsets \"\"\" self.allowed_types = allowed_types if allowed_types is not None else ( Trim,", "# THE SOFTWARE. #------------------------------------------------------------------------------- import logging from django.contrib.gis.geos import Polygon, LineString from eoxserver.resources.coverages", "is None: srid = 4326 max_extent = crss.crs_bounds(srid) tolerance = crss.crs_tolerance(srid) footprint =", "def has_x(self): return any(map(lambda s: s.is_x, self)) @property def has_y(self): return any(map(lambda s:", "deal # in the Software without restriction, including without limitation the rights #", "\"contains\": qs = qs.filter(footprint__within=poly) return qs def matches(self, eo_object, containment=\"overlaps\"): if not len(self):", "srid != 4326: poly.transform(4326) if containment == \"overlaps\": if not footprint.intersects(poly): return False", "+ x_axes + y_axes + z_axes def is_temporal(axis): \"\"\" Returns whether or not", "= value def __repr__(self): return \"Slice: %s[%s]\" % (self.axis, self.value) class Trim(Subset): def", "# Project: EOxServer <http://eoxserver.org> # Authors: <NAME> <<EMAIL>> # #------------------------------------------------------------------------------- # Copyright (C)", "None: srid = crss.parseEPSGCode(crs, (crss.fromURL, crss.fromURN, crss.fromShortCode) ) if srid is None and", "and/or temporal subsets. \"\"\" def __init__(self, iterable, crs=None, allowed_types=None): \"\"\" Constructor. 
Allows to", "crs = self.crs if crs is not None: srid = crss.parseEPSGCode(crs, (crss.fromURL, crss.fromURN,", "def __repr__(self): return \"Trim: %s[%s:%s]\" % ( self.axis, self.low, self.high ) temporal_axes =", "self: if not isinstance(subset, Trim) or subset.is_temporal: continue if subset_srid is None: #", "else: if subset.is_x: bbox[0] = subset.low bbox[2] = subset.high else: bbox[1] = subset.low", "add set the initial subsets \"\"\" self.allowed_types = allowed_types if allowed_types is not", "def __init__(self, axis): axis = axis.lower() if axis not in all_axes: raise InvalidAxisLabelException(axis)", "None else v[1], zip(bbox, max_extent) ) bbox[0] -= tolerance; bbox[1] -= tolerance bbox[2]", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "to whom the Software is # furnished to do so, subject to the", "poly.srid = srid else: poly = Polygon.from_bbox(bbox) poly.srid = subset_srid return poly class", "y_axes + z_axes def is_temporal(axis): \"\"\" Returns whether or not an axis is", "high def __repr__(self): return \"Trim: %s[%s:%s]\" % ( self.axis, self.low, self.high ) temporal_axes", "than upper bound.\" ) self.low = low self.high = high def __repr__(self): return", "bound greater than upper bound.\" ) self.low = low self.high = high def", "srid if srid != 4326: poly.transform(4326) if containment == \"overlaps\": if not footprint.intersects(poly):", "value @property def srid(self): \"\"\" Tries to find the correct integer SRID for", "s: s.is_x, self)) @property def has_y(self): return any(map(lambda s: s.is_y, self)) @property def", "not footprint.within(poly): return False return True def _check_subset(self, subset): if not isinstance(subset, Subset):", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "\"Trim\", \"Slice\"] logger = logging.getLogger(__name__) class Subsets(list): \"\"\" Convenience class to handle a", "0.0) bbox[0] = extent[0] + l * (extent[2] - extent[0]) if subset.high is", "if axis not in all_axes: raise InvalidAxisLabelException(axis) self.axis = axis @property def is_temporal(self):", "OR OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- import logging from django.contrib.gis.geos import", "# #------------------------------------------------------------------------------- # Copyright (C) 2013 EOX IT Services GmbH # # Permission", "value or end_time < value: return False elif low is None and high", "= qs.filter( begin_time__lte=high ) if low is not None: qs = qs.filter( end_time__gte=low", "None: bbox[1] = max(subset.low, bbox[1]) if subset.high is not None: bbox[3] = min(subset.high,", "is_y(self): return self.axis in y_axes class Slice(Subset): def __init__(self, axis, value): super(Slice, self).__init__(axis)", "def is_temporal(self): return self.axis in temporal_axes @property def is_x(self): return self.axis in x_axes", "= Line( (value, max_extent[1]), (value, max_extent[3]) ) else: line = Line( (max_extent[0], value),", "if subset.high is not None: bbox[3] = min(subset.high, bbox[3]) if subset_srid is None:", "elif subset.is_y: if subset.low is not None: l = max(float(subset.low) / float(size_y), 0.0)", "(value, max_extent[3]) ) else: line = LineString( (max_extent[0], value), (max_extent[2], value) ) line.srid", "value), (max_extent[2], value) ) line.srid = srid if srid != 4326: line.transform(4326) if", "whether or not an axis is a temporal one. 
\"\"\" return (axis.lower() in", "if crs is not None: srid = crss.parseEPSGCode(crs, (crss.fromURL, crss.fromURN, crss.fromShortCode) ) if", "with containment '%s'.\" % (bbox, containment) ) poly = Polygon.from_bbox(bbox) poly.srid = srid", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "float(size_x), 0.0) bbox[0] = extent[0] + l * (extent[2] - extent[0]) if subset.high", "sell # copies of the Software, and to permit persons to whom the", "(max_extent[2], value) ) line.srid = srid if srid != 4326: line.transform(4326) if not", "end_time__gte=value ) else: if high is not None: qs = qs.filter( begin_time__lte=high )", "self)) @property def crs(self): return self._crs @crs.setter def crs(self, value): self._crs = value", "self.axis, self.low, self.high ) temporal_axes = (\"t\", \"time\", \"phenomenontime\") x_axes = (\"x\", \"lon\",", "size_y = coverage.size footprint = coverage.footprint subset_srid = self.srid if subset_srid is None:", "bounds must be strictly contained if containment == \"contains\": if high is not", "super(Trim, self).__init__(axis) if low is not None and high is not None and", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "Returns whether or not an axis is a temporal one. \"\"\" return (axis.lower()", "subset is not allowed.\" ) if self.has_x and subset.is_x: raise InvalidSubsettingException( \"Multiple subsets", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "= subset.low high = subset.high if subset.is_temporal: if is_slice: if begin_time > value", "files (the \"Software\"), to deal # in the Software without restriction, including without", "- l * (extent[3] - extent[1]) else: if subset.is_x: if subset.low is not", "if self.has_x and subset.is_x: raise InvalidSubsettingException( \"Multiple subsets for X-axis given.\" ) if", "if subset.is_x: if subset.low is not None: l = max(float(subset.low) / float(size_x), 0.0)", "import logging from django.contrib.gis.geos import Polygon, LineString from eoxserver.resources.coverages import crss from eoxserver.services.exceptions", "poly.srid = srid if srid != 4326: poly.transform(4326) if containment == \"overlaps\": qs", "following conditions: # # The above copyright notice and this permission notice shall", "else: poly = Polygon.from_bbox(bbox) poly.srid = subset_srid return poly class Subset(object): def __init__(self,", "\"Multiple subsets for time-axis given.\" ) @property def xy_bbox(self): \"\"\" Returns the minimum", "= subset.low bbox[2] = subset.high else: bbox[0] = bbox[2] = subset.value elif subset.is_y:", "this Software or works derived from this Software. 
# # THE SOFTWARE IS", "for subset in self: if not isinstance(subset, Trim) or subset.is_temporal: continue if subset_srid", "line = Line( (max_extent[0], value), (max_extent[2], value) ) line.srid = srid if srid", "coverages CRS if subset.is_x: if subset.low is not None: l = max(float(subset.low) /", "bbox[1]) if subset.high is not None: bbox[3] = min(subset.high, bbox[3]) if subset_srid is", "None: bbox[3] = min(subset.high, bbox[3]) if subset_srid is None: poly = Polygon.from_bbox(bbox) poly.srid", "List API def extend(self, iterable): for subset in iterable: self._check_subset(subset) super(Subsets, self).append(subset) def", "poly class Subset(object): def __init__(self, axis): axis = axis.lower() if axis not in", "if not line.intersects(footprint): return False else: if subset.is_x: bbox[0] = subset.low bbox[2] =", "# $Id$ # # Project: EOxServer <http://eoxserver.org> # Authors: <NAME> <<EMAIL>> # #-------------------------------------------------------------------------------", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "for time-axis given.\" ) @property def xy_bbox(self): \"\"\" Returns the minimum bounding box", "permission notice shall be included in all # copies of this Software or", "InvalidAxisLabelException, InvalidSubsettingException ) __all__ = [\"Subsets\", \"Trim\", \"Slice\"] logger = logging.getLogger(__name__) class Subsets(list):", "copyright notice and this permission notice shall be included in all # copies", "srid is None: srid = 4326 max_extent = crss.crs_bounds(srid) tolerance = crss.crs_tolerance(srid) footprint", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "value = subset.value elif isinstance(subset, Trim): is_slice = False low = subset.low high", "= [\"Subsets\", \"Trim\", \"Slice\"] logger = logging.getLogger(__name__) class Subsets(list): \"\"\" Convenience class to", "else: line = Line( (max_extent[0], value), (max_extent[2], value) ) line.srid = srid if", "self.axis in x_axes @property def is_y(self): return self.axis in y_axes class Slice(Subset): def", "y_axes class Slice(Subset): def __init__(self, axis, value): super(Slice, self).__init__(axis) self.value = value def", "for Y-axis given.\" ) if self.has_t and subset.is_temporal: raise InvalidSubsettingException( \"Multiple subsets for", "None] for subset in self: if subset.is_x: if isinstance(subset, Trim): bbox[0] = subset.low", "Convenience class to handle a variety of spatial and/or temporal subsets. \"\"\" def", "* (extent[3] - extent[1]) if subset.high is not None: l = max(float(subset.high) /", "allowed_types is not None else ( Trim, Slice ) # Do a manual", "THE SOFTWARE. 
#------------------------------------------------------------------------------- import logging from django.contrib.gis.geos import Polygon, LineString from eoxserver.resources.coverages import", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "self)) @property def has_y(self): return any(map(lambda s: s.is_y, self)) @property def has_t(self): return", "class Subset(object): def __init__(self, axis): axis = axis.lower() if axis not in all_axes:", "subsets for X-axis given.\" ) if self.has_y and subset.is_y: raise InvalidSubsettingException( \"Multiple subsets", "\"long\") y_axes = (\"y\", \"lat\") z_axes = (\"z\", \"height\") all_axes = temporal_axes +", "poly.transform(4326) if containment == \"overlaps\": if not footprint.intersects(poly): return False elif containment ==", "containment == \"overlaps\": qs = qs.filter(footprint__intersects=poly) elif containment == \"contains\": qs = qs.filter(footprint__within=poly)", "crss.parseEPSGCode(crs, (crss.fromURL, crss.fromURN, crss.fromShortCode) ) if srid is None and not crss.is_image_crs(crs): raise", "is not None else v[1], zip(bbox, max_extent) ) bbox[0] -= tolerance; bbox[1] -=", "qs = qs.filter( end_time__gte=low ) # check if the temporal bounds must be", "self.has_x and subset.is_x: raise InvalidSubsettingException( \"Multiple subsets for X-axis given.\" ) if self.has_y", "= qs.filter( begin_time__lte=value, end_time__gte=value ) else: if high is not None: qs =", "InvalidSubsettingException( \"Supplied subset is not allowed.\" ) if self.has_x and subset.is_x: raise InvalidSubsettingException(", "= True value = subset.value elif isinstance(subset, Trim): is_slice = False low =", "bbox[2] = subset.value elif subset.is_y: if isinstance(subset, Trim): bbox[1] = subset.low bbox[3] =", "l = max(float(subset.low) / float(size_x), 0.0) bbox[0] = extent[0] + l * (extent[2]", "not None: l = max(float(subset.high) / float(size_y), 0.0) bbox[3] = extent[3] - l", "zip(bbox, max_extent) ) bbox[0] -= tolerance; bbox[1] -= tolerance bbox[2] += tolerance; bbox[3]", "ValueError(\"Supplied argument is not a subset.\") if not isinstance(subset, self.allowed_types): raise InvalidSubsettingException( \"Supplied", "max_extent[3]) ) else: line = Line( (max_extent[0], value), (max_extent[2], value) ) line.srid =", "eo_object.begin_time end_time = eo_object.end_time for subset in self: if isinstance(subset, Slice): is_slice =", "CRS if subset.is_x: if subset.low is not None: l = max(float(subset.low) / float(size_x),", "\"Slice\"] logger = logging.getLogger(__name__) class Subsets(list): \"\"\" Convenience class to handle a variety", "box for all X and Y subsets. 
\"\"\" bbox = [None, None, None,", "= subset_srid return poly class Subset(object): def __init__(self, axis): axis = axis.lower() if", "None: l = max(float(subset.low) / float(size_y), 0.0) bbox[1] = extent[3] - l *", "None and high is not None: if begin_time > high: return False elif", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "!= 4326: poly.transform(4326) if containment == \"overlaps\": qs = qs.filter(footprint__intersects=poly) elif containment ==", "bounds: lower bound greater than upper bound.\" ) self.low = low self.high =", "and high is None: if end_time < low: return False else: if begin_time", "be strictly contained if containment == \"contains\": if high is not None: qs", "isinstance(subset, Trim) or subset.is_temporal: continue if subset_srid is None: # transform coordinates from", "be included in all # copies of this Software or works derived from", "high = subset.high if subset.is_temporal: if is_slice: qs = qs.filter( begin_time__lte=value, end_time__gte=value )", "srid is None and not crss.is_image_crs(crs): raise InvalidSubsettingException( \"Could not parse EPSG code", "stuff @property def has_x(self): return any(map(lambda s: s.is_x, self)) @property def has_y(self): return", "* (extent[2] - extent[0]) if subset.high is not None: l = max(float(subset.high) /", "else: if high is not None: qs = qs.filter( begin_time__lte=high ) if low", "size_x, size_y = coverage.size footprint = coverage.footprint subset_srid = self.srid if subset_srid is", "subset.high if subset.is_temporal: if is_slice: if begin_time > value or end_time < value:", "qs.filter(footprint__intersects=poly) elif containment == \"contains\": qs = qs.filter(footprint__within=poly) return qs def matches(self, eo_object,", "to the following conditions: # # The above copyright notice and this permission", "axis): axis = axis.lower() if axis not in all_axes: raise InvalidAxisLabelException(axis) self.axis =", "y_axes = (\"y\", \"lat\") z_axes = (\"z\", \"height\") all_axes = temporal_axes + x_axes", "return False else: if begin_time > high or end_time < low: return False", "max_extent = crss.crs_bounds(srid) tolerance = crss.crs_tolerance(srid) for subset in self: if isinstance(subset, Slice):", "from imageCRS to coverages CRS if subset.is_x: if subset.low is not None: l", ") bbox[0] -= tolerance; bbox[1] -= tolerance bbox[2] += tolerance; bbox[3] += tolerance", "end_time < low: return False else: if begin_time > high or end_time <", "lower bound greater than upper bound.\" ) self.low = low self.high = high", "not a subset.\") if not isinstance(subset, self.allowed_types): raise InvalidSubsettingException( \"Supplied subset is not", "\"contains\": if high is not None: qs = qs.filter( end_time__lte=high ) if low", "* (extent[2] - extent[0]) elif subset.is_y: if subset.low is not None: l =", "InvalidAxisLabelException(axis) self.axis = axis @property def is_temporal(self): return self.axis in temporal_axes @property def", "Software is # furnished to do so, subject to the following conditions: #", "high is not None: if begin_time > high: return False elif low is", "max(float(subset.low) / float(size_y), 0.0) bbox[1] = extent[3] - l * (extent[3] - extent[1])", "qs = qs.filter(footprint__within=poly) return qs def matches(self, eo_object, containment=\"overlaps\"): if not len(self): return", "qs.filter( begin_time__gte=low ) else: if is_slice: if subset.is_x: line = LineString( (value, max_extent[1]),", "handle a variety of spatial and/or temporal subsets. 
\"\"\" def __init__(self, iterable, crs=None,", "= crss.crs_bounds(srid) tolerance = crss.crs_tolerance(srid) footprint = eo_object.footprint begin_time = eo_object.begin_time end_time =", "raise InvalidAxisLabelException(axis) self.axis = axis @property def is_temporal(self): return self.axis in temporal_axes @property", "not len(self): return True bbox = [None, None, None, None] srid = self.srid", "return srid return None def filter(self, queryset, containment=\"overlaps\"): if not len(self): return queryset", "self.srid if subset_srid is None: bbox = list(extent) else: bbox = list(footprint.extent) for", "None: l = max(float(subset.low) / float(size_x), 0.0) bbox[0] = extent[0] + l *", "extent[3] - l * (extent[3] - extent[1]) else: if subset.is_x: if subset.low is", "= subset.high if subset.is_temporal: if is_slice: if begin_time > value or end_time <", "2013 EOX IT Services GmbH # # Permission is hereby granted, free of", "\"lon\", \"long\") y_axes = (\"y\", \"lat\") z_axes = (\"z\", \"height\") all_axes = temporal_axes", "code from URI '%s'\" % crs ) return srid return None def filter(self,", "not None: qs = qs.filter( end_time__lte=high ) if low is not None: qs", "in iterable: self.append(subset) self._crs = crs # List API def extend(self, iterable): for", "is None and high is not None: if begin_time > high: return False", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "not None and high is not None and low > high: raise InvalidSubsettingException(", "self.srid if srid is None: srid = 4326 max_extent = crss.crs_bounds(srid) tolerance =", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "the following conditions: # # The above copyright notice and this permission notice", "if subset.low is not None: bbox[0] = max(subset.low, bbox[0]) if subset.high is not", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "is not None and high is not None and low > high: raise", "shall be included in all # copies of this Software or works derived", "rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "+ z_axes def is_temporal(axis): \"\"\" Returns whether or not an axis is a", "\"lat\") z_axes = (\"z\", \"height\") all_axes = temporal_axes + x_axes + y_axes +", "time-axis given.\" ) @property def xy_bbox(self): \"\"\" Returns the minimum bounding box for", "coverage): srid = coverage.srid extent = coverage.extent size_x, size_y = coverage.size footprint =", "return qs def matches(self, eo_object, containment=\"overlaps\"): if not len(self): return True bbox =", "None: if end_time < low: return False else: if begin_time > high or", "in x_axes @property def is_y(self): return self.axis in y_axes class Slice(Subset): def __init__(self,", "and subset.is_x: raise InvalidSubsettingException( \"Multiple subsets for X-axis given.\" ) if self.has_y and", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "to do so, subject to the following conditions: # # The above copyright", "IT Services GmbH # # Permission is hereby granted, free of charge, to", "bbox[3] += tolerance logger.debug( \"Applying BBox %s with containment '%s'.\" % (bbox, containment)", "return poly class Subset(object): def __init__(self, axis): axis = axis.lower() if axis not", "def crs(self): return self._crs @crs.setter def crs(self, value): self._crs = value @property def", 
"eoxserver.resources.coverages import crss from eoxserver.services.exceptions import ( InvalidAxisLabelException, InvalidSubsettingException ) __all__ = [\"Subsets\",", "class to handle a variety of spatial and/or temporal subsets. \"\"\" def __init__(self,", "in self: if not isinstance(subset, Trim) or subset.is_temporal: continue if subset_srid is None:", "l = max(float(subset.low) / float(size_y), 0.0) bbox[1] = extent[3] - l * (extent[3]", "+= tolerance logger.debug( \"Applying BBox %s with containment '%s'.\" % (bbox, containment) )", "+= tolerance; bbox[3] += tolerance logger.debug( \"Applying BBox %s with containment '%s'.\" %", "without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense,", "(extent[2] - extent[0]) elif subset.is_y: if subset.low is not None: l = max(float(subset.low)", "queryset qs = queryset bbox = [None, None, None, None] srid = self.srid", "srid = crss.parseEPSGCode(crs, (crss.fromURL, crss.fromURN, crss.fromShortCode) ) if srid is None and not", "subset.is_temporal: if is_slice: if begin_time > value or end_time < value: return False", "subset.is_temporal: continue if subset_srid is None: # transform coordinates from imageCRS to coverages", "of spatial and/or temporal subsets. \"\"\" def __init__(self, iterable, crs=None, allowed_types=None): \"\"\" Constructor.", "srid = self.srid if srid is None: srid = 4326 max_extent = crss.crs_bounds(srid)", "and subset.is_y: raise InvalidSubsettingException( \"Multiple subsets for Y-axis given.\" ) if self.has_t and", ") __all__ = [\"Subsets\", \"Trim\", \"Slice\"] logger = logging.getLogger(__name__) class Subsets(list): \"\"\" Convenience", "__repr__(self): return \"Trim: %s[%s:%s]\" % ( self.axis, self.low, self.high ) temporal_axes = (\"t\",", "if high is not None: qs = qs.filter( end_time__lte=high ) if low is", "@property def has_t(self): return any(map(lambda s: s.is_temporal, self)) @property def crs(self): return self._crs", "else: bbox = list(footprint.extent) for subset in self: if not isinstance(subset, Trim) or", "bbox[3] = subset.high if bbox != [None, None, None, None]: bbox = map(", "line = LineString( (value, max_extent[1]), (value, max_extent[3]) ) else: line = LineString( (max_extent[0],", "is not None: bbox[3] = min(subset.high, bbox[3]) if subset_srid is None: poly =", "subset in iterable: self._check_subset(subset) super(Subsets, self).append(subset) def append(self, subset): self._check_subset(subset) super(Subsets, self).append(subset) def", "> high or end_time < low: return False else: if is_slice: if subset.is_x:", "has_x(self): return any(map(lambda s: s.is_x, self)) @property def has_y(self): return any(map(lambda s: s.is_y,", "\"overlaps\": qs = qs.filter(footprint__intersects=poly) elif containment == \"contains\": qs = qs.filter(footprint__within=poly) return qs", "is not None: qs = qs.filter( begin_time__lte=high ) if low is not None:", "subset.is_x: raise InvalidSubsettingException( \"Multiple subsets for X-axis given.\" ) if self.has_y and subset.is_y:", "= coverage.footprint subset_srid = self.srid if subset_srid is None: bbox = list(extent) else:", "l * (extent[3] - extent[1]) if subset.high is not None: l = max(float(subset.high)", "if begin_time > high: return False elif low is not None and high", "= qs.filter(footprint__within=poly) return qs def matches(self, eo_object, containment=\"overlaps\"): if not len(self): return True", "isinstance(subset, Trim): bbox[1] = subset.low bbox[3] = subset.high else: bbox[1] = bbox[3] =", "Constructor. 
<reponame>ESA-VirES/eoxserver<filename>eoxserver/services/subset.py<gh_stars>1-10
#-------------------------------------------------------------------------------
# $Id$
#
# Project: EOxServer <http://eoxserver.org>
# Authors: <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------

import logging

from django.contrib.gis.geos import Polygon, LineString

from eoxserver.resources.coverages import crss
from eoxserver.services.exceptions import (
    InvalidAxisLabelException, InvalidSubsettingException
)

__all__ = ["Subsets", "Trim", "Slice"]

logger = logging.getLogger(__name__)


class Subsets(list):
    """ Convenience class to handle a variety of spatial and/or temporal
        subsets.
    """

    def __init__(self, iterable, crs=None, allowed_types=None):
        """ Constructor. Allows to add set the initial subsets
        """
        self.allowed_types = allowed_types if allowed_types is not None else (
            Trim, Slice
        )
        # Do a manual insertion here to assure integrity
        for subset in iterable:
            self.append(subset)
        self._crs = crs

    # List API
    def extend(self, iterable):
        for subset in iterable:
            self._check_subset(subset)
            super(Subsets, self).append(subset)

    def append(self, subset):
        self._check_subset(subset)
        super(Subsets, self).append(subset)

    def insert(self, i, subset):
        self._check_subset(subset)
        super(Subsets, self).insert(i, subset)

    # Subset related stuff
    @property
    def has_x(self):
        return any(map(lambda s: s.is_x, self))

    @property
    def has_y(self):
        return any(map(lambda s: s.is_y, self))

    @property
    def has_t(self):
        return any(map(lambda s: s.is_temporal, self))

    @property
    def crs(self):
        return self._crs

    @crs.setter
    def crs(self, value):
        self._crs = value

    @property
    def srid(self):
        """ Tries to find the correct integer SRID for the crs.
        """
        crs = self.crs
        if crs is not None:
            srid = crss.parseEPSGCode(
                crs, (crss.fromURL, crss.fromURN, crss.fromShortCode)
            )
            if srid is None and not crss.is_image_crs(crs):
                raise InvalidSubsettingException(
                    "Could not parse EPSG code from URI '%s'" % crs
                )
            return srid
        return None

    def filter(self, queryset, containment="overlaps"):
        if not len(self):
            return queryset

        qs = queryset

        bbox = [None, None, None, None]
        srid = self.srid
        if srid is None:
            srid = 4326
        max_extent = crss.crs_bounds(srid)
        tolerance = crss.crs_tolerance(srid)

        for subset in self:
            if isinstance(subset, Slice):
                is_slice = True
                value = subset.value
            elif isinstance(subset, Trim):
                is_slice = False
                low = subset.low
                high = subset.high

            if subset.is_temporal:
                if is_slice:
                    qs = qs.filter(
                        begin_time__lte=value, end_time__gte=value
                    )
                else:
                    if high is not None:
                        qs = qs.filter(begin_time__lte=high)
                    if low is not None:
                        qs = qs.filter(end_time__gte=low)

                    # check if the temporal bounds must be strictly contained
                    if containment == "contains":
                        if high is not None:
                            qs = qs.filter(end_time__lte=high)
                        if low is not None:
                            qs = qs.filter(begin_time__gte=low)

            else:
                if is_slice:
                    if subset.is_x:
                        line = LineString(
                            (value, max_extent[1]), (value, max_extent[3])
                        )
                    else:
                        line = LineString(
                            (max_extent[0], value), (max_extent[2], value)
                        )
                    line.srid = srid
                    if srid != 4326:
                        line.transform(4326)
                    qs = qs.filter(footprint__intersects=line)
                else:
                    if subset.is_x:
                        bbox[0] = subset.low
                        bbox[2] = subset.high
                    else:
                        bbox[1] = subset.low
                        bbox[3] = subset.high

        if bbox != [None, None, None, None]:
            # list(...) added so the in-place tolerance arithmetic below
            # also works on Python 3, where map() returns an iterator
            bbox = list(map(
                lambda v: v[0] if v[0] is not None else v[1],
                zip(bbox, max_extent)
            ))

            bbox[0] -= tolerance; bbox[1] -= tolerance
            bbox[2] += tolerance; bbox[3] += tolerance

            logger.debug(
                "Applying BBox %s with containment '%s'." % (bbox, containment)
            )

            poly = Polygon.from_bbox(bbox)
            poly.srid = srid
            if srid != 4326:
                poly.transform(4326)
            if containment == "overlaps":
                qs = qs.filter(footprint__intersects=poly)
            elif containment == "contains":
                qs = qs.filter(footprint__within=poly)

        return qs

    def matches(self, eo_object, containment="overlaps"):
        if not len(self):
            return True

        bbox = [None, None, None, None]
        srid = self.srid
        if srid is None:
            srid = 4326
        max_extent = crss.crs_bounds(srid)
        tolerance = crss.crs_tolerance(srid)
        footprint = eo_object.footprint
        begin_time = eo_object.begin_time
        end_time = eo_object.end_time

        for subset in self:
            if isinstance(subset, Slice):
                is_slice = True
                value = subset.value
            elif isinstance(subset, Trim):
                is_slice = False
                low = subset.low
                high = subset.high

            if subset.is_temporal:
                if is_slice:
                    if begin_time > value or end_time < value:
                        return False
                elif low is None and high is not None:
                    if begin_time > high:
                        return False
                elif low is not None and high is None:
                    if end_time < low:
                        return False
                else:
                    if begin_time > high or end_time < low:
                        return False

            else:
                if is_slice:
                    # the source called the un-imported name "Line" here,
                    # which would raise a NameError; LineString is the
                    # imported equivalent used in filter() above
                    if subset.is_x:
                        line = LineString(
                            (value, max_extent[1]), (value, max_extent[3])
                        )
                    else:
                        line = LineString(
                            (max_extent[0], value), (max_extent[2], value)
                        )
                    line.srid = srid
                    if srid != 4326:
                        line.transform(4326)
                    if not line.intersects(footprint):
                        return False
                else:
                    if subset.is_x:
                        bbox[0] = subset.low
                        bbox[2] = subset.high
                    else:
                        bbox[1] = subset.low
                        bbox[3] = subset.high

        if bbox != [None, None, None, None]:
            bbox = list(map(
                lambda v: v[0] if v[0] is not None else v[1],
                zip(bbox, max_extent)
            ))

            bbox[0] -= tolerance; bbox[1] -= tolerance
            bbox[2] += tolerance; bbox[3] += tolerance

            logger.debug(
                "Applying BBox %s with containment '%s'." % (bbox, containment)
            )

            poly = Polygon.from_bbox(bbox)
            poly.srid = srid
            if srid != 4326:
                poly.transform(4326)
            if containment == "overlaps":
                if not footprint.intersects(poly):
                    return False
            elif containment == "contains":
                if not footprint.within(poly):
                    return False

        return True

    def _check_subset(self, subset):
        if not isinstance(subset, Subset):
            raise ValueError("Supplied argument is not a subset.")

        if not isinstance(subset, self.allowed_types):
            raise InvalidSubsettingException(
                "Supplied subset is not allowed."
            )

        if self.has_x and subset.is_x:
            raise InvalidSubsettingException(
                "Multiple subsets for X-axis given."
            )
        if self.has_y and subset.is_y:
            raise InvalidSubsettingException(
                "Multiple subsets for Y-axis given."
            )
        if self.has_t and subset.is_temporal:
            raise InvalidSubsettingException(
                "Multiple subsets for time-axis given."
            )

    @property
    def xy_bbox(self):
        """ Returns the minimum bounding box for all X and Y subsets.
        """
        bbox = [None, None, None, None]
        for subset in self:
            if subset.is_x:
                if isinstance(subset, Trim):
                    bbox[0] = subset.low
                    bbox[2] = subset.high
                else:
                    bbox[0] = bbox[2] = subset.value
            elif subset.is_y:
                if isinstance(subset, Trim):
                    bbox[1] = subset.low
                    bbox[3] = subset.high
                else:
                    bbox[1] = bbox[3] = subset.value

        return bbox

    def bounding_polygon(self, coverage):
        srid = coverage.srid
        extent = coverage.extent
        size_x, size_y = coverage.size
        footprint = coverage.footprint

        subset_srid = self.srid

        if subset_srid is None:
            bbox = list(extent)
        else:
            bbox = list(footprint.extent)

        for subset in self:
            if not isinstance(subset, Trim) or subset.is_temporal:
                continue

            if subset_srid is None:
                # transform coordinates from imageCRS to coverages CRS
                if subset.is_x:
                    if subset.low is not None:
                        l = max(float(subset.low) / float(size_x), 0.0)
                        bbox[0] = extent[0] + l * (extent[2] - extent[0])

                    if subset.high is not None:
                        l = max(float(subset.high) / float(size_x), 0.0)
                        bbox[2] = extent[0] + l * (extent[2] - extent[0])

                elif subset.is_y:
                    if subset.low is not None:
                        l = max(float(subset.low) / float(size_y), 0.0)
                        bbox[1] = extent[3] - l * (extent[3] - extent[1])

                    if subset.high is not None:
                        l = max(float(subset.high) / float(size_y), 0.0)
                        bbox[3] = extent[3] - l * (extent[3] - extent[1])
            else:
                if subset.is_x:
                    if subset.low is not None:
                        bbox[0] = max(subset.low, bbox[0])
                    if subset.high is not None:
                        bbox[2] = min(subset.high, bbox[2])
                if subset.is_y:
                    if subset.low is not None:
                        bbox[1] = max(subset.low, bbox[1])
                    if subset.high is not None:
                        bbox[3] = min(subset.high, bbox[3])

        if subset_srid is None:
            poly = Polygon.from_bbox(bbox)
            poly.srid = srid
        else:
            poly = Polygon.from_bbox(bbox)
            poly.srid = subset_srid

        return poly


class Subset(object):
    def __init__(self, axis):
        axis = axis.lower()
        if axis not in all_axes:
            raise InvalidAxisLabelException(axis)
        self.axis = axis

    @property
    def is_temporal(self):
        return self.axis in temporal_axes

    @property
    def is_x(self):
        return self.axis in x_axes

    @property
    def is_y(self):
        return self.axis in y_axes


class Slice(Subset):
    def __init__(self, axis, value):
        super(Slice, self).__init__(axis)
        self.value = value

    def __repr__(self):
        return "Slice: %s[%s]" % (self.axis, self.value)


class Trim(Subset):
    def __init__(self, axis, low=None, high=None):
        super(Trim, self).__init__(axis)
        if low is not None and high is not None and low > high:
            raise InvalidSubsettingException(
                "Invalid bounds: lower bound greater than upper bound."
            )
        self.low = low
        self.high = high

    def __repr__(self):
        return "Trim: %s[%s:%s]" % (
            self.axis, self.low, self.high
        )


temporal_axes = ("t", "time", "phenomenontime")
x_axes = ("x", "lon", "long")
y_axes = ("y", "lat")
z_axes = ("z", "height")
all_axes = temporal_axes + x_axes + y_axes + z_axes


def is_temporal(axis):
    """ Returns whether or not an axis is a temporal one.
    """
    # NOTE: the return expression is an assumption; the source was
    # truncated immediately after "return"
    return axis.lower() in temporal_axes
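As a quick illustration of the classes above, here is a minimal usage sketch. The axis labels, bounds, and CRS URI are invented for illustration, and only methods that need no database or GEOS geometry (the has_* flags and xy_bbox) are exercised:

from eoxserver.services.subset import Subsets, Trim, Slice

# A longitude trim plus a latitude slice; adding a second X or Y subset
# would raise InvalidSubsettingException via _check_subset().
subsets = Subsets(
    [Trim("long", low=10.0, high=20.0), Slice("lat", value=45.0)],
    crs="http://www.opengis.net/def/crs/EPSG/0/4326",  # assumed EPSG:4326 URI
)

print(subsets.has_x, subsets.has_y, subsets.has_t)  # True True False
print(subsets.xy_bbox)  # [10.0, 45.0, 20.0, 45.0]

Note that xy_bbox leaves entries as None for axes that were never subsetted, which is why filter() and matches() later merge the accumulated bbox with max_extent before building a polygon.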
[ "import sys from typing import Dict, Optional, Tuple, cast SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE", "Parse plain and editable versions. See test_genversion.py for examples. \"\"\" src_matcher = SRC_VERSION_RE.match(comp)", "typing import Dict, Optional, Tuple, cast SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\") LOG", "def main() -> None: \"\"\"Run the command.\"\"\" if len(sys.argv) == 2: git_tag =", "return main() def main() -> None: \"\"\"Run the command.\"\"\" if len(sys.argv) == 2:", "None: \"\"\"Run the command and print a deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is deprecated; use", "else: if len(comp) > 0 and not comp[:3] == \"-e \": print(\"Cannot parse", "if len(comp) > 0 and not comp[:3] == \"-e \": print(\"Cannot parse package", "comp) return None, None def _get_packages_version() -> Dict[str, str]: result = {} with", "print a deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion instead\") return main() def", "encoding=\"utf-8\") as devnull: for comp in ( subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"], stderr=devnull) #", "{} with open(os.devnull, \"w\", encoding=\"utf-8\") as devnull: for comp in ( subprocess.check_output([\"python3\", \"-m\",", "is not None: result[name] = version return result def deprecated() -> None: \"\"\"Run", "str]: result = {} with open(os.devnull, \"w\", encoding=\"utf-8\") as devnull: for comp in", "= sys.argv[1] git_hash = sys.argv[2] report = {\"main\": {\"git_hash\": git_hash}, \"packages\": _get_packages_version()} if", "\"freeze\"], stderr=devnull) # nosec .decode() .strip() .split(\"\\n\") ): name, version = _get_package_version(comp) if", "= {\"main\": {\"git_hash\": git_hash}, \"packages\": _get_packages_version()} if git_tag is not None: report[\"main\"][\"git_tag\"] =", "\"packages\": _get_packages_version()} if git_tag is not None: report[\"main\"][\"git_tag\"] = git_tag with open(\"versions.json\", \"w\",", "None and version is not None: result[name] = version return result def deprecated()", "logging.getLogger(__name__) def _get_package_version(comp: str) -> Tuple[Optional[str], Optional[str]]: \"\"\" Parse plain and editable versions.", "== \"-e \": print(\"Cannot parse package version: \" + comp) return None, None", "command.\"\"\" if len(sys.argv) == 2: git_tag = None git_hash = sys.argv[1] else: git_tag", "import json import logging import os import re import subprocess # nosec import", "= re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\") LOG = logging.getLogger(__name__) def _get_package_version(comp: str) -> Tuple[Optional[str],", "c2cwsgiutils-genversion instead\") return main() def main() -> None: \"\"\"Run the command.\"\"\" if len(sys.argv)", "for comp in ( subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"], stderr=devnull) # nosec .decode() .strip()", "open(\"versions.json\", \"w\", encoding=\"utf-8\") as file: json.dump(report, file, indent=2) if __name__ == \"__main__\": main()", "Tuple[Optional[str], Optional[str]]: \"\"\" Parse plain and editable versions. See test_genversion.py for examples. \"\"\"", "str) -> Tuple[Optional[str], Optional[str]]: \"\"\" Parse plain and editable versions. 
See test_genversion.py for", "git_hash = sys.argv[2] report = {\"main\": {\"git_hash\": git_hash}, \"packages\": _get_packages_version()} if git_tag is", "subprocess # nosec import sys from typing import Dict, Optional, Tuple, cast SRC_VERSION_RE", "2: git_tag = None git_hash = sys.argv[1] else: git_tag = sys.argv[1] git_hash =", "git_tag = None git_hash = sys.argv[1] else: git_tag = sys.argv[1] git_hash = sys.argv[2]", "sys from typing import Dict, Optional, Tuple, cast SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE =", "name is not None and version is not None: result[name] = version return", "None: report[\"main\"][\"git_tag\"] = git_tag with open(\"versions.json\", \"w\", encoding=\"utf-8\") as file: json.dump(report, file, indent=2)", "\"\"\"Run the command and print a deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion", "> 0 and not comp[:3] == \"-e \": print(\"Cannot parse package version: \"", "is deprecated; use c2cwsgiutils-genversion instead\") return main() def main() -> None: \"\"\"Run the", "is not None: report[\"main\"][\"git_tag\"] = git_tag with open(\"versions.json\", \"w\", encoding=\"utf-8\") as file: json.dump(report,", "import Dict, Optional, Tuple, cast SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\") LOG =", "= src_matcher or VERSION_RE.match(comp) if matcher: return cast(Tuple[str, str], matcher.groups()) else: if len(comp)", "if git_tag is not None: report[\"main\"][\"git_tag\"] = git_tag with open(\"versions.json\", \"w\", encoding=\"utf-8\") as", "= _get_package_version(comp) if name is not None and version is not None: result[name]", "re import subprocess # nosec import sys from typing import Dict, Optional, Tuple,", "= logging.getLogger(__name__) def _get_package_version(comp: str) -> Tuple[Optional[str], Optional[str]]: \"\"\" Parse plain and editable", "\" + comp) return None, None def _get_packages_version() -> Dict[str, str]: result =", "the command and print a deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion instead\")", "\": print(\"Cannot parse package version: \" + comp) return None, None def _get_packages_version()", "def deprecated() -> None: \"\"\"Run the command and print a deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py", "= sys.argv[2] report = {\"main\": {\"git_hash\": git_hash}, \"packages\": _get_packages_version()} if git_tag is not", "else: git_tag = sys.argv[1] git_hash = sys.argv[2] report = {\"main\": {\"git_hash\": git_hash}, \"packages\":", "Dict[str, str]: result = {} with open(os.devnull, \"w\", encoding=\"utf-8\") as devnull: for comp", "cast(Tuple[str, str], matcher.groups()) else: if len(comp) > 0 and not comp[:3] == \"-e", "\"-m\", \"pip\", \"freeze\"], stderr=devnull) # nosec .decode() .strip() .split(\"\\n\") ): name, version =", "nosec .decode() .strip() .split(\"\\n\") ): name, version = _get_package_version(comp) if name is not", "return result def deprecated() -> None: \"\"\"Run the command and print a deprecated", "command and print a deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion instead\") return", "json import logging import os import re import subprocess # nosec import sys", "None git_hash = sys.argv[1] else: git_tag = sys.argv[1] git_hash = sys.argv[2] report =", "if matcher: return cast(Tuple[str, str], matcher.groups()) else: if 
len(comp) > 0 and not", "= version return result def deprecated() -> None: \"\"\"Run the command and print", "cast SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\") LOG = logging.getLogger(__name__) def _get_package_version(comp: str)", "git_hash = sys.argv[1] else: git_tag = sys.argv[1] git_hash = sys.argv[2] report = {\"main\":", "version return result def deprecated() -> None: \"\"\"Run the command and print a", "is not None and version is not None: result[name] = version return result", "from typing import Dict, Optional, Tuple, cast SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\")", "SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\") LOG = logging.getLogger(__name__) def _get_package_version(comp: str) ->", "devnull: for comp in ( subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"], stderr=devnull) # nosec .decode()", "instead\") return main() def main() -> None: \"\"\"Run the command.\"\"\" if len(sys.argv) ==", "== 2: git_tag = None git_hash = sys.argv[1] else: git_tag = sys.argv[1] git_hash", ".strip() .split(\"\\n\") ): name, version = _get_package_version(comp) if name is not None and", "versions. See test_genversion.py for examples. \"\"\" src_matcher = SRC_VERSION_RE.match(comp) matcher = src_matcher or", "report = {\"main\": {\"git_hash\": git_hash}, \"packages\": _get_packages_version()} if git_tag is not None: report[\"main\"][\"git_tag\"]", "= None git_hash = sys.argv[1] else: git_tag = sys.argv[1] git_hash = sys.argv[2] report", "-> None: \"\"\"Run the command and print a deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is deprecated;", "-> Dict[str, str]: result = {} with open(os.devnull, \"w\", encoding=\"utf-8\") as devnull: for", "open(os.devnull, \"w\", encoding=\"utf-8\") as devnull: for comp in ( subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"],", "_get_packages_version()} if git_tag is not None: report[\"main\"][\"git_tag\"] = git_tag with open(\"versions.json\", \"w\", encoding=\"utf-8\")", "version: \" + comp) return None, None def _get_packages_version() -> Dict[str, str]: result", "sys.argv[2] report = {\"main\": {\"git_hash\": git_hash}, \"packages\": _get_packages_version()} if git_tag is not None:", "= {} with open(os.devnull, \"w\", encoding=\"utf-8\") as devnull: for comp in ( subprocess.check_output([\"python3\",", "and version is not None: result[name] = version return result def deprecated() ->", "= SRC_VERSION_RE.match(comp) matcher = src_matcher or VERSION_RE.match(comp) if matcher: return cast(Tuple[str, str], matcher.groups())", "result def deprecated() -> None: \"\"\"Run the command and print a deprecated notice.\"\"\"", "Tuple, cast SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\") LOG = logging.getLogger(__name__) def _get_package_version(comp:", "version is not None: result[name] = version return result def deprecated() -> None:", "python3 import json import logging import os import re import subprocess # nosec", "if len(sys.argv) == 2: git_tag = None git_hash = sys.argv[1] else: git_tag =", "git_tag is not None: report[\"main\"][\"git_tag\"] = git_tag with open(\"versions.json\", \"w\", encoding=\"utf-8\") as file:", "VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\") LOG = logging.getLogger(__name__) def _get_package_version(comp: str) -> 
Tuple[Optional[str], Optional[str]]: \"\"\"", "SRC_VERSION_RE.match(comp) matcher = src_matcher or VERSION_RE.match(comp) if matcher: return cast(Tuple[str, str], matcher.groups()) else:", "src_matcher or VERSION_RE.match(comp) if matcher: return cast(Tuple[str, str], matcher.groups()) else: if len(comp) >", "main() -> None: \"\"\"Run the command.\"\"\" if len(sys.argv) == 2: git_tag = None", "str], matcher.groups()) else: if len(comp) > 0 and not comp[:3] == \"-e \":", "use c2cwsgiutils-genversion instead\") return main() def main() -> None: \"\"\"Run the command.\"\"\" if", "\"\"\"Run the command.\"\"\" if len(sys.argv) == 2: git_tag = None git_hash = sys.argv[1]", "return cast(Tuple[str, str], matcher.groups()) else: if len(comp) > 0 and not comp[:3] ==", "_get_packages_version() -> Dict[str, str]: result = {} with open(os.devnull, \"w\", encoding=\"utf-8\") as devnull:", "not None: result[name] = version return result def deprecated() -> None: \"\"\"Run the", "\"-e \": print(\"Cannot parse package version: \" + comp) return None, None def", "_get_package_version(comp: str) -> Tuple[Optional[str], Optional[str]]: \"\"\" Parse plain and editable versions. See test_genversion.py", "len(comp) > 0 and not comp[:3] == \"-e \": print(\"Cannot parse package version:", "deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion instead\") return main() def main() ->", "-> None: \"\"\"Run the command.\"\"\" if len(sys.argv) == 2: git_tag = None git_hash", "+ comp) return None, None def _get_packages_version() -> Dict[str, str]: result = {}", "len(sys.argv) == 2: git_tag = None git_hash = sys.argv[1] else: git_tag = sys.argv[1]", "None: \"\"\"Run the command.\"\"\" if len(sys.argv) == 2: git_tag = None git_hash =", "editable versions. See test_genversion.py for examples. 
\"\"\" src_matcher = SRC_VERSION_RE.match(comp) matcher = src_matcher", "notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion instead\") return main() def main() -> None:", "import os import re import subprocess # nosec import sys from typing import", "LOG = logging.getLogger(__name__) def _get_package_version(comp: str) -> Tuple[Optional[str], Optional[str]]: \"\"\" Parse plain and", "( subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"], stderr=devnull) # nosec .decode() .strip() .split(\"\\n\") ): name,", "package version: \" + comp) return None, None def _get_packages_version() -> Dict[str, str]:", "not None: report[\"main\"][\"git_tag\"] = git_tag with open(\"versions.json\", \"w\", encoding=\"utf-8\") as file: json.dump(report, file,", "report[\"main\"][\"git_tag\"] = git_tag with open(\"versions.json\", \"w\", encoding=\"utf-8\") as file: json.dump(report, file, indent=2) if", "deprecated; use c2cwsgiutils-genversion instead\") return main() def main() -> None: \"\"\"Run the command.\"\"\"", "logging import os import re import subprocess # nosec import sys from typing", "git_hash}, \"packages\": _get_packages_version()} if git_tag is not None: report[\"main\"][\"git_tag\"] = git_tag with open(\"versions.json\",", "a deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion instead\") return main() def main()", "print(\"Cannot parse package version: \" + comp) return None, None def _get_packages_version() ->", "comp in ( subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"], stderr=devnull) # nosec .decode() .strip() .split(\"\\n\")", "\"\"\" Parse plain and editable versions. See test_genversion.py for examples. \"\"\" src_matcher =", "main() def main() -> None: \"\"\"Run the command.\"\"\" if len(sys.argv) == 2: git_tag", "= re.compile(r\"^([^=]*)==([^=]*)$\") LOG = logging.getLogger(__name__) def _get_package_version(comp: str) -> Tuple[Optional[str], Optional[str]]: \"\"\" Parse", "matcher: return cast(Tuple[str, str], matcher.groups()) else: if len(comp) > 0 and not comp[:3]", "with open(os.devnull, \"w\", encoding=\"utf-8\") as devnull: for comp in ( subprocess.check_output([\"python3\", \"-m\", \"pip\",", "version = _get_package_version(comp) if name is not None and version is not None:", "test_genversion.py for examples. 
\"\"\" src_matcher = SRC_VERSION_RE.match(comp) matcher = src_matcher or VERSION_RE.match(comp) if", "not None and version is not None: result[name] = version return result def", "Optional, Tuple, cast SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\") LOG = logging.getLogger(__name__) def", "#!/usr/bin/env python3 import json import logging import os import re import subprocess #", "in ( subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"], stderr=devnull) # nosec .decode() .strip() .split(\"\\n\") ):", "sys.argv[1] git_hash = sys.argv[2] report = {\"main\": {\"git_hash\": git_hash}, \"packages\": _get_packages_version()} if git_tag", "deprecated() -> None: \"\"\"Run the command and print a deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is", "# nosec import sys from typing import Dict, Optional, Tuple, cast SRC_VERSION_RE =", "= sys.argv[1] else: git_tag = sys.argv[1] git_hash = sys.argv[2] report = {\"main\": {\"git_hash\":", "re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\") LOG = logging.getLogger(__name__) def _get_package_version(comp: str) -> Tuple[Optional[str], Optional[str]]:", "return None, None def _get_packages_version() -> Dict[str, str]: result = {} with open(os.devnull,", "nosec import sys from typing import Dict, Optional, Tuple, cast SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\")", "and editable versions. See test_genversion.py for examples. \"\"\" src_matcher = SRC_VERSION_RE.match(comp) matcher =", "): name, version = _get_package_version(comp) if name is not None and version is", "Dict, Optional, Tuple, cast SRC_VERSION_RE = re.compile(r\"^.*\\(([^=]*)===?([^=]*)\\)$\") VERSION_RE = re.compile(r\"^([^=]*)==([^=]*)$\") LOG = logging.getLogger(__name__)", "the command.\"\"\" if len(sys.argv) == 2: git_tag = None git_hash = sys.argv[1] else:", "import logging import os import re import subprocess # nosec import sys from", "Optional[str]]: \"\"\" Parse plain and editable versions. See test_genversion.py for examples. \"\"\" src_matcher", "re.compile(r\"^([^=]*)==([^=]*)$\") LOG = logging.getLogger(__name__) def _get_package_version(comp: str) -> Tuple[Optional[str], Optional[str]]: \"\"\" Parse plain", "matcher.groups()) else: if len(comp) > 0 and not comp[:3] == \"-e \": print(\"Cannot", "_get_package_version(comp) if name is not None and version is not None: result[name] =", "result[name] = version return result def deprecated() -> None: \"\"\"Run the command and", "import subprocess # nosec import sys from typing import Dict, Optional, Tuple, cast", "result = {} with open(os.devnull, \"w\", encoding=\"utf-8\") as devnull: for comp in (", "See test_genversion.py for examples. \"\"\" src_matcher = SRC_VERSION_RE.match(comp) matcher = src_matcher or VERSION_RE.match(comp)", "stderr=devnull) # nosec .decode() .strip() .split(\"\\n\") ): name, version = _get_package_version(comp) if name", "src_matcher = SRC_VERSION_RE.match(comp) matcher = src_matcher or VERSION_RE.match(comp) if matcher: return cast(Tuple[str, str],", "parse package version: \" + comp) return None, None def _get_packages_version() -> Dict[str,", "-> Tuple[Optional[str], Optional[str]]: \"\"\" Parse plain and editable versions. See test_genversion.py for examples.", "for examples. 
\"\"\" src_matcher = SRC_VERSION_RE.match(comp) matcher = src_matcher or VERSION_RE.match(comp) if matcher:", "comp[:3] == \"-e \": print(\"Cannot parse package version: \" + comp) return None,", "git_tag = sys.argv[1] git_hash = sys.argv[2] report = {\"main\": {\"git_hash\": git_hash}, \"packages\": _get_packages_version()}", "if name is not None and version is not None: result[name] = version", "examples. \"\"\" src_matcher = SRC_VERSION_RE.match(comp) matcher = src_matcher or VERSION_RE.match(comp) if matcher: return", "None, None def _get_packages_version() -> Dict[str, str]: result = {} with open(os.devnull, \"w\",", "None def _get_packages_version() -> Dict[str, str]: result = {} with open(os.devnull, \"w\", encoding=\"utf-8\")", "import re import subprocess # nosec import sys from typing import Dict, Optional,", "and print a deprecated notice.\"\"\" LOG.warning(\"c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion instead\") return main()", "or VERSION_RE.match(comp) if matcher: return cast(Tuple[str, str], matcher.groups()) else: if len(comp) > 0", "subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"], stderr=devnull) # nosec .decode() .strip() .split(\"\\n\") ): name, version", "LOG.warning(\"c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion instead\") return main() def main() -> None: \"\"\"Run", "# nosec .decode() .strip() .split(\"\\n\") ): name, version = _get_package_version(comp) if name is", "{\"git_hash\": git_hash}, \"packages\": _get_packages_version()} if git_tag is not None: report[\"main\"][\"git_tag\"] = git_tag with", "with open(\"versions.json\", \"w\", encoding=\"utf-8\") as file: json.dump(report, file, indent=2) if __name__ == \"__main__\":", "os import re import subprocess # nosec import sys from typing import Dict,", "as devnull: for comp in ( subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"], stderr=devnull) # nosec", "\"w\", encoding=\"utf-8\") as devnull: for comp in ( subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"], stderr=devnull)", "and not comp[:3] == \"-e \": print(\"Cannot parse package version: \" + comp)", "None: result[name] = version return result def deprecated() -> None: \"\"\"Run the command", "sys.argv[1] else: git_tag = sys.argv[1] git_hash = sys.argv[2] report = {\"main\": {\"git_hash\": git_hash},", "matcher = src_matcher or VERSION_RE.match(comp) if matcher: return cast(Tuple[str, str], matcher.groups()) else: if", "{\"main\": {\"git_hash\": git_hash}, \"packages\": _get_packages_version()} if git_tag is not None: report[\"main\"][\"git_tag\"] = git_tag", "VERSION_RE.match(comp) if matcher: return cast(Tuple[str, str], matcher.groups()) else: if len(comp) > 0 and", "= git_tag with open(\"versions.json\", \"w\", encoding=\"utf-8\") as file: json.dump(report, file, indent=2) if __name__", "not comp[:3] == \"-e \": print(\"Cannot parse package version: \" + comp) return", ".split(\"\\n\") ): name, version = _get_package_version(comp) if name is not None and version", ".decode() .strip() .split(\"\\n\") ): name, version = _get_package_version(comp) if name is not None", "plain and editable versions. See test_genversion.py for examples. 
\"\"\" src_matcher = SRC_VERSION_RE.match(comp) matcher", "name, version = _get_package_version(comp) if name is not None and version is not", "git_tag with open(\"versions.json\", \"w\", encoding=\"utf-8\") as file: json.dump(report, file, indent=2) if __name__ ==", "def _get_packages_version() -> Dict[str, str]: result = {} with open(os.devnull, \"w\", encoding=\"utf-8\") as", "def _get_package_version(comp: str) -> Tuple[Optional[str], Optional[str]]: \"\"\" Parse plain and editable versions. See", "\"pip\", \"freeze\"], stderr=devnull) # nosec .decode() .strip() .split(\"\\n\") ): name, version = _get_package_version(comp)", "0 and not comp[:3] == \"-e \": print(\"Cannot parse package version: \" +", "\"\"\" src_matcher = SRC_VERSION_RE.match(comp) matcher = src_matcher or VERSION_RE.match(comp) if matcher: return cast(Tuple[str," ]
[ "@property def record_type(self): try: return self.__record_type except AttributeError: return None @property def domain_id(self):", "def record_type(self): try: return self.__record_type except AttributeError: return None @property def domain_id(self): try:", "= protocol self.__domain = domain @property def record_type(self): try: return self.__record_type except AttributeError:", "Record\"\"\" import logging _LOGGER = logging.getLogger(__name__) class DomainRecord(object): def __init__(self, protocol=None, domain=None): super().__init__()", "DomainRecord(object): def __init__(self, protocol=None, domain=None): super().__init__() self.__protocol = protocol self.__domain = domain @property", "<filename>src/constellix/domains/record/type/main.py \"\"\"A Record\"\"\" import logging _LOGGER = logging.getLogger(__name__) class DomainRecord(object): def __init__(self, protocol=None,", "_LOGGER = logging.getLogger(__name__) class DomainRecord(object): def __init__(self, protocol=None, domain=None): super().__init__() self.__protocol = protocol", "import logging _LOGGER = logging.getLogger(__name__) class DomainRecord(object): def __init__(self, protocol=None, domain=None): super().__init__() self.__protocol", "protocol self.__domain = domain @property def record_type(self): try: return self.__record_type except AttributeError: return", "__init__(self, protocol=None, domain=None): super().__init__() self.__protocol = protocol self.__domain = domain @property def record_type(self):", "record_type(self): try: return self.__record_type except AttributeError: return None @property def domain_id(self): try: return", "def __init__(self, protocol=None, domain=None): super().__init__() self.__protocol = protocol self.__domain = domain @property def", "super().__init__() self.__protocol = protocol self.__domain = domain @property def record_type(self): try: return self.__record_type", "= logging.getLogger(__name__) class DomainRecord(object): def __init__(self, protocol=None, domain=None): super().__init__() self.__protocol = protocol self.__domain", "self.__protocol = protocol self.__domain = domain @property def record_type(self): try: return self.__record_type except", "self.__domain = domain @property def record_type(self): try: return self.__record_type except AttributeError: return None", "return self.__record_type except AttributeError: return None @property def domain_id(self): try: return self.__domain.id except", "\"\"\"A Record\"\"\" import logging _LOGGER = logging.getLogger(__name__) class DomainRecord(object): def __init__(self, protocol=None, domain=None):", "domain=None): super().__init__() self.__protocol = protocol self.__domain = domain @property def record_type(self): try: return", "logging.getLogger(__name__) class DomainRecord(object): def __init__(self, protocol=None, domain=None): super().__init__() self.__protocol = protocol self.__domain =", "= domain @property def record_type(self): try: return self.__record_type except AttributeError: return None @property", "domain @property def record_type(self): try: return self.__record_type except AttributeError: return None @property def", "try: return self.__record_type except AttributeError: return None @property def domain_id(self): try: return self.__domain.id", "self.__record_type except AttributeError: return None @property def domain_id(self): try: return self.__domain.id except AttributeError:", "logging _LOGGER = logging.getLogger(__name__) class DomainRecord(object): def __init__(self, protocol=None, domain=None): 
super().__init__() self.__protocol =", "protocol=None, domain=None): super().__init__() self.__protocol = protocol self.__domain = domain @property def record_type(self): try:", "AttributeError: return None @property def domain_id(self): try: return self.__domain.id except AttributeError: return None", "class DomainRecord(object): def __init__(self, protocol=None, domain=None): super().__init__() self.__protocol = protocol self.__domain = domain", "except AttributeError: return None @property def domain_id(self): try: return self.__domain.id except AttributeError: return" ]
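
# --- Added sketch (not in the original module) --------------------------------
# Both properties fall back to None when the underlying private attribute is
# missing; FakeDomain is a hypothetical stand-in for the real Constellix
# domain object, assumed only to expose an `id` attribute.
def _demo_domain_record() -> None:
    record = DomainRecord()
    assert record.record_type is None  # __record_type was never set
    assert record.domain_id is None    # __domain is None, so .id raises AttributeError

    class FakeDomain:
        id = 12345

    assert DomainRecord(domain=FakeDomain()).domain_id == 12345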
[ "# Default primary fraction range to be used if it is not defined", "scaling factors Type 2: Zero-attenuator direct beams Type 3: Data that we don't", "show_dx=False) else: plot_div = plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"q (1/A)\", x_log=True,", "ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, FindPeaks=False, NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) #------------------------------------------------------------------------- #", "reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r) reduced_file_path = os.path.join(output_dir, reduced_file_name) if os.path.isfile(reduced_file_path):", "plot1d except ImportError: from finddata.publish_plot import plot1d, _determine_config_file, publish_plot if _determine_config_file(None) is None:", "show_dx=False, publish=False) publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number, [[x, y, dy, dx]],", "publish=False) publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number, [[x, y, dy, dx]], instrument='REF_L',", "%s\" % os.path.join(output_dir, default_file_name)) reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\") plotting_ready = True try:", "from sf_calculator import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE = True logger.notice(\"sf_calculator available\") except: import scipy logger.notice(\"Scaling", "os.path.join(output_dir, \"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file = \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template: %s\" % template_file) #", "\"template.xml\")): template_file = os.path.join(output_dir, \"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file = \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template: %s\"", "and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling factors\") sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium =", "ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0] _fpath = os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" % (first_run_of_set, incident_medium)) sf =", "= os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" % (first_run_of_set, incident_medium)) sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number), sort_by_runs=True,", "os.path.join(output_dir, default_file_name)) reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\") plotting_ready = True try: from postprocessing.publish_plot", "print(\"Using template: %s\" % template_file) # Run the auto-reduction ws = LoadEventNexus(Filename=event_file_path) #", "we don't need to treat \"\"\" import sys import os import json import", "\"WithReference\" else: NORMALIZATION_TYPE = \"DirectBeam\" # Determine whether this is data or whether", "Run the auto-reduction ws = LoadEventNexus(Filename=event_file_path) # Check the measurement geometry if 
ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0]", "need to compute scaling factors data_type = ws.getRun().getProperty(\"data_type\").value[0] if data_type == 1 and", "else: logger.notice(\"Automated reduction\") output = LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True,", "= LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\") plotting_ready = True try: from postprocessing.publish_plot import plot1d except", "Type 3: Data that we don't need to treat \"\"\" import sys import", "default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set default_file_path = os.path.join(output_dir, default_file_name) if os.path.isfile(default_file_path): print(\"Loading %s\"", "sample data Type 1: Direct beams for scaling factors Type 2: Zero-attenuator direct", "NORMALIZATION_TYPE = \"WithReference\" #NORMALIZATION_TYPE = \"DirectBeam\" # Allowed values: dict or \"\" #", "dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False) else: plot_div = plot1d(first_run_of_set+r, [[x,", "Quartz #REFL1D_PARS = json.dumps(dict(back_sld=4.09, # back_roughness=4.28, # front_sld=0, # layers=[], # scale=0.9169, #", "import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE = True logger.notice(\"sf_calculator available\") except: import scipy logger.notice(\"Scaling factor calculation", "DIRECT_BEAM_CALC_AVAILABLE = True logger.notice(\"sf_calculator available\") except: import scipy logger.notice(\"Scaling factor calculation upgrade not", "data or whether we need to compute scaling factors data_type = ws.getRun().getProperty(\"data_type\").value[0] if", "= reflectivity.readY(0) dy = reflectivity.readE(0) dx = reflectivity.readDx(0) if int(run_number) - first_run_of_set <", "below which we don't need the absolute normalization WL_CUTOFF = 10.0 # Default", "% first_run_of_set default_file_path = os.path.join(output_dir, default_file_name) if os.path.isfile(default_file_path): print(\"Loading %s\" % os.path.join(output_dir, default_file_name))", "first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0] _fpath = os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" % (first_run_of_set, incident_medium))", "Reduction options #------------------------------------------------------------------------- # Wavelength below which we don't need the absolute normalization", "= \"WithReference\" #NORMALIZATION_TYPE = \"DirectBeam\" # Allowed values: dict or \"\" # D2O", "plot1d, _determine_config_file, publish_plot if _determine_config_file(None) is None: plotting_ready = False x = reflectivity.readX(0)", "in range(0, 10): reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r) reduced_file_path = os.path.join(output_dir,", "\"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template: %s\" % template_file) # Run the auto-reduction ws = LoadEventNexus(Filename=event_file_path)", "1 and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling factors\") sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium", "ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, 
FindPeaks=False, NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) #------------------------------------------------------------------------- # Produce plot for the", "if _determine_config_file(None) is None: plotting_ready = False x = reflectivity.readX(0) y = reflectivity.readY(0)", "options #------------------------------------------------------------------------- # Wavelength below which we don't need the absolute normalization WL_CUTOFF", "if plotting_ready: plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True,", "290] NORMALIZE_TO_UNITY = False # Allowed values: ['DirectBeam', 'WithReference'] NORMALIZATION_TYPE = \"WithReference\" #NORMALIZATION_TYPE", "= ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0] _fpath = os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" % (first_run_of_set, incident_medium)) sf", "os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid from mantid.simpleapi import * try: from sf_calculator import", "dy = reflectivity.readE(0) dx = reflectivity.readDx(0) if int(run_number) - first_run_of_set < 10: for", "D2O REFL1D_PARS = json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0, layers=[], scale=1.0, background=0.0)) # Quartz #REFL1D_PARS =", "import plot1d, _determine_config_file, publish_plot if _determine_config_file(None) is None: plotting_ready = False x =", "plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False) else:", "os.path.split(event_file_path)[-1] # The legacy format is REF_L_xyz_event.nxs # The new format is REF_L_xyz.nxs.h5", "not defined in the template PRIMARY_FRACTION_RANGE = [5, 290] NORMALIZE_TO_UNITY = False #", "incident_medium)) sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number), sort_by_runs=True, tof_step=200, medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath) sf.execute()", "LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, FindPeaks=False, NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS)", "_determine_config_file(None) is None: plotting_ready = False x = reflectivity.readX(0) y = reflectivity.readY(0) dy", "import json import warnings warnings.simplefilter('ignore') if (\"MANTIDPATH\" in os.environ): del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\")", "for r in range(0, 10): reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r) reduced_file_path", "os.path.isfile(reduced_file_path): # Look to see whether submitting the plot is enabled if plotting_ready:", "The new format is REF_L_xyz.nxs.h5 run_number = event_file.split('_')[2] run_number = run_number.replace('.nxs.h5', '') #", "template file # If no template file is available, the automated reduction will", "scipy=%s\" % scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE = False event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file = 
os.path.split(event_file_path)[-1] # The", "\"DirectBeam\" # Allowed values: dict or \"\" # D2O REFL1D_PARS = json.dumps(dict(back_sld=6.4, back_roughness=2.7,", "'Free Liquid': NORMALIZATION_TYPE = \"WithReference\" else: NORMALIZATION_TYPE = \"DirectBeam\" # Determine whether this", "os.path.join(output_dir, reduced_file_name) if os.path.isfile(reduced_file_path): # Look to see whether submitting the plot is", "if os.path.isfile(\"template.xml\"): template_file = \"template.xml\" elif os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file = os.path.join(output_dir, \"template.xml\") elif", "LoadEventNexus(Filename=event_file_path) # Check the measurement geometry if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': NORMALIZATION_TYPE =", "or whether we need to compute scaling factors data_type = ws.getRun().getProperty(\"data_type\").value[0] if data_type", "= json.dumps(dict(back_sld=4.09, # back_roughness=4.28, # front_sld=0, # layers=[], # scale=0.9169, # background=3.753e-07)) #-------------------------------------------------------------------------", "postprocessing.publish_plot import plot1d except ImportError: from finddata.publish_plot import plot1d, _determine_config_file, publish_plot if _determine_config_file(None)", "= os.path.join(output_dir, default_file_name) if os.path.isfile(default_file_path): print(\"Loading %s\" % os.path.join(output_dir, default_file_name)) reflectivity = LoadAscii(Filename=os.path.join(output_dir,", "logger.notice(\"Scaling factor calculation upgrade not available: scipy=%s\" % scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE = False event_file_path=sys.argv[1]", "factors\") sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0] _fpath = os.path.join(output_dir,", "WL_CUTOFF = 10.0 # Default primary fraction range to be used if it", "default_file_name)) reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\") plotting_ready = True try: from postprocessing.publish_plot import", "import mantid from mantid.simpleapi import * try: from sf_calculator import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE =", "first_run_of_set < 10: for r in range(0, 10): reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set,", "% scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE = False event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file = os.path.split(event_file_path)[-1] # The legacy", "\"\" if os.path.isfile(\"template.xml\"): template_file = \"template.xml\" elif os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file = os.path.join(output_dir, \"template.xml\")", "plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\",", "no template file is available, the automated reduction will generate one template_file =", "= json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0, layers=[], scale=1.0, background=0.0)) # Quartz #REFL1D_PARS = json.dumps(dict(back_sld=4.09, #", "ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, FindPeaks=False, 
NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) #-------------------------------------------------------------------------", "= 'REFL_%s_combined_data_auto.txt' % first_run_of_set default_file_path = os.path.join(output_dir, default_file_name) if os.path.isfile(default_file_path): print(\"Loading %s\" %", "True try: from postprocessing.publish_plot import plot1d except ImportError: from finddata.publish_plot import plot1d, _determine_config_file,", "# layers=[], # scale=0.9169, # background=3.753e-07)) #------------------------------------------------------------------------- # Locate the template file #", "y_log=True, show_dx=False) else: plot_div = plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"q (1/A)\",", "is None: plotting_ready = False x = reflectivity.readX(0) y = reflectivity.readY(0) dy =", "import scipy logger.notice(\"Scaling factor calculation upgrade not available: scipy=%s\" % scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE =", "format is REF_L_xyz.nxs.h5 run_number = event_file.split('_')[2] run_number = run_number.replace('.nxs.h5', '') # Reduction options", "ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': NORMALIZATION_TYPE = \"WithReference\" else: NORMALIZATION_TYPE = \"DirectBeam\" # Determine", "scaling factors data_type = ws.getRun().getProperty(\"data_type\").value[0] if data_type == 1 and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling", "= ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0] _fpath = os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" %", "sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number), sort_by_runs=True, tof_step=200, medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath) sf.execute() else:", "used if it is not defined in the template PRIMARY_FRACTION_RANGE = [5, 290]", "y_title=\"Reflectivity\", y_log=True, show_dx=False, publish=False) publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number, [[x, y,", "reflectivity.readDx(0) if int(run_number) - first_run_of_set < 10: for r in range(0, 10): reduced_file_name", "x_title=u\"q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False, publish=False) publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else:", "generate one template_file = \"\" if os.path.isfile(\"template.xml\"): template_file = \"template.xml\" elif os.path.isfile(os.path.join(output_dir, \"template.xml\")):", "(1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False, publish=False) publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number,", "sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid from mantid.simpleapi import * try: from sf_calculator import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE", "this is data or whether we need to compute scaling factors data_type =", "ScalingFactor DIRECT_BEAM_CALC_AVAILABLE = True logger.notice(\"sf_calculator available\") except: import scipy logger.notice(\"Scaling factor calculation upgrade", "run_number = event_file.split('_')[2] run_number = run_number.replace('.nxs.h5', '') # Reduction options 
#------------------------------------------------------------------------- # Wavelength", "#REFL1D_PARS = json.dumps(dict(back_sld=4.09, # back_roughness=4.28, # front_sld=0, # layers=[], # scale=0.9169, # background=3.753e-07))", "REF_L_xyz_event.nxs # The new format is REF_L_xyz.nxs.h5 run_number = event_file.split('_')[2] run_number = run_number.replace('.nxs.h5',", "compute scaling factors data_type = ws.getRun().getProperty(\"data_type\").value[0] if data_type == 1 and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing", "Determine whether this is data or whether we need to compute scaling factors", "y_title=\"Reflectivity\", y_log=True, show_dx=False) else: plot_div = plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"q", "10): reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r) reduced_file_path = os.path.join(output_dir, reduced_file_name) if", "sf_calculator import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE = True logger.notice(\"sf_calculator available\") except: import scipy logger.notice(\"Scaling factor", "if os.path.isfile(reduced_file_path): # Look to see whether submitting the plot is enabled if", "# D2O REFL1D_PARS = json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0, layers=[], scale=1.0, background=0.0)) # Quartz #REFL1D_PARS", "= ws.getRun().getProperty(\"data_type\").value[0] if data_type == 1 and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling factors\") sequence_number =", "= \"\" if os.path.isfile(\"template.xml\"): template_file = \"template.xml\" elif os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file = os.path.join(output_dir,", "x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False, publish=False) publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number, [[x,", "The legacy format is REF_L_xyz_event.nxs # The new format is REF_L_xyz.nxs.h5 run_number =", "it is not defined in the template PRIMARY_FRACTION_RANGE = [5, 290] NORMALIZE_TO_UNITY =", "tof_step=200, medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath) sf.execute() else: logger.notice(\"Automated reduction\") output = LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY,", "'REFL_%s_combined_data_auto.txt' % first_run_of_set default_file_path = os.path.join(output_dir, default_file_name) if os.path.isfile(default_file_path): print(\"Loading %s\" % os.path.join(output_dir,", "# The new format is REF_L_xyz.nxs.h5 run_number = event_file.split('_')[2] run_number = run_number.replace('.nxs.h5', '')", "need to treat \"\"\" import sys import os import json import warnings warnings.simplefilter('ignore')", "= plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False,", "sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0] _fpath = os.path.join(output_dir, \"sf_%s_%s_auto.cfg\"", "= os.path.split(event_file_path)[-1] # The legacy format is REF_L_xyz_event.nxs # The new format is", "except: import scipy logger.notice(\"Scaling factor calculation upgrade not available: scipy=%s\" % scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE", "\"WithReference\" #NORMALIZATION_TYPE = \"DirectBeam\" # Allowed values: dict or \"\" # 
D2O REFL1D_PARS", "reflectivity.readY(0) dy = reflectivity.readE(0) dx = reflectivity.readDx(0) if int(run_number) - first_run_of_set < 10:", "10.0 # Default primary fraction range to be used if it is not", "(\"MANTIDPATH\" in os.environ): del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid from mantid.simpleapi import *", "#------------------------------------------------------------------------- # Wavelength below which we don't need the absolute normalization WL_CUTOFF =", "Direct beams for scaling factors Type 2: Zero-attenuator direct beams Type 3: Data", "incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0] _fpath = os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" % (first_run_of_set, incident_medium)) sf = ScalingFactor(run_list=range(first_run_of_set,", "# scale=0.9169, # background=3.753e-07)) #------------------------------------------------------------------------- # Locate the template file # If no", "don't need to treat \"\"\" import sys import os import json import warnings", "run_number = run_number.replace('.nxs.h5', '') # Reduction options #------------------------------------------------------------------------- # Wavelength below which we", "# Determine whether this is data or whether we need to compute scaling", "Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) #------------------------------------------------------------------------- # Produce plot for the web monitor default_file_name = 'REFL_%s_combined_data_auto.txt'", "scaling factors\") sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0] _fpath =", "be used if it is not defined in the template PRIMARY_FRACTION_RANGE = [5,", "os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file = os.path.join(output_dir, \"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file = \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template:", "= \"template.xml\" elif os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file = os.path.join(output_dir, \"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file =", "calculation upgrade not available: scipy=%s\" % scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE = False event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file", "% os.path.join(output_dir, default_file_name)) reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\") plotting_ready = True try: from", "factor calculation upgrade not available: scipy=%s\" % scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE = False event_file_path=sys.argv[1] output_dir=sys.argv[2]", "elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file = \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template: %s\" % template_file) # Run the", "for the web monitor default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set default_file_path = os.path.join(output_dir, default_file_name)", "#------------------------------------------------------------------------- # Locate the template file # If no template file is available,", "from postprocessing.publish_plot import plot1d except ImportError: from finddata.publish_plot import plot1d, _determine_config_file, 
publish_plot if", "automated reduction will generate one template_file = \"\" if os.path.isfile(\"template.xml\"): template_file = \"template.xml\"", "that we don't need to treat \"\"\" import sys import os import json", "Wavelength below which we don't need the absolute normalization WL_CUTOFF = 10.0 #", "[[x, y, dy, dx]], instrument='REF_L', x_title=u\"q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False, publish=False) publish_plot('REF_L',", "sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid from mantid.simpleapi import * try: from sf_calculator import ScalingFactor", "= run_number.replace('.nxs.h5', '') # Reduction options #------------------------------------------------------------------------- # Wavelength below which we don't", "% (first_run_of_set, r+1, first_run_of_set+r) reduced_file_path = os.path.join(output_dir, reduced_file_name) if os.path.isfile(reduced_file_path): # Look to", "to treat \"\"\" import sys import os import json import warnings warnings.simplefilter('ignore') if", "os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" % (first_run_of_set, incident_medium)) sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number), sort_by_runs=True, tof_step=200,", "PRIMARY_FRACTION_RANGE = [5, 290] NORMALIZE_TO_UNITY = False # Allowed values: ['DirectBeam', 'WithReference'] NORMALIZATION_TYPE", "we need to compute scaling factors data_type = ws.getRun().getProperty(\"data_type\").value[0] if data_type == 1", "is data or whether we need to compute scaling factors data_type = ws.getRun().getProperty(\"data_type\").value[0]", "factors data_type = ws.getRun().getProperty(\"data_type\").value[0] if data_type == 1 and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling factors\")", "in os.environ): del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid from mantid.simpleapi import * try:", "1: Direct beams for scaling factors Type 2: Zero-attenuator direct beams Type 3:", "the measurement geometry if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': NORMALIZATION_TYPE = \"WithReference\" else: NORMALIZATION_TYPE", "the plot is enabled if plotting_ready: plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q", "# background=3.753e-07)) #------------------------------------------------------------------------- # Locate the template file # If no template file", "background=3.753e-07)) #------------------------------------------------------------------------- # Locate the template file # If no template file is", "dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False) else: plot_div = plot1d(first_run_of_set+r,", "If no template file is available, the automated reduction will generate one template_file", "to be used if it is not defined in the template PRIMARY_FRACTION_RANGE =", "y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False) else: plot_div =", "# Wavelength below which we don't need the absolute normalization WL_CUTOFF = 10.0", "back_roughness=2.7, front_sld=0, layers=[], scale=1.0, background=0.0)) # Quartz #REFL1D_PARS = json.dumps(dict(back_sld=4.09, # back_roughness=4.28, #", "file is available, the automated reduction will generate one template_file = \"\" if", "Zero-attenuator direct beams Type 3: Data 
that we don't need to treat \"\"\"", "3: Data that we don't need to treat \"\"\" import sys import os", "data_type = ws.getRun().getProperty(\"data_type\").value[0] if data_type == 1 and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling factors\") sequence_number", "= 10.0 # Default primary fraction range to be used if it is", "# If no template file is available, the automated reduction will generate one", "# Allowed values: dict or \"\" # D2O REFL1D_PARS = json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0,", "Check the measurement geometry if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': NORMALIZATION_TYPE = \"WithReference\" else:", "= \"DirectBeam\" # Allowed values: dict or \"\" # D2O REFL1D_PARS = json.dumps(dict(back_sld=6.4,", "finddata.publish_plot import plot1d, _determine_config_file, publish_plot if _determine_config_file(None) is None: plotting_ready = False x", "json import warnings warnings.simplefilter('ignore') if (\"MANTIDPATH\" in os.environ): del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import", "OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, FindPeaks=False, NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) #------------------------------------------------------------------------- # Produce plot", "REFL1D_PARS = json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0, layers=[], scale=1.0, background=0.0)) # Quartz #REFL1D_PARS = json.dumps(dict(back_sld=4.09,", "# The legacy format is REF_L_xyz_event.nxs # The new format is REF_L_xyz.nxs.h5 run_number", "# Run the auto-reduction ws = LoadEventNexus(Filename=event_file_path) # Check the measurement geometry if", "== 1 and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling factors\") sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0]", "scale=1.0, background=0.0)) # Quartz #REFL1D_PARS = json.dumps(dict(back_sld=4.09, # back_roughness=4.28, # front_sld=0, # layers=[],", "os import json import warnings warnings.simplefilter('ignore') if (\"MANTIDPATH\" in os.environ): del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\")", "if os.path.isfile(default_file_path): print(\"Loading %s\" % os.path.join(output_dir, default_file_name)) reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\") plotting_ready", "Liquid': NORMALIZATION_TYPE = \"WithReference\" else: NORMALIZATION_TYPE = \"DirectBeam\" # Determine whether this is", "sort_by_runs=True, tof_step=200, medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath) sf.execute() else: logger.notice(\"Automated reduction\") output = LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws,", "format is REF_L_xyz_event.nxs # The new format is REF_L_xyz.nxs.h5 run_number = event_file.split('_')[2] run_number", "instrument='REF_L', x_title=u\"q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False, publish=False) publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\")", "will generate one template_file = \"\" if os.path.isfile(\"template.xml\"): template_file = \"template.xml\" elif os.path.isfile(os.path.join(output_dir,", "else: plot_div = 
plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"q (1/A)\", x_log=True, y_title=\"Reflectivity\",", "one template_file = \"\" if os.path.isfile(\"template.xml\"): template_file = \"template.xml\" elif os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file", "which we don't need the absolute normalization WL_CUTOFF = 10.0 # Default primary", "logger.notice(\"sf_calculator available\") except: import scipy logger.notice(\"Scaling factor calculation upgrade not available: scipy=%s\" %", "plot for the web monitor default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set default_file_path = os.path.join(output_dir,", "# Look to see whether submitting the plot is enabled if plotting_ready: plot1d(first_run_of_set+r,", "need the absolute normalization WL_CUTOFF = 10.0 # Default primary fraction range to", "= True logger.notice(\"sf_calculator available\") except: import scipy logger.notice(\"Scaling factor calculation upgrade not available:", "instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False) else: plot_div = plot1d(first_run_of_set+r, [[x, y,", "Type 0: Normal sample data Type 1: Direct beams for scaling factors Type", "Allowed values: ['DirectBeam', 'WithReference'] NORMALIZATION_TYPE = \"WithReference\" #NORMALIZATION_TYPE = \"DirectBeam\" # Allowed values:", "= \"DirectBeam\" # Determine whether this is data or whether we need to", "available, the automated reduction will generate one template_file = \"\" if os.path.isfile(\"template.xml\"): template_file", "NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) #------------------------------------------------------------------------- # Produce plot for the web monitor default_file_name =", "# Reduction options #------------------------------------------------------------------------- # Wavelength below which we don't need the absolute", "NORMALIZATION_TYPE = \"WithReference\" else: NORMALIZATION_TYPE = \"DirectBeam\" # Determine whether this is data", "os.path.join(output_dir, default_file_name) if os.path.isfile(default_file_path): print(\"Loading %s\" % os.path.join(output_dir, default_file_name)) reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name),", "the web monitor default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set default_file_path = os.path.join(output_dir, default_file_name) if", "the template file # If no template file is available, the automated reduction", "the automated reduction will generate one template_file = \"\" if os.path.isfile(\"template.xml\"): template_file =", "don't need the absolute normalization WL_CUTOFF = 10.0 # Default primary fraction range", "scipy logger.notice(\"Scaling factor calculation upgrade not available: scipy=%s\" % scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE = False", "reflectivity.readE(0) dx = reflectivity.readDx(0) if int(run_number) - first_run_of_set < 10: for r in", "\"\" # D2O REFL1D_PARS = json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0, layers=[], scale=1.0, background=0.0)) # Quartz", "the absolute normalization WL_CUTOFF = 10.0 # Default primary fraction range to be", "values: dict or \"\" # D2O REFL1D_PARS = json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0, layers=[], scale=1.0,", "first_run_of_set + sequence_number), sort_by_runs=True, tof_step=200, medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath) sf.execute() else: 
logger.notice(\"Automated reduction\") output", "LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\") plotting_ready = True try: from postprocessing.publish_plot import plot1d except ImportError:", "Type 2: Zero-attenuator direct beams Type 3: Data that we don't need to", "= ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number), sort_by_runs=True, tof_step=200, medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath) sf.execute() else: logger.notice(\"Automated", "NORMALIZATION_TYPE = \"DirectBeam\" # Determine whether this is data or whether we need", "template_file = \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template: %s\" % template_file) # Run the auto-reduction ws", "del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid from mantid.simpleapi import * try: from sf_calculator", "= os.path.join(output_dir, reduced_file_name) if os.path.isfile(reduced_file_path): # Look to see whether submitting the plot", "the Liquids Reflectometer For reference: Type 0: Normal sample data Type 1: Direct", "+ sequence_number), sort_by_runs=True, tof_step=200, medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath) sf.execute() else: logger.notice(\"Automated reduction\") output =", "Auto-reduction script for the Liquids Reflectometer For reference: Type 0: Normal sample data", "template PRIMARY_FRACTION_RANGE = [5, 290] NORMALIZE_TO_UNITY = False # Allowed values: ['DirectBeam', 'WithReference']", "the auto-reduction ws = LoadEventNexus(Filename=event_file_path) # Check the measurement geometry if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] ==", "range(0, 10): reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r) reduced_file_path = os.path.join(output_dir, reduced_file_name)", "0: Normal sample data Type 1: Direct beams for scaling factors Type 2:", "from mantid.simpleapi import * try: from sf_calculator import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE = True logger.notice(\"sf_calculator", "data_type == 1 and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling factors\") sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set =", "Reflectometer For reference: Type 0: Normal sample data Type 1: Direct beams for", "ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0] _fpath = os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" % (first_run_of_set,", "= reflectivity.readDx(0) if int(run_number) - first_run_of_set < 10: for r in range(0, 10):", "monitor default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set default_file_path = os.path.join(output_dir, default_file_name) if os.path.isfile(default_file_path): print(\"Loading", "= event_file.split('_')[2] run_number = run_number.replace('.nxs.h5', '') # Reduction options #------------------------------------------------------------------------- # Wavelength below", "NORMALIZE_TO_UNITY = False # Allowed values: ['DirectBeam', 'WithReference'] NORMALIZATION_TYPE = \"WithReference\" #NORMALIZATION_TYPE =", "\"template.xml\" elif os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file = os.path.join(output_dir, \"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file = 
\"/SNS/REF_L/shared/autoreduce/template.xml\"", "Look to see whether submitting the plot is enabled if plotting_ready: plot1d(first_run_of_set+r, [[x,", "# Produce plot for the web monitor default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set default_file_path", "beams Type 3: Data that we don't need to treat \"\"\" import sys", "plotting_ready: plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False)", "dx = reflectivity.readDx(0) if int(run_number) - first_run_of_set < 10: for r in range(0,", "False event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file = os.path.split(event_file_path)[-1] # The legacy format is REF_L_xyz_event.nxs #", "% template_file) # Run the auto-reduction ws = LoadEventNexus(Filename=event_file_path) # Check the measurement", "#------------------------------------------------------------------------- # Produce plot for the web monitor default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set", "plotting_ready = True try: from postprocessing.publish_plot import plot1d except ImportError: from finddata.publish_plot import", "= LoadEventNexus(Filename=event_file_path) # Check the measurement geometry if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': NORMALIZATION_TYPE", "< 10: for r in range(0, 10): reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1,", "absolute normalization WL_CUTOFF = 10.0 # Default primary fraction range to be used", "direct beams Type 3: Data that we don't need to treat \"\"\" import", "the template PRIMARY_FRACTION_RANGE = [5, 290] NORMALIZE_TO_UNITY = False # Allowed values: ['DirectBeam',", "= False # Allowed values: ['DirectBeam', 'WithReference'] NORMALIZATION_TYPE = \"WithReference\" #NORMALIZATION_TYPE = \"DirectBeam\"", "'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r) reduced_file_path = os.path.join(output_dir, reduced_file_name) if os.path.isfile(reduced_file_path): # Look", "r in range(0, 10): reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r) reduced_file_path =", "Default primary fraction range to be used if it is not defined in", "None: plotting_ready = False x = reflectivity.readX(0) y = reflectivity.readY(0) dy = reflectivity.readE(0)", "\"DirectBeam\" # Determine whether this is data or whether we need to compute", "reduced_file_path = os.path.join(output_dir, reduced_file_name) if os.path.isfile(reduced_file_path): # Look to see whether submitting the", "\"sf_%s_%s_auto.cfg\" % (first_run_of_set, incident_medium)) sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number), sort_by_runs=True, tof_step=200, medium=incident_medium,", "submitting the plot is enabled if plotting_ready: plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L',", "Produce plot for the web monitor default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set default_file_path =", "logger.notice(\"Automated reduction\") output = LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True,", "web monitor default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set default_file_path = os.path.join(output_dir, default_file_name) if 
os.path.isfile(default_file_path):", "int(run_number) - first_run_of_set < 10: for r in range(0, 10): reduced_file_name = 'REFL_%s_%s_%s_auto.nxs'", "== 'Free Liquid': NORMALIZATION_TYPE = \"WithReference\" else: NORMALIZATION_TYPE = \"DirectBeam\" # Determine whether", "2: Zero-attenuator direct beams Type 3: Data that we don't need to treat", "True logger.notice(\"sf_calculator available\") except: import scipy logger.notice(\"Scaling factor calculation upgrade not available: scipy=%s\"", "output_dir=sys.argv[2] event_file = os.path.split(event_file_path)[-1] # The legacy format is REF_L_xyz_event.nxs # The new", "publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q", "TemplateFile=template_file, FindPeaks=False, NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) #------------------------------------------------------------------------- # Produce plot for the web monitor", "OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, FindPeaks=False, NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) #------------------------------------------------------------------------- # Produce plot for the web", "is REF_L_xyz.nxs.h5 run_number = event_file.split('_')[2] run_number = run_number.replace('.nxs.h5', '') # Reduction options #-------------------------------------------------------------------------", "import warnings warnings.simplefilter('ignore') if (\"MANTIDPATH\" in os.environ): del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid", "= os.path.join(output_dir, \"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file = \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template: %s\" % template_file)", "files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True,", "if it is not defined in the template PRIMARY_FRACTION_RANGE = [5, 290] NORMALIZE_TO_UNITY", "# Locate the template file # If no template file is available, the", "treat \"\"\" import sys import os import json import warnings warnings.simplefilter('ignore') if (\"MANTIDPATH\"", "is enabled if plotting_ready: plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True,", "to see whether submitting the plot is enabled if plotting_ready: plot1d(first_run_of_set+r, [[x, y,", "we don't need the absolute normalization WL_CUTOFF = 10.0 # Default primary fraction", "factors Type 2: Zero-attenuator direct beams Type 3: Data that we don't need", "config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True,", "first_run_of_set=int(output[1]) #------------------------------------------------------------------------- # Produce plot for the web monitor default_file_name = 'REFL_%s_combined_data_auto.txt' %", "to compute scaling factors data_type = ws.getRun().getProperty(\"data_type\").value[0] if data_type == 1 and DIRECT_BEAM_CALC_AVAILABLE:", "sys import os import json import warnings warnings.simplefilter('ignore') if (\"MANTIDPATH\" in os.environ): del", 
"ws.getRun().getProperty(\"data_type\").value[0] if data_type == 1 and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling factors\") sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0]", "Liquids Reflectometer For reference: Type 0: Normal sample data Type 1: Direct beams", "os.path.isfile(\"template.xml\"): template_file = \"template.xml\" elif os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file = os.path.join(output_dir, \"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"):", "import sys import os import json import warnings warnings.simplefilter('ignore') if (\"MANTIDPATH\" in os.environ):", "= LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, FindPeaks=False, NormalizationType=NORMALIZATION_TYPE,", "os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file = \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template: %s\" % template_file) # Run the auto-reduction", "json.dumps(dict(back_sld=4.09, # back_roughness=4.28, # front_sld=0, # layers=[], # scale=0.9169, # background=3.753e-07)) #------------------------------------------------------------------------- #", "PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, FindPeaks=False, NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) #------------------------------------------------------------------------- # Produce", "% (first_run_of_set, incident_medium)) sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number), sort_by_runs=True, tof_step=200, medium=incident_medium, slit_tolerance=0.06,", "reduction will generate one template_file = \"\" if os.path.isfile(\"template.xml\"): template_file = \"template.xml\" elif", "if data_type == 1 and DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling factors\") sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set", "DIRECT_BEAM_CALC_AVAILABLE: logger.notice(\"Computing scaling factors\") sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0]", "InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, FindPeaks=False, NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1])", "Unit=\"MomentumTransfer\") plotting_ready = True try: from postprocessing.publish_plot import plot1d except ImportError: from finddata.publish_plot", "os.environ): del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid from mantid.simpleapi import * try: from", "= ws.getRun().getProperty(\"incident_medium\").value[0] _fpath = os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" % (first_run_of_set, 
incident_medium)) sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set", "\"\"\" import sys import os import json import warnings warnings.simplefilter('ignore') if (\"MANTIDPATH\" in", "['DirectBeam', 'WithReference'] NORMALIZATION_TYPE = \"WithReference\" #NORMALIZATION_TYPE = \"DirectBeam\" # Allowed values: dict or", "range to be used if it is not defined in the template PRIMARY_FRACTION_RANGE", "ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number), sort_by_runs=True, tof_step=200, medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath) sf.execute() else: logger.notice(\"Automated reduction\")", "warnings warnings.simplefilter('ignore') if (\"MANTIDPATH\" in os.environ): del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid from", "mantid.simpleapi import * try: from sf_calculator import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE = True logger.notice(\"sf_calculator available\")", "plot_div = plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True,", "if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': NORMALIZATION_TYPE = \"WithReference\" else: NORMALIZATION_TYPE = \"DirectBeam\" #", "front_sld=0, # layers=[], # scale=0.9169, # background=3.753e-07)) #------------------------------------------------------------------------- # Locate the template file", "x = reflectivity.readX(0) y = reflectivity.readY(0) dy = reflectivity.readE(0) dx = reflectivity.readDx(0) if", "from finddata.publish_plot import plot1d, _determine_config_file, publish_plot if _determine_config_file(None) is None: plotting_ready = False", "beams for scaling factors Type 2: Zero-attenuator direct beams Type 3: Data that", "Normal sample data Type 1: Direct beams for scaling factors Type 2: Zero-attenuator", "= False x = reflectivity.readX(0) y = reflectivity.readY(0) dy = reflectivity.readE(0) dx =", "# front_sld=0, # layers=[], # scale=0.9169, # background=3.753e-07)) #------------------------------------------------------------------------- # Locate the template", "scale=0.9169, # background=3.753e-07)) #------------------------------------------------------------------------- # Locate the template file # If no template", "(first_run_of_set, r+1, first_run_of_set+r) reduced_file_path = os.path.join(output_dir, reduced_file_name) if os.path.isfile(reduced_file_path): # Look to see", "= False event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file = os.path.split(event_file_path)[-1] # The legacy format is REF_L_xyz_event.nxs", "event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file = os.path.split(event_file_path)[-1] # The legacy format is REF_L_xyz_event.nxs # The", "warnings.simplefilter('ignore') if (\"MANTIDPATH\" in os.environ): del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid from mantid.simpleapi", "sf.execute() else: logger.notice(\"Automated reduction\") output = LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06,", "FindPeaks=False, NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) 
#------------------------------------------------------------------------- # Produce plot for the web monitor default_file_name", "upgrade not available: scipy=%s\" % scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE = False event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file =", "reduction\") output = LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file,", "template_file = os.path.join(output_dir, \"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file = \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template: %s\" %", "template_file) # Run the auto-reduction ws = LoadEventNexus(Filename=event_file_path) # Check the measurement geometry", "<gh_stars>0 \"\"\" Auto-reduction script for the Liquids Reflectometer For reference: Type 0: Normal", "back_roughness=4.28, # front_sld=0, # layers=[], # scale=0.9169, # background=3.753e-07)) #------------------------------------------------------------------------- # Locate the", "or \"\" # D2O REFL1D_PARS = json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0, layers=[], scale=1.0, background=0.0)) #", "else: plot1d(run_number, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False)", "= \"WithReference\" else: NORMALIZATION_TYPE = \"DirectBeam\" # Determine whether this is data or", "json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0, layers=[], scale=1.0, background=0.0)) # Quartz #REFL1D_PARS = json.dumps(dict(back_sld=4.09, # back_roughness=4.28,", "default_file_name) if os.path.isfile(default_file_path): print(\"Loading %s\" % os.path.join(output_dir, default_file_name)) reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\")", "DIRECT_BEAM_CALC_AVAILABLE = False event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file = os.path.split(event_file_path)[-1] # The legacy format is", "'WithReference'] NORMALIZATION_TYPE = \"WithReference\" #NORMALIZATION_TYPE = \"DirectBeam\" # Allowed values: dict or \"\"", "file # If no template file is available, the automated reduction will generate", "first_run_of_set default_file_path = os.path.join(output_dir, default_file_name) if os.path.isfile(default_file_path): print(\"Loading %s\" % os.path.join(output_dir, default_file_name)) reflectivity", "- first_run_of_set < 10: for r in range(0, 10): reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' %", "_fpath = os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" % (first_run_of_set, incident_medium)) sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number),", "(first_run_of_set, incident_medium)) sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number), sort_by_runs=True, tof_step=200, medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath)", "False # Allowed values: ['DirectBeam', 'WithReference'] NORMALIZATION_TYPE = \"WithReference\" #NORMALIZATION_TYPE = \"DirectBeam\" #", "= [5, 290] NORMALIZE_TO_UNITY = False # Allowed values: ['DirectBeam', 'WithReference'] NORMALIZATION_TYPE =", "x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False) else: plot_div = plot1d(first_run_of_set+r, [[x, y, dy,", "reference: Type 0: 
Normal sample data Type 1: Direct beams for scaling factors", "ws.getRun().getProperty(\"incident_medium\").value[0] _fpath = os.path.join(output_dir, \"sf_%s_%s_auto.cfg\" % (first_run_of_set, incident_medium)) sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set +", "output = LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir, SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, FindPeaks=False,", "legacy format is REF_L_xyz_event.nxs # The new format is REF_L_xyz.nxs.h5 run_number = event_file.split('_')[2]", "template: %s\" % template_file) # Run the auto-reduction ws = LoadEventNexus(Filename=event_file_path) # Check", "_determine_config_file, publish_plot if _determine_config_file(None) is None: plotting_ready = False x = reflectivity.readX(0) y", "first_run_of_set+r, files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\",", "import plot1d except ImportError: from finddata.publish_plot import plot1d, _determine_config_file, publish_plot if _determine_config_file(None) is", "For reference: Type 0: Normal sample data Type 1: Direct beams for scaling", "\"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file = \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template: %s\" % template_file) # Run", "# back_roughness=4.28, # front_sld=0, # layers=[], # scale=0.9169, # background=3.753e-07)) #------------------------------------------------------------------------- # Locate", "y_log=True, show_dx=False, publish=False) publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div}, config=\"/SNS/REF_L/shared/.livedata.conf\") else: plot1d(run_number, [[x, y, dy,", "see whether submitting the plot is enabled if plotting_ready: plot1d(first_run_of_set+r, [[x, y, dy,", "fraction range to be used if it is not defined in the template", "print(\"Loading %s\" % os.path.join(output_dir, default_file_name)) reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\") plotting_ready = True", "False x = reflectivity.readX(0) y = reflectivity.readY(0) dy = reflectivity.readE(0) dx = reflectivity.readDx(0)", "try: from sf_calculator import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE = True logger.notice(\"sf_calculator available\") except: import scipy", "medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath) sf.execute() else: logger.notice(\"Automated reduction\") output = LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF,", "background=0.0)) # Quartz #REFL1D_PARS = json.dumps(dict(back_sld=4.09, # back_roughness=4.28, # front_sld=0, # layers=[], #", "if (\"MANTIDPATH\" in os.environ): del os.environ[\"MANTIDPATH\"] sys.path.insert(0,\"/opt/mantidnightly/bin\") sys.path.insert(1,\"/opt/mantidnightly/lib\") import mantid from mantid.simpleapi import", "= reflectivity.readE(0) dx = reflectivity.readDx(0) if int(run_number) - first_run_of_set < 10: for r", "layers=[], scale=1.0, background=0.0)) # Quartz #REFL1D_PARS = json.dumps(dict(back_sld=4.09, # back_roughness=4.28, # front_sld=0, #", "= reflectivity.readX(0) y = reflectivity.readY(0) dy = reflectivity.readE(0) dx = reflectivity.readDx(0) if 
int(run_number)", "whether this is data or whether we need to compute scaling factors data_type", "Allowed values: dict or \"\" # D2O REFL1D_PARS = json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0, layers=[],", "front_sld=0, layers=[], scale=1.0, background=0.0)) # Quartz #REFL1D_PARS = json.dumps(dict(back_sld=4.09, # back_roughness=4.28, # front_sld=0,", "plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False, publish=False)", "available: scipy=%s\" % scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE = False event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file = os.path.split(event_file_path)[-1] #", "defined in the template PRIMARY_FRACTION_RANGE = [5, 290] NORMALIZE_TO_UNITY = False # Allowed", "not available: scipy=%s\" % scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE = False event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file = os.path.split(event_file_path)[-1]", "dict or \"\" # D2O REFL1D_PARS = json.dumps(dict(back_sld=6.4, back_roughness=2.7, front_sld=0, layers=[], scale=1.0, background=0.0))", "publish_plot if _determine_config_file(None) is None: plotting_ready = False x = reflectivity.readX(0) y =", "for scaling factors Type 2: Zero-attenuator direct beams Type 3: Data that we", "new format is REF_L_xyz.nxs.h5 run_number = event_file.split('_')[2] run_number = run_number.replace('.nxs.h5', '') # Reduction", "is available, the automated reduction will generate one template_file = \"\" if os.path.isfile(\"template.xml\"):", "#NORMALIZATION_TYPE = \"DirectBeam\" # Allowed values: dict or \"\" # D2O REFL1D_PARS =", "[5, 290] NORMALIZE_TO_UNITY = False # Allowed values: ['DirectBeam', 'WithReference'] NORMALIZATION_TYPE = \"WithReference\"", "= True try: from postprocessing.publish_plot import plot1d except ImportError: from finddata.publish_plot import plot1d,", "plotting_ready = False x = reflectivity.readX(0) y = reflectivity.readY(0) dy = reflectivity.readE(0) dx", "SlitTolerance=0.06, ReadSequenceFromFile=True, OrderDirectBeamsByRunNumber=True, TemplateFile=template_file, FindPeaks=False, NormalizationType=NORMALIZATION_TYPE, Refl1DModelParameters=REFL1D_PARS) first_run_of_set=int(output[1]) #------------------------------------------------------------------------- # Produce plot for", "logger.notice(\"Computing scaling factors\") sequence_number = ws.getRun().getProperty(\"sequence_number\").value[0] first_run_of_set = ws.getRun().getProperty(\"sequence_id\").value[0] incident_medium = ws.getRun().getProperty(\"incident_medium\").value[0] _fpath", "template_file = \"template.xml\" elif os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file = os.path.join(output_dir, \"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file", "data Type 1: Direct beams for scaling factors Type 2: Zero-attenuator direct beams", "ws = LoadEventNexus(Filename=event_file_path) # Check the measurement geometry if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid':", "values: ['DirectBeam', 'WithReference'] NORMALIZATION_TYPE = \"WithReference\" #NORMALIZATION_TYPE = \"DirectBeam\" # Allowed values: dict", "(1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False) else: plot_div = plot1d(first_run_of_set+r, [[x, y, dy, dx]],", "* try: from sf_calculator import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE = True logger.notice(\"sf_calculator available\") except: import", 
"sf_file=_fpath) sf.execute() else: logger.notice(\"Automated reduction\") output = LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE, OutputDirectory=output_dir,", "if int(run_number) - first_run_of_set < 10: for r in range(0, 10): reduced_file_name =", "mantid from mantid.simpleapi import * try: from sf_calculator import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE = True", "enabled if plotting_ready: plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\",", "y = reflectivity.readY(0) dy = reflectivity.readE(0) dx = reflectivity.readDx(0) if int(run_number) - first_run_of_set", "Locate the template file # If no template file is available, the automated", "# Allowed values: ['DirectBeam', 'WithReference'] NORMALIZATION_TYPE = \"WithReference\" #NORMALIZATION_TYPE = \"DirectBeam\" # Allowed", "first_run_of_set+r) reduced_file_path = os.path.join(output_dir, reduced_file_name) if os.path.isfile(reduced_file_path): # Look to see whether submitting", "dy, dx]], instrument='REF_L', x_title=u\"q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False, publish=False) publish_plot('REF_L', first_run_of_set+r, files={'file':", "ImportError: from finddata.publish_plot import plot1d, _determine_config_file, publish_plot if _determine_config_file(None) is None: plotting_ready =", "default_file_path = os.path.join(output_dir, default_file_name) if os.path.isfile(default_file_path): print(\"Loading %s\" % os.path.join(output_dir, default_file_name)) reflectivity =", "reduced_file_name) if os.path.isfile(reduced_file_path): # Look to see whether submitting the plot is enabled", "default_file_name), Unit=\"MomentumTransfer\") plotting_ready = True try: from postprocessing.publish_plot import plot1d except ImportError: from", "dx]], instrument='REF_L', x_title=u\"q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False, publish=False) publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div},", "slit_tolerance=0.06, sf_file=_fpath) sf.execute() else: logger.notice(\"Automated reduction\") output = LRAutoReduction(#Filename=event_file_path, InputWorkspace=ws, ScaleToUnity=NORMALIZE_TO_UNITY, ScalingWavelengthCutoff=WL_CUTOFF, PrimaryFractionRange=PRIMARY_FRACTION_RANGE,", "# Check the measurement geometry if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': NORMALIZATION_TYPE = \"WithReference\"", "= 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r) reduced_file_path = os.path.join(output_dir, reduced_file_name) if os.path.isfile(reduced_file_path): #", "primary fraction range to be used if it is not defined in the", "import * try: from sf_calculator import ScalingFactor DIRECT_BEAM_CALC_AVAILABLE = True logger.notice(\"sf_calculator available\") except:", "event_file.split('_')[2] run_number = run_number.replace('.nxs.h5', '') # Reduction options #------------------------------------------------------------------------- # Wavelength below which", "reflectivity.readX(0) y = reflectivity.readY(0) dy = reflectivity.readE(0) dx = reflectivity.readDx(0) if int(run_number) -", "whether submitting the plot is enabled if plotting_ready: plot1d(first_run_of_set+r, [[x, y, dy, dx]],", "Data that we don't need to treat \"\"\" import sys import os import", "os.path.isfile(default_file_path): print(\"Loading %s\" % 
os.path.join(output_dir, default_file_name)) reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\") plotting_ready =", "plot is enabled if plotting_ready: plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\",", "template_file = \"\" if os.path.isfile(\"template.xml\"): template_file = \"template.xml\" elif os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file =", "available\") except: import scipy logger.notice(\"Scaling factor calculation upgrade not available: scipy=%s\" % scipy.__version__)", "%s\" % template_file) # Run the auto-reduction ws = LoadEventNexus(Filename=event_file_path) # Check the", "auto-reduction ws = LoadEventNexus(Filename=event_file_path) # Check the measurement geometry if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free", "measurement geometry if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': NORMALIZATION_TYPE = \"WithReference\" else: NORMALIZATION_TYPE =", "template file is available, the automated reduction will generate one template_file = \"\"", "else: NORMALIZATION_TYPE = \"DirectBeam\" # Determine whether this is data or whether we", "import os import json import warnings warnings.simplefilter('ignore') if (\"MANTIDPATH\" in os.environ): del os.environ[\"MANTIDPATH\"]", "script for the Liquids Reflectometer For reference: Type 0: Normal sample data Type", "\"\"\" Auto-reduction script for the Liquids Reflectometer For reference: Type 0: Normal sample", "'') # Reduction options #------------------------------------------------------------------------- # Wavelength below which we don't need the", "# Quartz #REFL1D_PARS = json.dumps(dict(back_sld=4.09, # back_roughness=4.28, # front_sld=0, # layers=[], # scale=0.9169,", "run_number.replace('.nxs.h5', '') # Reduction options #------------------------------------------------------------------------- # Wavelength below which we don't need", "10: for r in range(0, 10): reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r)", "x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False) else: plot_div = plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L',", "is not defined in the template PRIMARY_FRACTION_RANGE = [5, 290] NORMALIZE_TO_UNITY = False", "is REF_L_xyz_event.nxs # The new format is REF_L_xyz.nxs.h5 run_number = event_file.split('_')[2] run_number =", "for the Liquids Reflectometer For reference: Type 0: Normal sample data Type 1:", "event_file = os.path.split(event_file_path)[-1] # The legacy format is REF_L_xyz_event.nxs # The new format", "elif os.path.isfile(os.path.join(output_dir, \"template.xml\")): template_file = os.path.join(output_dir, \"template.xml\") elif os.path.isfile(\"/SNS/REF_L/shared/autoreduce/template.xml\"): template_file = \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using", "geometry if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': NORMALIZATION_TYPE = \"WithReference\" else: NORMALIZATION_TYPE = \"DirectBeam\"", "sequence_number), sort_by_runs=True, tof_step=200, medium=incident_medium, slit_tolerance=0.06, sf_file=_fpath) sf.execute() else: logger.notice(\"Automated reduction\") output = LRAutoReduction(#Filename=event_file_path,", "normalization WL_CUTOFF = 10.0 # Default primary fraction range to be used if", "= \"/SNS/REF_L/shared/autoreduce/template.xml\" print(\"Using template: %s\" % template_file) # Run the 
auto-reduction ws =", "r+1, first_run_of_set+r) reduced_file_path = os.path.join(output_dir, reduced_file_name) if os.path.isfile(reduced_file_path): # Look to see whether", "whether we need to compute scaling factors data_type = ws.getRun().getProperty(\"data_type\").value[0] if data_type ==", "reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit=\"MomentumTransfer\") plotting_ready = True try: from postprocessing.publish_plot import plot1d", "REF_L_xyz.nxs.h5 run_number = event_file.split('_')[2] run_number = run_number.replace('.nxs.h5', '') # Reduction options #------------------------------------------------------------------------- #", "y, dy, dx]], instrument='REF_L', x_title=u\"q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False, publish=False) publish_plot('REF_L', first_run_of_set+r,", "scipy.__version__) DIRECT_BEAM_CALC_AVAILABLE = False event_file_path=sys.argv[1] output_dir=sys.argv[2] event_file = os.path.split(event_file_path)[-1] # The legacy format", "layers=[], # scale=0.9169, # background=3.753e-07)) #------------------------------------------------------------------------- # Locate the template file # If", "in the template PRIMARY_FRACTION_RANGE = [5, 290] NORMALIZE_TO_UNITY = False # Allowed values:", "except ImportError: from finddata.publish_plot import plot1d, _determine_config_file, publish_plot if _determine_config_file(None) is None: plotting_ready", "try: from postprocessing.publish_plot import plot1d except ImportError: from finddata.publish_plot import plot1d, _determine_config_file, publish_plot", "[[x, y, dy, dx]], instrument='REF_L', x_title=u\"Q (1/A)\", x_log=True, y_title=\"Reflectivity\", y_log=True, show_dx=False) else: plot_div", "Type 1: Direct beams for scaling factors Type 2: Zero-attenuator direct beams Type" ]
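# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): the script reads
# its inputs from sys.argv, so the autoreduction service presumably calls
# it as `python <this_script>.py <event_file_path> <output_dir>`. The
# snippet below only illustrates how the run number is parsed from the
# two file naming schemes noted above; the file names are hypothetical.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    for _name in ('REF_L_188299_event.nxs', 'REF_L_188299.nxs.h5'):
        _number = _name.split('_')[2].replace('.nxs.h5', '')
        print("%s -> run %s" % (_name, _number))  # both yield 188299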
[ "content2 = fp.read() finally: fp.close() # The not-handled exception may be raised here", "self.mimes) if not fobj: try: if realfname : fname = realfname else: try:", "= obj f = True for ext in obj._getExtensions(): self.exts[ext] = obj f", "obj): f = False for mime in obj._getMimeTypes(): self.mimes[mime] = obj f =", "OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). #", "# GNU Affero General Public License for more details. # # You should", "override the Content or the File methods that give an optimal result. \"\"\"", "fname,ext = filename and os.path.splitext(filename) or ('','') fd, rfname = tempfile.mkstemp(suffix=ext) os.write(fd, content)", "tempfile from subprocess import Popen, PIPE _logger = logging.getLogger(__name__) class NhException(Exception): pass class", "subprocess import Popen, PIPE _logger = logging.getLogger(__name__) class NhException(Exception): pass class indexer(object): \"\"\"", "published by the Free Software Foundation, either version 3 of the # License,", "obj._getExtensions(): self.exts[ext] = obj f = True if f: _logger.debug('Register content indexer: %r.',", "mimetypes \"\"\" return [] def _getExtensions(self): return [] def _getDefMime(self, ext): \"\"\" Return", "'') # If we created a tmp file, unlink it now if not", "handled here.\") def __repr__(self): return \"<indexer %s.%s>\" %(self.__module__, self.__class__.__name__) def mime_match(mime, mdict): if", "here return self._doIndexContent(content2) # last try, with a tmp file if content: try:", "(<http://tiny.be>). # # This program is free software: you can redistribute it and/or", "(mime, '') except Exception: _logger.exception(\"Cannot index file %s (%s).\", filename, fname or realfname)", "optimal. \"\"\" res = '' try: if content != None: return self._doIndexContent(content) except", "return None def indexContent(self, content, filename=None, realfile=None): \"\"\" Use either content or the", "(%s).\", filename, fname or realfname) res = (mime, '') # If we created", "the # GNU Affero General Public License for more details. # # You", "raise NhException(\"Content cannot be handled here.\") def __repr__(self): return \"<indexer %s.%s>\" %(self.__module__, self.__class__.__name__)", "index. Some parsers will work better with the actual content, others parse a", "else: _logger.debug(\"Have no object, return (%s, None).\", mime) res = (mime, '') except", "file, unlink it now if not realfname and fname: try: os.unlink(fname) except Exception:", "= None fname = None mime = None if content_type and self.mimes.has_key(content_type): mime", "mime = mime2 except Exception: _logger.exception('Cannot determine mime type.') try: if fobj: res", "Content or the File methods that give an optimal result. \"\"\" def _getMimeTypes(self):", "without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR", "def __repr__(self): return \"<indexer %s.%s>\" %(self.__module__, self.__class__.__name__) def mime_match(mime, mdict): if mdict.has_key(mime): return", "per file type. Override this class to add more functionality. Note that you", "add more functionality. Note that you should only override the Content or the", "(result, _) = pop.communicate() mime2 = result.split(';')[0] _logger.debug('File gives us: %s', mime2) #", "determine mime type.') try: if fobj: res = (mime, fobj.indexContent(content,filename,fname or realfname) )", "3 of the # License, or (at your option) any later version. #", "more functionality. 
Note that you should only override the Content or the File", "not f: raise Exception(\"Your indexer should at least support a mimetype or extension.\")", "Note that you should only override the Content or the File methods that", "mime = None if content_type and self.mimes.has_key(content_type): mime = content_type fobj = self.mimes[content_type]", "self.exts = {} def register(self, obj): f = False for mime in obj._getMimeTypes():", "support a mimetype or extension.\") def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False): fobj", "= mime_match(content_type, self.mimes) if not fobj: try: if realfname : fname = realfname", "mts = self._getMimeTypes(); if len (mts): return mts[0] return None def indexContent(self, content,", "return mts[0] return None def indexContent(self, content, filename=None, realfile=None): \"\"\" Use either content", "be handled here.\") def _doIndexFile(self, fpath): raise NhException(\"Content cannot be handled here.\") def", "content: try: fname,ext = filename and os.path.splitext(filename) or ('','') fd, rfname = tempfile.mkstemp(suffix=ext)", "content) os.close(fd) res = self._doIndexFile(rfname) os.unlink(rfname) return res except NhException: pass raise NhException('No", "type, ideally the closest to the extension ext. \"\"\" mts = self._getMimeTypes(); if", "Exception(\"Your indexer should at least support a mimetype or extension.\") def doIndex(self, content,", "file, to index. Some parsers will work better with the actual content, others", "Override this class to add more functionality. Note that you should only override", "should only override the Content or the File methods that give an optimal", "index file.') def _doIndexContent(self, content): raise NhException(\"Content cannot be handled here.\") def _doIndexFile(self,", "content_type fobj = self.mimes[content_type] elif filename: bname,ext = os.path.splitext(filename) if self.exts.has_key(ext): fobj =", "WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A", "res = self._doIndexFile(rfname) os.unlink(rfname) return res except NhException: pass raise NhException('No appropriate method", "fname = None mime = None if content_type and self.mimes.has_key(content_type): mime = content_type", "and fname: try: os.unlink(fname) except Exception: _logger.exception(\"Cannot unlink %s.\", fname) return res cntIndex", "mime,fobj = mime_match(content_type, self.mimes) if not fobj: try: if realfname : fname =", "= self.mimes[content_type] elif filename: bname,ext = os.path.splitext(filename) if self.exts.has_key(ext): fobj = self.exts[ext] mime", "that it will be useful, # but WITHOUT ANY WARRANTY; without even the", "# This program is free software: you can redistribute it and/or modify #", "= True if f: _logger.debug('Register content indexer: %r.', obj) if not f: raise", "still exists now. mime,fobj = mime_match(mime2, self.mimes) if not mime: mime = mime2", "(C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you", "'tmp' fd, fname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE)", "mdict): if mdict.has_key(mime): return (mime, mdict[mime]) if '/' in mime: mpat = mime.split('/')[0]+'/*'", "Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ##############################################################################", "Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). 
# # This program is", "############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny", "!= None: try: return self._doIndexFile(realfile) except NhException: pass fp = open(realfile,'rb') try: content2", "if content_type and self.mimes.has_key(content_type): mime = content_type fobj = self.mimes[content_type] elif filename: bname,ext", "fd, rfname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) res = self._doIndexFile(rfname) os.unlink(rfname) return res", "self._doIndexFile(realfile) except NhException: pass fp = open(realfile,'rb') try: content2 = fp.read() finally: fp.close()", "mime type.') try: if fobj: res = (mime, fobj.indexContent(content,filename,fname or realfname) ) else:", "if '/' in mime: mpat = mime.split('/')[0]+'/*' if mdict.has_key(mpat): return (mime, mdict[mpat]) return", "indexer should at least support a mimetype or extension.\") def doIndex(self, content, filename=None,", "def indexContent(self, content, filename=None, realfile=None): \"\"\" Use either content or the real file,", "mime in obj._getMimeTypes(): self.mimes[mime] = obj f = True for ext in obj._getExtensions():", "self.__class__.__name__) def mime_match(mime, mdict): if mdict.has_key(mime): return (mime, mdict[mime]) if '/' in mime:", "{} self.exts = {} def register(self, obj): f = False for mime in", "= os.path.splitext(filename or 'test.tmp') except Exception: bname, ext = filename, 'tmp' fd, fname", "shell=False, stdout=PIPE) (result, _) = pop.communicate() mime2 = result.split(';')[0] _logger.debug('File gives us: %s',", "implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the", "the # License, or (at your option) any later version. # # This", "a mimetype for this document type, ideally the closest to the extension ext.", "not fobj: try: if realfname : fname = realfname else: try: bname,ext =", "os.path.splitext(filename) or ('','') fd, rfname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) res = self._doIndexFile(rfname)", "<http://www.gnu.org/licenses/>. # ############################################################################## import logging import os import tempfile from subprocess import Popen,", "cannot be handled here.\") def __repr__(self): return \"<indexer %s.%s>\" %(self.__module__, self.__class__.__name__) def mime_match(mime,", "the Free Software Foundation, either version 3 of the # License, or (at", "None: try: return self._doIndexFile(realfile) except NhException: pass fp = open(realfile,'rb') try: content2 =", "(None, None) class contentIndex(object): def __init__(self): self.mimes = {} self.exts = {} def", "filename=None, content_type=None, realfname=None, debug=False): fobj = None fname = None mime = None", "def _doIndexFile(self, fpath): raise NhException(\"Content cannot be handled here.\") def __repr__(self): return \"<indexer", "the hope that it will be useful, # but WITHOUT ANY WARRANTY; without", "file. Typically, one indexer should be instantiated per file type. Override this class", "fname = realfname else: try: bname,ext = os.path.splitext(filename or 'test.tmp') except Exception: bname,", "An indexer knows how to parse the content of some file. Typically, one", "it now if not realfname and fname: try: os.unlink(fname) except Exception: _logger.exception(\"Cannot unlink", "the optimal. 
\"\"\" res = '' try: if content != None: return self._doIndexContent(content)", "if mdict.has_key(mime): return (mime, mdict[mime]) if '/' in mime: mpat = mime.split('/')[0]+'/*' if", "self.mimes.has_key(content_type): mime = content_type fobj = self.mimes[content_type] elif filename: bname,ext = os.path.splitext(filename) if", "file type. Override this class to add more functionality. Note that you should", "os.path.splitext(filename) if self.exts.has_key(ext): fobj = self.exts[ext] mime = fobj._getDefMime(ext) if content_type and not", "Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>.", "here.\") def __repr__(self): return \"<indexer %s.%s>\" %(self.__module__, self.__class__.__name__) def mime_match(mime, mdict): if mdict.has_key(mime):", "= None mime = None if content_type and self.mimes.has_key(content_type): mime = content_type fobj", "self.exts[ext] = obj f = True if f: _logger.debug('Register content indexer: %r.', obj)", "parsers will work better with the actual content, others parse a file easier.", "try: return self._doIndexFile(realfile) except NhException: pass fp = open(realfile,'rb') try: content2 = fp.read()", "return (mime, mdict[mpat]) return (None, None) class contentIndex(object): def __init__(self): self.mimes = {}", "import Popen, PIPE _logger = logging.getLogger(__name__) class NhException(Exception): pass class indexer(object): \"\"\" An", "content of some file. Typically, one indexer should be instantiated per file type.", "document type, ideally the closest to the extension ext. \"\"\" mts = self._getMimeTypes();", "= Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result, _) = pop.communicate() mime2 = result.split(';')[0] _logger.debug('File gives", "= self._doIndexFile(rfname) os.unlink(rfname) return res except NhException: pass raise NhException('No appropriate method to", "version. # # This program is distributed in the hope that it will", "ext. \"\"\" mts = self._getMimeTypes(); if len (mts): return mts[0] return None def", "us: %s', mime2) # Note that the temporary file still exists now. mime,fobj", "_getMimeTypes(self): \"\"\" Return supported mimetypes \"\"\" return [] def _getExtensions(self): return [] def", "not fobj: mime,fobj = mime_match(content_type, self.mimes) if not fobj: try: if realfname :", "logging.getLogger(__name__) class NhException(Exception): pass class indexer(object): \"\"\" An indexer knows how to parse", "__repr__(self): return \"<indexer %s.%s>\" %(self.__module__, self.__class__.__name__) def mime_match(mime, mdict): if mdict.has_key(mime): return (mime,", "Software Foundation, either version 3 of the # License, or (at your option)", "try: os.unlink(fname) except Exception: _logger.exception(\"Cannot unlink %s.\", fname) return res cntIndex = contentIndex()", "one indexer should be instantiated per file type. Override this class to add", "content or the real file, to index. Some parsers will work better with", "if not realfname and fname: try: os.unlink(fname) except Exception: _logger.exception(\"Cannot unlink %s.\", fname)", "try: if realfname : fname = realfname else: try: bname,ext = os.path.splitext(filename or", "Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # #", "fobj: res = (mime, fobj.indexContent(content,filename,fname or realfname) ) else: _logger.debug(\"Have no object, return", "_getDefMime(self, ext): \"\"\" Return a mimetype for this document type, ideally the closest", "easier. Try the optimal. 
\"\"\" res = '' try: if content != None:", "content) os.close(fd) pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result, _) = pop.communicate() mime2 =", "not-handled exception may be raised here return self._doIndexContent(content2) # last try, with a", "return (mime, mdict[mime]) if '/' in mime: mpat = mime.split('/')[0]+'/*' if mdict.has_key(mpat): return", "try: bname,ext = os.path.splitext(filename or 'test.tmp') except Exception: bname, ext = filename, 'tmp'", "program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import os import tempfile", "software: you can redistribute it and/or modify # it under the terms of", "not realfname and fname: try: os.unlink(fname) except Exception: _logger.exception(\"Cannot unlink %s.\", fname) return", "or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public", "self.mimes[content_type] elif filename: bname,ext = os.path.splitext(filename) if self.exts.has_key(ext): fobj = self.exts[ext] mime =", "else: try: bname,ext = os.path.splitext(filename or 'test.tmp') except Exception: bname, ext = filename,", "from subprocess import Popen, PIPE _logger = logging.getLogger(__name__) class NhException(Exception): pass class indexer(object):", "{} def register(self, obj): f = False for mime in obj._getMimeTypes(): self.mimes[mime] =", "\"<indexer %s.%s>\" %(self.__module__, self.__class__.__name__) def mime_match(mime, mdict): if mdict.has_key(mime): return (mime, mdict[mime]) if", "doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False): fobj = None fname = None mime", "parse the content of some file. Typically, one indexer should be instantiated per", "option) any later version. # # This program is distributed in the hope", "Some parsers will work better with the actual content, others parse a file", "# along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging", "the actual content, others parse a file easier. Try the optimal. \"\"\" res", "the temporary file still exists now. mime,fobj = mime_match(mime2, self.mimes) if not mime:", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General", "return self._doIndexFile(realfile) except NhException: pass fp = open(realfile,'rb') try: content2 = fp.read() finally:", "raise NhException(\"Content cannot be handled here.\") def _doIndexFile(self, fpath): raise NhException(\"Content cannot be", "\"\"\" def _getMimeTypes(self): \"\"\" Return supported mimetypes \"\"\" return [] def _getExtensions(self): return", "= pop.communicate() mime2 = result.split(';')[0] _logger.debug('File gives us: %s', mime2) # Note that", "to add more functionality. Note that you should only override the Content or", "__init__(self): self.mimes = {} self.exts = {} def register(self, obj): f = False", "if fobj: res = (mime, fobj.indexContent(content,filename,fname or realfname) ) else: _logger.debug(\"Have no object,", "as # published by the Free Software Foundation, either version 3 of the", "bname,ext = os.path.splitext(filename or 'test.tmp') except Exception: bname, ext = filename, 'tmp' fd,", "PIPE _logger = logging.getLogger(__name__) class NhException(Exception): pass class indexer(object): \"\"\" An indexer knows", "True for ext in obj._getExtensions(): self.exts[ext] = obj f = True if f:", "instantiated per file type. 
Override this class to add more functionality. Note that", "def _getDefMime(self, ext): \"\"\" Return a mimetype for this document type, ideally the", "have received a copy of the GNU Affero General Public License # along", "or the real file, to index. Some parsers will work better with the", "= self._getMimeTypes(); if len (mts): return mts[0] return None def indexContent(self, content, filename=None,", "try: content2 = fp.read() finally: fp.close() # The not-handled exception may be raised", "= tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) res = self._doIndexFile(rfname) os.unlink(rfname) return res except NhException:", "# ############################################################################## import logging import os import tempfile from subprocess import Popen, PIPE", "indexer knows how to parse the content of some file. Typically, one indexer", "= fp.read() finally: fp.close() # The not-handled exception may be raised here return", "fpath): raise NhException(\"Content cannot be handled here.\") def __repr__(self): return \"<indexer %s.%s>\" %(self.__module__,", "register(self, obj): f = False for mime in obj._getMimeTypes(): self.mimes[mime] = obj f", "return self._doIndexContent(content2) # last try, with a tmp file if content: try: fname,ext", "License as # published by the Free Software Foundation, either version 3 of", "free software: you can redistribute it and/or modify # it under the terms", "in obj._getExtensions(): self.exts[ext] = obj f = True if f: _logger.debug('Register content indexer:", "of the GNU Affero General Public License # along with this program. If", "self._doIndexContent(content) except NhException: pass if realfile != None: try: return self._doIndexFile(realfile) except NhException:", "= (mime, fobj.indexContent(content,filename,fname or realfname) ) else: _logger.debug(\"Have no object, return (%s, None).\",", "if content != None: return self._doIndexContent(content) except NhException: pass if realfile != None:", "return self._doIndexContent(content) except NhException: pass if realfile != None: try: return self._doIndexFile(realfile) except", "FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License", "with the actual content, others parse a file easier. Try the optimal. \"\"\"", "res = (mime, fobj.indexContent(content,filename,fname or realfname) ) else: _logger.debug(\"Have no object, return (%s,", "fname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result, _)", "_doIndexFile(self, fpath): raise NhException(\"Content cannot be handled here.\") def __repr__(self): return \"<indexer %s.%s>\"", "self._doIndexFile(rfname) os.unlink(rfname) return res except NhException: pass raise NhException('No appropriate method to index", "return [] def _getExtensions(self): return [] def _getDefMime(self, ext): \"\"\" Return a mimetype", "temporary file still exists now. mime,fobj = mime_match(mime2, self.mimes) if not mime: mime", "content, filename=None, content_type=None, realfname=None, debug=False): fobj = None fname = None mime =", "Try the optimal. 
\"\"\" res = '' try: if content != None: return", "a copy of the GNU Affero General Public License # along with this", "handled here.\") def _doIndexFile(self, fpath): raise NhException(\"Content cannot be handled here.\") def __repr__(self):", "or extension.\") def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False): fobj = None fname", "_getExtensions(self): return [] def _getDefMime(self, ext): \"\"\" Return a mimetype for this document", "with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import os", "2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can", "an optimal result. \"\"\" def _getMimeTypes(self): \"\"\" Return supported mimetypes \"\"\" return []", "raise Exception(\"Your indexer should at least support a mimetype or extension.\") def doIndex(self,", "this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import os import", "to parse the content of some file. Typically, one indexer should be instantiated", "of the # License, or (at your option) any later version. # #", "'/' in mime: mpat = mime.split('/')[0]+'/*' if mdict.has_key(mpat): return (mime, mdict[mpat]) return (None,", "warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #", "closest to the extension ext. \"\"\" mts = self._getMimeTypes(); if len (mts): return", "_) = pop.communicate() mime2 = result.split(';')[0] _logger.debug('File gives us: %s', mime2) # Note", "details. # # You should have received a copy of the GNU Affero", "except NhException: pass fp = open(realfile,'rb') try: content2 = fp.read() finally: fp.close() #", "fp.read() finally: fp.close() # The not-handled exception may be raised here return self._doIndexContent(content2)", "content indexer: %r.', obj) if not f: raise Exception(\"Your indexer should at least", "pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result, _) = pop.communicate() mime2 = result.split(';')[0] _logger.debug('File", "program is free software: you can redistribute it and/or modify # it under", "result. \"\"\" def _getMimeTypes(self): \"\"\" Return supported mimetypes \"\"\" return [] def _getExtensions(self):", "functionality. Note that you should only override the Content or the File methods", "that the temporary file still exists now. mime,fobj = mime_match(mime2, self.mimes) if not", "class to add more functionality. Note that you should only override the Content", "realfname and fname: try: os.unlink(fname) except Exception: _logger.exception(\"Cannot unlink %s.\", fname) return res", "indexContent(self, content, filename=None, realfile=None): \"\"\" Use either content or the real file, to", "mime = content_type fobj = self.mimes[content_type] elif filename: bname,ext = os.path.splitext(filename) if self.exts.has_key(ext):", "# # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL", "Exception: _logger.exception('Cannot determine mime type.') try: if fobj: res = (mime, fobj.indexContent(content,filename,fname or", "type.') try: if fobj: res = (mime, fobj.indexContent(content,filename,fname or realfname) ) else: _logger.debug(\"Have", "this document type, ideally the closest to the extension ext. 
\"\"\" mts =", "mdict[mime]) if '/' in mime: mpat = mime.split('/')[0]+'/*' if mdict.has_key(mpat): return (mime, mdict[mpat])", "class NhException(Exception): pass class indexer(object): \"\"\" An indexer knows how to parse the", "be instantiated per file type. Override this class to add more functionality. Note", "self.mimes[mime] = obj f = True for ext in obj._getExtensions(): self.exts[ext] = obj", "fobj: mime,fobj = mime_match(content_type, self.mimes) if not fobj: try: if realfname : fname", "realfname : fname = realfname else: try: bname,ext = os.path.splitext(filename or 'test.tmp') except", "f = True for ext in obj._getExtensions(): self.exts[ext] = obj f = True", "or 'test.tmp') except Exception: bname, ext = filename, 'tmp' fd, fname = tempfile.mkstemp(suffix=ext)", "self._doIndexContent(content2) # last try, with a tmp file if content: try: fname,ext =", "res = '' try: if content != None: return self._doIndexContent(content) except NhException: pass", "pass fp = open(realfile,'rb') try: content2 = fp.read() finally: fp.close() # The not-handled", "extension.\") def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False): fobj = None fname =", "filename, fname or realfname) res = (mime, '') # If we created a", "f = False for mime in obj._getMimeTypes(): self.mimes[mime] = obj f = True", "= {} def register(self, obj): f = False for mime in obj._getMimeTypes(): self.mimes[mime]", "Return a mimetype for this document type, ideally the closest to the extension", "# You should have received a copy of the GNU Affero General Public", "content, others parse a file easier. Try the optimal. \"\"\" res = ''", ") else: _logger.debug(\"Have no object, return (%s, None).\", mime) res = (mime, '')", "gives us: %s', mime2) # Note that the temporary file still exists now.", "res except NhException: pass raise NhException('No appropriate method to index file.') def _doIndexContent(self,", "it will be useful, # but WITHOUT ANY WARRANTY; without even the implied", "return \"<indexer %s.%s>\" %(self.__module__, self.__class__.__name__) def mime_match(mime, mdict): if mdict.has_key(mime): return (mime, mdict[mime])", "not mime: mime = mime2 except Exception: _logger.exception('Cannot determine mime type.') try: if", "a mimetype or extension.\") def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False): fobj =", "fd, fname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result,", "Popen, PIPE _logger = logging.getLogger(__name__) class NhException(Exception): pass class indexer(object): \"\"\" An indexer", "None if content_type and self.mimes.has_key(content_type): mime = content_type fobj = self.mimes[content_type] elif filename:", "GNU Affero General Public License for more details. # # You should have", "General Public License for more details. # # You should have received a", "mime.split('/')[0]+'/*' if mdict.has_key(mpat): return (mime, mdict[mpat]) return (None, None) class contentIndex(object): def __init__(self):", "a file easier. Try the optimal. \"\"\" res = '' try: if content", "the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See", "indexer should be instantiated per file type. Override this class to add more", "ideally the closest to the extension ext. 
\"\"\" mts = self._getMimeTypes(); if len", "a tmp file, unlink it now if not realfname and fname: try: os.unlink(fname)", "(mime, mdict[mpat]) return (None, None) class contentIndex(object): def __init__(self): self.mimes = {} self.exts", "mime) res = (mime, '') except Exception: _logger.exception(\"Cannot index file %s (%s).\", filename,", "mime: mime = mime2 except Exception: _logger.exception('Cannot determine mime type.') try: if fobj:", "import logging import os import tempfile from subprocess import Popen, PIPE _logger =", "None def indexContent(self, content, filename=None, realfile=None): \"\"\" Use either content or the real", "= mime.split('/')[0]+'/*' if mdict.has_key(mpat): return (mime, mdict[mpat]) return (None, None) class contentIndex(object): def", "method to index file.') def _doIndexContent(self, content): raise NhException(\"Content cannot be handled here.\")", "and/or modify # it under the terms of the GNU Affero General Public", "file %s (%s).\", filename, fname or realfname) res = (mime, '') # If", "PURPOSE. See the # GNU Affero General Public License for more details. #", "ext): \"\"\" Return a mimetype for this document type, ideally the closest to", "for ext in obj._getExtensions(): self.exts[ext] = obj f = True if f: _logger.debug('Register", "ext = filename, 'tmp' fd, fname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) pop =", "File methods that give an optimal result. \"\"\" def _getMimeTypes(self): \"\"\" Return supported", "may be raised here return self._doIndexContent(content2) # last try, with a tmp file", "%(self.__module__, self.__class__.__name__) def mime_match(mime, mdict): if mdict.has_key(mime): return (mime, mdict[mime]) if '/' in", "Return supported mimetypes \"\"\" return [] def _getExtensions(self): return [] def _getDefMime(self, ext):", "mpat = mime.split('/')[0]+'/*' if mdict.has_key(mpat): return (mime, mdict[mpat]) return (None, None) class contentIndex(object):", "in mime: mpat = mime.split('/')[0]+'/*' if mdict.has_key(mpat): return (mime, mdict[mpat]) return (None, None)", "# The not-handled exception may be raised here return self._doIndexContent(content2) # last try,", "Free Software Foundation, either version 3 of the # License, or (at your", "= filename and os.path.splitext(filename) or ('','') fd, rfname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd)", "at least support a mimetype or extension.\") def doIndex(self, content, filename=None, content_type=None, realfname=None,", "Exception: bname, ext = filename, 'tmp' fd, fname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd)", "except Exception: _logger.exception('Cannot determine mime type.') try: if fobj: res = (mime, fobj.indexContent(content,filename,fname", "except Exception: _logger.exception(\"Cannot index file %s (%s).\", filename, fname or realfname) res =", "mimetype or extension.\") def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False): fobj = None", "except NhException: pass raise NhException('No appropriate method to index file.') def _doIndexContent(self, content):", "to index file.') def _doIndexContent(self, content): raise NhException(\"Content cannot be handled here.\") def", "FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for", "for more details. 
# # You should have received a copy of the", "mime = fobj._getDefMime(ext) if content_type and not fobj: mime,fobj = mime_match(content_type, self.mimes) if", "mime2 = result.split(';')[0] _logger.debug('File gives us: %s', mime2) # Note that the temporary", "it and/or modify # it under the terms of the GNU Affero General", "or (at your option) any later version. # # This program is distributed", "least support a mimetype or extension.\") def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False):", "copy of the GNU Affero General Public License # along with this program.", "class indexer(object): \"\"\" An indexer knows how to parse the content of some", "_logger = logging.getLogger(__name__) class NhException(Exception): pass class indexer(object): \"\"\" An indexer knows how", "rfname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) res = self._doIndexFile(rfname) os.unlink(rfname) return res except", "None mime = None if content_type and self.mimes.has_key(content_type): mime = content_type fobj =", "appropriate method to index file.') def _doIndexContent(self, content): raise NhException(\"Content cannot be handled", "Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This", "no object, return (%s, None).\", mime) res = (mime, '') except Exception: _logger.exception(\"Cannot", "except Exception: _logger.exception(\"Cannot unlink %s.\", fname) return res cntIndex = contentIndex() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:", "\"\"\" Return supported mimetypes \"\"\" return [] def _getExtensions(self): return [] def _getDefMime(self,", "more details. # # You should have received a copy of the GNU", "Affero General Public License for more details. # # You should have received", "except NhException: pass if realfile != None: try: return self._doIndexFile(realfile) except NhException: pass", "of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU", "def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False): fobj = None fname = None", "None: return self._doIndexContent(content) except NhException: pass if realfile != None: try: return self._doIndexFile(realfile)", "optimal result. \"\"\" def _getMimeTypes(self): \"\"\" Return supported mimetypes \"\"\" return [] def", "parse a file easier. Try the optimal. \"\"\" res = '' try: if", "os.unlink(rfname) return res except NhException: pass raise NhException('No appropriate method to index file.')", "realfname) res = (mime, '') # If we created a tmp file, unlink", "return (None, None) class contentIndex(object): def __init__(self): self.mimes = {} self.exts = {}", "def _doIndexContent(self, content): raise NhException(\"Content cannot be handled here.\") def _doIndexFile(self, fpath): raise", "program is distributed in the hope that it will be useful, # but", ": fname = realfname else: try: bname,ext = os.path.splitext(filename or 'test.tmp') except Exception:", "distributed in the hope that it will be useful, # but WITHOUT ANY", "content_type=None, realfname=None, debug=False): fobj = None fname = None mime = None if", "pop.communicate() mime2 = result.split(';')[0] _logger.debug('File gives us: %s', mime2) # Note that the", "either version 3 of the # License, or (at your option) any later", "Affero General Public License as # published by the Free Software Foundation, either", "any later version. 
# # This program is distributed in the hope that", "= content_type fobj = self.mimes[content_type] elif filename: bname,ext = os.path.splitext(filename) if self.exts.has_key(ext): fobj", "indexer(object): \"\"\" An indexer knows how to parse the content of some file.", "mime2 except Exception: _logger.exception('Cannot determine mime type.') try: if fobj: res = (mime,", "if realfile != None: try: return self._doIndexFile(realfile) except NhException: pass fp = open(realfile,'rb')", "fobj._getDefMime(ext) if content_type and not fobj: mime,fobj = mime_match(content_type, self.mimes) if not fobj:", "pass class indexer(object): \"\"\" An indexer knows how to parse the content of", "(%s, None).\", mime) res = (mime, '') except Exception: _logger.exception(\"Cannot index file %s", "NhException(\"Content cannot be handled here.\") def __repr__(self): return \"<indexer %s.%s>\" %(self.__module__, self.__class__.__name__) def", "in obj._getMimeTypes(): self.mimes[mime] = obj f = True for ext in obj._getExtensions(): self.exts[ext]", "see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import os import tempfile from subprocess import", "# it under the terms of the GNU Affero General Public License as", "now. mime,fobj = mime_match(mime2, self.mimes) if not mime: mime = mime2 except Exception:", "License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import", "supported mimetypes \"\"\" return [] def _getExtensions(self): return [] def _getDefMime(self, ext): \"\"\"", "# # You should have received a copy of the GNU Affero General", "you can redistribute it and/or modify # it under the terms of the", "fobj.indexContent(content,filename,fname or realfname) ) else: _logger.debug(\"Have no object, return (%s, None).\", mime) res", "If we created a tmp file, unlink it now if not realfname and", "utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C)", "for this document type, ideally the closest to the extension ext. \"\"\" mts", "cannot be handled here.\") def _doIndexFile(self, fpath): raise NhException(\"Content cannot be handled here.\")", "obj) if not f: raise Exception(\"Your indexer should at least support a mimetype", "# Note that the temporary file still exists now. mime,fobj = mime_match(mime2, self.mimes)", "%s.%s>\" %(self.__module__, self.__class__.__name__) def mime_match(mime, mdict): if mdict.has_key(mime): return (mime, mdict[mime]) if '/'", "result.split(';')[0] _logger.debug('File gives us: %s', mime2) # Note that the temporary file still", "_doIndexContent(self, content): raise NhException(\"Content cannot be handled here.\") def _doIndexFile(self, fpath): raise NhException(\"Content", "'') except Exception: _logger.exception(\"Cannot index file %s (%s).\", filename, fname or realfname) res", "General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
#", "if self.exts.has_key(ext): fobj = self.exts[ext] mime = fobj._getDefMime(ext) if content_type and not fobj:", "= result.split(';')[0] _logger.debug('File gives us: %s', mime2) # Note that the temporary file", "last try, with a tmp file if content: try: fname,ext = filename and", "(mime, fobj.indexContent(content,filename,fname or realfname) ) else: _logger.debug(\"Have no object, return (%s, None).\", mime)", "Note that the temporary file still exists now. mime,fobj = mime_match(mime2, self.mimes) if", "(mts): return mts[0] return None def indexContent(self, content, filename=None, realfile=None): \"\"\" Use either", "# License, or (at your option) any later version. # # This program", "try, with a tmp file if content: try: fname,ext = filename and os.path.splitext(filename)", "= self.exts[ext] mime = fobj._getDefMime(ext) if content_type and not fobj: mime,fobj = mime_match(content_type,", "self.exts.has_key(ext): fobj = self.exts[ext] mime = fobj._getDefMime(ext) if content_type and not fobj: mime,fobj", "General Public License as # published by the Free Software Foundation, either version", "hope that it will be useful, # but WITHOUT ANY WARRANTY; without even", "= True for ext in obj._getExtensions(): self.exts[ext] = obj f = True if", "mdict.has_key(mime): return (mime, mdict[mime]) if '/' in mime: mpat = mime.split('/')[0]+'/*' if mdict.has_key(mpat):", "contentIndex(object): def __init__(self): self.mimes = {} self.exts = {} def register(self, obj): f", "along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import", "Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result, _) = pop.communicate() mime2 = result.split(';')[0] _logger.debug('File gives us:", "-*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution #", "f: raise Exception(\"Your indexer should at least support a mimetype or extension.\") def", "file.') def _doIndexContent(self, content): raise NhException(\"Content cannot be handled here.\") def _doIndexFile(self, fpath):", "# If we created a tmp file, unlink it now if not realfname", "we created a tmp file, unlink it now if not realfname and fname:", "\"\"\" Return a mimetype for this document type, ideally the closest to the", "a tmp file if content: try: fname,ext = filename and os.path.splitext(filename) or ('','')", "with a tmp file if content: try: fname,ext = filename and os.path.splitext(filename) or", "raise NhException('No appropriate method to index file.') def _doIndexContent(self, content): raise NhException(\"Content cannot", "filename: bname,ext = os.path.splitext(filename) if self.exts.has_key(ext): fobj = self.exts[ext] mime = fobj._getDefMime(ext) if", "\"\"\" return [] def _getExtensions(self): return [] def _getDefMime(self, ext): \"\"\" Return a", "NhException(Exception): pass class indexer(object): \"\"\" An indexer knows how to parse the content", "Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute", "pass raise NhException('No appropriate method to index file.') def _doIndexContent(self, content): raise NhException(\"Content", "indexer: %r.', obj) if not f: raise Exception(\"Your indexer should at least support", "(at your option) any later version. # # This program is distributed in", "Use either content or the real file, to index. 
Some parsers will work", "This program is distributed in the hope that it will be useful, #", "= (mime, '') except Exception: _logger.exception(\"Cannot index file %s (%s).\", filename, fname or", "= False for mime in obj._getMimeTypes(): self.mimes[mime] = obj f = True for", "version 3 of the # License, or (at your option) any later version.", "useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of #", "self.mimes = {} self.exts = {} def register(self, obj): f = False for", "obj f = True if f: _logger.debug('Register content indexer: %r.', obj) if not", "redistribute it and/or modify # it under the terms of the GNU Affero", "exception may be raised here return self._doIndexContent(content2) # last try, with a tmp", "filename and os.path.splitext(filename) or ('','') fd, rfname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) res", "NhException: pass raise NhException('No appropriate method to index file.') def _doIndexContent(self, content): raise", "and self.mimes.has_key(content_type): mime = content_type fobj = self.mimes[content_type] elif filename: bname,ext = os.path.splitext(filename)", "\"\"\" Use either content or the real file, to index. Some parsers will", "fobj = self.exts[ext] mime = fobj._getDefMime(ext) if content_type and not fobj: mime,fobj =", "mime_match(content_type, self.mimes) if not fobj: try: if realfname : fname = realfname else:", "stdout=PIPE) (result, _) = pop.communicate() mime2 = result.split(';')[0] _logger.debug('File gives us: %s', mime2)", "fp.close() # The not-handled exception may be raised here return self._doIndexContent(content2) # last", "bname,ext = os.path.splitext(filename) if self.exts.has_key(ext): fobj = self.exts[ext] mime = fobj._getDefMime(ext) if content_type", "mime2) # Note that the temporary file still exists now. mime,fobj = mime_match(mime2,", "exists now. mime,fobj = mime_match(mime2, self.mimes) if not mime: mime = mime2 except", "= fobj._getDefMime(ext) if content_type and not fobj: mime,fobj = mime_match(content_type, self.mimes) if not", "should have received a copy of the GNU Affero General Public License #", "mdict[mpat]) return (None, None) class contentIndex(object): def __init__(self): self.mimes = {} self.exts =", "index file %s (%s).\", filename, fname or realfname) res = (mime, '') #", "mdict.has_key(mpat): return (mime, mdict[mpat]) return (None, None) class contentIndex(object): def __init__(self): self.mimes =", "by the Free Software Foundation, either version 3 of the # License, or", "even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "= obj f = True if f: _logger.debug('Register content indexer: %r.', obj) if", "that give an optimal result. \"\"\" def _getMimeTypes(self): \"\"\" Return supported mimetypes \"\"\"", "real file, to index. Some parsers will work better with the actual content,", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU Affero", "'' try: if content != None: return self._doIndexContent(content) except NhException: pass if realfile", "# # This program is distributed in the hope that it will be", "# OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).", "-*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010", "############################################################################## import logging import os import tempfile from subprocess import Popen, PIPE _logger", "under the terms of the GNU Affero General Public License as # published", "terms of the GNU Affero General Public License as # published by the", "realfile != None: try: return self._doIndexFile(realfile) except NhException: pass fp = open(realfile,'rb') try:", "os.write(fd, content) os.close(fd) pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result, _) = pop.communicate() mime2", "None fname = None mime = None if content_type and self.mimes.has_key(content_type): mime =", "%s', mime2) # Note that the temporary file still exists now. mime,fobj =", "coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright", "= logging.getLogger(__name__) class NhException(Exception): pass class indexer(object): \"\"\" An indexer knows how to", "\"\"\" mts = self._getMimeTypes(); if len (mts): return mts[0] return None def indexContent(self,", "here.\") def _doIndexFile(self, fpath): raise NhException(\"Content cannot be handled here.\") def __repr__(self): return", "actual content, others parse a file easier. Try the optimal. \"\"\" res =", "def mime_match(mime, mdict): if mdict.has_key(mime): return (mime, mdict[mime]) if '/' in mime: mpat", "tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) res = self._doIndexFile(rfname) os.unlink(rfname) return res except NhException: pass", "be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of", "some file. Typically, one indexer should be instantiated per file type. Override this", "only override the Content or the File methods that give an optimal result.", "[] def _getExtensions(self): return [] def _getDefMime(self, ext): \"\"\" Return a mimetype for", "will work better with the actual content, others parse a file easier. Try", "or ('','') fd, rfname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) res = self._doIndexFile(rfname) os.unlink(rfname)", "the extension ext. 
\"\"\" mts = self._getMimeTypes(); if len (mts): return mts[0] return", "fobj = None fname = None mime = None if content_type and self.mimes.has_key(content_type):", "os.write(fd, content) os.close(fd) res = self._doIndexFile(rfname) os.unlink(rfname) return res except NhException: pass raise", "filename, 'tmp' fd, fname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) pop = Popen(['file','-b','--mime',fname], shell=False,", "obj f = True for ext in obj._getExtensions(): self.exts[ext] = obj f =", "is distributed in the hope that it will be useful, # but WITHOUT", "Public License as # published by the Free Software Foundation, either version 3", "Exception: _logger.exception(\"Cannot index file %s (%s).\", filename, fname or realfname) res = (mime,", "[] def _getDefMime(self, ext): \"\"\" Return a mimetype for this document type, ideally", "res = (mime, '') except Exception: _logger.exception(\"Cannot index file %s (%s).\", filename, fname", "in the hope that it will be useful, # but WITHOUT ANY WARRANTY;", "now if not realfname and fname: try: os.unlink(fname) except Exception: _logger.exception(\"Cannot unlink %s.\",", "tmp file if content: try: fname,ext = filename and os.path.splitext(filename) or ('','') fd,", "os import tempfile from subprocess import Popen, PIPE _logger = logging.getLogger(__name__) class NhException(Exception):", "This program is free software: you can redistribute it and/or modify # it", "import os import tempfile from subprocess import Popen, PIPE _logger = logging.getLogger(__name__) class", "filename=None, realfile=None): \"\"\" Use either content or the real file, to index. Some", "received a copy of the GNU Affero General Public License # along with", "mime: mpat = mime.split('/')[0]+'/*' if mdict.has_key(mpat): return (mime, mdict[mpat]) return (None, None) class", "GNU Affero General Public License # along with this program. 
If not, see", "return res except NhException: pass raise NhException('No appropriate method to index file.') def", "= realfname else: try: bname,ext = os.path.splitext(filename or 'test.tmp') except Exception: bname, ext", "len (mts): return mts[0] return None def indexContent(self, content, filename=None, realfile=None): \"\"\" Use", "if not mime: mime = mime2 except Exception: _logger.exception('Cannot determine mime type.') try:", "('','') fd, rfname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) res = self._doIndexFile(rfname) os.unlink(rfname) return", "if mdict.has_key(mpat): return (mime, mdict[mpat]) return (None, None) class contentIndex(object): def __init__(self): self.mimes", "%r.', obj) if not f: raise Exception(\"Your indexer should at least support a", "mime_match(mime, mdict): if mdict.has_key(mime): return (mime, mdict[mime]) if '/' in mime: mpat =", "realfname else: try: bname,ext = os.path.splitext(filename or 'test.tmp') except Exception: bname, ext =", "realfname) ) else: _logger.debug(\"Have no object, return (%s, None).\", mime) res = (mime,", "True if f: _logger.debug('Register content indexer: %r.', obj) if not f: raise Exception(\"Your", "mime,fobj = mime_match(mime2, self.mimes) if not mime: mime = mime2 except Exception: _logger.exception('Cannot", "class contentIndex(object): def __init__(self): self.mimes = {} self.exts = {} def register(self, obj):", "f: _logger.debug('Register content indexer: %r.', obj) if not f: raise Exception(\"Your indexer should", "return (%s, None).\", mime) res = (mime, '') except Exception: _logger.exception(\"Cannot index file", "created a tmp file, unlink it now if not realfname and fname: try:", "modify # it under the terms of the GNU Affero General Public License", "if realfname : fname = realfname else: try: bname,ext = os.path.splitext(filename or 'test.tmp')", "unlink it now if not realfname and fname: try: os.unlink(fname) except Exception: _logger.exception(\"Cannot", "= mime_match(mime2, self.mimes) if not mime: mime = mime2 except Exception: _logger.exception('Cannot determine", "you should only override the Content or the File methods that give an", "can redistribute it and/or modify # it under the terms of the GNU", "methods that give an optimal result. \"\"\" def _getMimeTypes(self): \"\"\" Return supported mimetypes", "raised here return self._doIndexContent(content2) # last try, with a tmp file if content:", "# This program is distributed in the hope that it will be useful,", "fname or realfname) res = (mime, '') # If we created a tmp", "but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or", "should at least support a mimetype or extension.\") def doIndex(self, content, filename=None, content_type=None,", "if len (mts): return mts[0] return None def indexContent(self, content, filename=None, realfile=None): \"\"\"", "self.mimes) if not mime: mime = mime2 except Exception: _logger.exception('Cannot determine mime type.')", "try: fname,ext = filename and os.path.splitext(filename) or ('','') fd, rfname = tempfile.mkstemp(suffix=ext) os.write(fd,", "content_type and not fobj: mime,fobj = mime_match(content_type, self.mimes) if not fobj: try: if", "(mime, '') # If we created a tmp file, unlink it now if", "Typically, one indexer should be instantiated per file type. 
Override this class to", "_logger.exception('Cannot determine mime type.') try: if fobj: res = (mime, fobj.indexContent(content,filename,fname or realfname)", "tmp file, unlink it now if not realfname and fname: try: os.unlink(fname) except", "'test.tmp') except Exception: bname, ext = filename, 'tmp' fd, fname = tempfile.mkstemp(suffix=ext) os.write(fd,", "or the File methods that give an optimal result. \"\"\" def _getMimeTypes(self): \"\"\"", "type. Override this class to add more functionality. Note that you should only", "NhException('No appropriate method to index file.') def _doIndexContent(self, content): raise NhException(\"Content cannot be", "bname, ext = filename, 'tmp' fd, fname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) pop", "give an optimal result. \"\"\" def _getMimeTypes(self): \"\"\" Return supported mimetypes \"\"\" return", "The not-handled exception may be raised here return self._doIndexContent(content2) # last try, with", "tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result, _) = pop.communicate()", "_logger.exception(\"Cannot index file %s (%s).\", filename, fname or realfname) res = (mime, '')", "return [] def _getDefMime(self, ext): \"\"\" Return a mimetype for this document type,", "os.path.splitext(filename or 'test.tmp') except Exception: bname, ext = filename, 'tmp' fd, fname =", "object, return (%s, None).\", mime) res = (mime, '') except Exception: _logger.exception(\"Cannot index", "that you should only override the Content or the File methods that give", "= filename, 'tmp' fd, fname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) pop = Popen(['file','-b','--mime',fname],", "License for more details. # # You should have received a copy of", "elif filename: bname,ext = os.path.splitext(filename) if self.exts.has_key(ext): fobj = self.exts[ext] mime = fobj._getDefMime(ext)", "if not fobj: try: if realfname : fname = realfname else: try: bname,ext", "self._getMimeTypes(); if len (mts): return mts[0] return None def indexContent(self, content, filename=None, realfile=None):", "the GNU Affero General Public License as # published by the Free Software", "= None if content_type and self.mimes.has_key(content_type): mime = content_type fobj = self.mimes[content_type] elif", "def _getExtensions(self): return [] def _getDefMime(self, ext): \"\"\" Return a mimetype for this", "os.close(fd) res = self._doIndexFile(rfname) os.unlink(rfname) return res except NhException: pass raise NhException('No appropriate", "the closest to the extension ext. 
\"\"\" mts = self._getMimeTypes(); if len (mts):", "# published by the Free Software Foundation, either version 3 of the #", "of the GNU Affero General Public License as # published by the Free", "content_type and self.mimes.has_key(content_type): mime = content_type fobj = self.mimes[content_type] elif filename: bname,ext =", "= '' try: if content != None: return self._doIndexContent(content) except NhException: pass if", "content, filename=None, realfile=None): \"\"\" Use either content or the real file, to index.", "it under the terms of the GNU Affero General Public License as #", "# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution", "is free software: you can redistribute it and/or modify # it under the", "= mime2 except Exception: _logger.exception('Cannot determine mime type.') try: if fobj: res =", "os.close(fd) pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result, _) = pop.communicate() mime2 = result.split(';')[0]", "this class to add more functionality. Note that you should only override the", "= (mime, '') # If we created a tmp file, unlink it now", "realfname=None, debug=False): fobj = None fname = None mime = None if content_type", "= tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE) (result, _) =", "and os.path.splitext(filename) or ('','') fd, rfname = tempfile.mkstemp(suffix=ext) os.write(fd, content) os.close(fd) res =", "how to parse the content of some file. Typically, one indexer should be", "Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program", "# last try, with a tmp file if content: try: fname,ext = filename", "fobj: try: if realfname : fname = realfname else: try: bname,ext = os.path.splitext(filename", "False for mime in obj._getMimeTypes(): self.mimes[mime] = obj f = True for ext", "NhException: pass if realfile != None: try: return self._doIndexFile(realfile) except NhException: pass fp", "try: if fobj: res = (mime, fobj.indexContent(content,filename,fname or realfname) ) else: _logger.debug(\"Have no", "You should have received a copy of the GNU Affero General Public License", "if f: _logger.debug('Register content indexer: %r.', obj) if not f: raise Exception(\"Your indexer", "finally: fp.close() # The not-handled exception may be raised here return self._doIndexContent(content2) #", "Foundation, either version 3 of the # License, or (at your option) any", "debug=False): fobj = None fname = None mime = None if content_type and", "def _getMimeTypes(self): \"\"\" Return supported mimetypes \"\"\" return [] def _getExtensions(self): return []", "work better with the actual content, others parse a file easier. Try the", "mime_match(mime2, self.mimes) if not mime: mime = mime2 except Exception: _logger.exception('Cannot determine mime", "fp = open(realfile,'rb') try: content2 = fp.read() finally: fp.close() # The not-handled exception", "None) class contentIndex(object): def __init__(self): self.mimes = {} self.exts = {} def register(self,", "knows how to parse the content of some file. Typically, one indexer should", "if content_type and not fobj: mime,fobj = mime_match(content_type, self.mimes) if not fobj: try:", "the GNU Affero General Public License # along with this program. If not,", "logging import os import tempfile from subprocess import Popen, PIPE _logger = logging.getLogger(__name__)", "others parse a file easier. 
Try the optimal. \"\"\" res = '' try:", "NhException: pass fp = open(realfile,'rb') try: content2 = fp.read() finally: fp.close() # The", "extension ext. \"\"\" mts = self._getMimeTypes(); if len (mts): return mts[0] return None", "your option) any later version. # # This program is distributed in the", "or realfname) res = (mime, '') # If we created a tmp file,", "the real file, to index. Some parsers will work better with the actual", "try: if content != None: return self._doIndexContent(content) except NhException: pass if realfile !=", "import tempfile from subprocess import Popen, PIPE _logger = logging.getLogger(__name__) class NhException(Exception): pass", "better with the actual content, others parse a file easier. Try the optimal.", "%s (%s).\", filename, fname or realfname) res = (mime, '') # If we", "self.exts[ext] mime = fobj._getDefMime(ext) if content_type and not fobj: mime,fobj = mime_match(content_type, self.mimes)", "ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR", "later version. # # This program is distributed in the hope that it", "either content or the real file, to index. Some parsers will work better", "See the # GNU Affero General Public License for more details. # #", "_logger.debug('File gives us: %s', mime2) # Note that the temporary file still exists", "file if content: try: fname,ext = filename and os.path.splitext(filename) or ('','') fd, rfname", "file easier. Try the optimal. \"\"\" res = '' try: if content !=", "WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS", "content != None: return self._doIndexContent(content) except NhException: pass if realfile != None: try:", "obj._getMimeTypes(): self.mimes[mime] = obj f = True for ext in obj._getExtensions(): self.exts[ext] =", "\"\"\" res = '' try: if content != None: return self._doIndexContent(content) except NhException:", "the File methods that give an optimal result. \"\"\" def _getMimeTypes(self): \"\"\" Return", "# # This program is free software: you can redistribute it and/or modify", "!= None: return self._doIndexContent(content) except NhException: pass if realfile != None: try: return", "to the extension ext. \"\"\" mts = self._getMimeTypes(); if len (mts): return mts[0]", "Public License for more details. # # You should have received a copy", "should be instantiated per file type. Override this class to add more functionality.", "of some file. Typically, one indexer should be instantiated per file type. Override", "res = (mime, '') # If we created a tmp file, unlink it", "SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it", "except Exception: bname, ext = filename, 'tmp' fd, fname = tempfile.mkstemp(suffix=ext) os.write(fd, content)", "License, or (at your option) any later version. # # This program is", "\"\"\" An indexer knows how to parse the content of some file. Typically,", "= {} self.exts = {} def register(self, obj): f = False for mime", "the content of some file. Typically, one indexer should be instantiated per file", "os.unlink(fname) except Exception: _logger.exception(\"Cannot unlink %s.\", fname) return res cntIndex = contentIndex() #", "not, see <http://www.gnu.org/licenses/>. 
# ############################################################################## import logging import os import tempfile from subprocess", "GNU Affero General Public License as # published by the Free Software Foundation,", "fname: try: os.unlink(fname) except Exception: _logger.exception(\"Cannot unlink %s.\", fname) return res cntIndex =", "for mime in obj._getMimeTypes(): self.mimes[mime] = obj f = True for ext in", "def __init__(self): self.mimes = {} self.exts = {} def register(self, obj): f =", "mts[0] return None def indexContent(self, content, filename=None, realfile=None): \"\"\" Use either content or", "(mime, mdict[mime]) if '/' in mime: mpat = mime.split('/')[0]+'/*' if mdict.has_key(mpat): return (mime,", "PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details.", "None).\", mime) res = (mime, '') except Exception: _logger.exception(\"Cannot index file %s (%s).\",", "the terms of the GNU Affero General Public License as # published by", "Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software:", "realfile=None): \"\"\" Use either content or the real file, to index. Some parsers", "open(realfile,'rb') try: content2 = fp.read() finally: fp.close() # The not-handled exception may be", "content): raise NhException(\"Content cannot be handled here.\") def _doIndexFile(self, fpath): raise NhException(\"Content cannot", "f = True if f: _logger.debug('Register content indexer: %r.', obj) if not f:", "if not f: raise Exception(\"Your indexer should at least support a mimetype or", "mimetype for this document type, ideally the closest to the extension ext. \"\"\"", "A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more", "NhException(\"Content cannot be handled here.\") def _doIndexFile(self, fpath): raise NhException(\"Content cannot be handled", "or realfname) ) else: _logger.debug(\"Have no object, return (%s, None).\", mime) res =", "ext in obj._getExtensions(): self.exts[ext] = obj f = True if f: _logger.debug('Register content", "_logger.debug('Register content indexer: %r.', obj) if not f: raise Exception(\"Your indexer should at", "will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty", "be raised here return self._doIndexContent(content2) # last try, with a tmp file if", "be handled here.\") def __repr__(self): return \"<indexer %s.%s>\" %(self.__module__, self.__class__.__name__) def mime_match(mime, mdict):", "file still exists now. mime,fobj = mime_match(mime2, self.mimes) if not mime: mime =", "the Content or the File methods that give an optimal result. \"\"\" def", "def register(self, obj): f = False for mime in obj._getMimeTypes(): self.mimes[mime] = obj", "= os.path.splitext(filename) if self.exts.has_key(ext): fobj = self.exts[ext] mime = fobj._getDefMime(ext) if content_type and", "if content: try: fname,ext = filename and os.path.splitext(filename) or ('','') fd, rfname =", "= open(realfile,'rb') try: content2 = fp.read() finally: fp.close() # The not-handled exception may", "# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY", "to index. 
Some parsers will work better with the actual content, others parse", "and not fobj: mime,fobj = mime_match(content_type, self.mimes) if not fobj: try: if realfname", "_logger.debug(\"Have no object, return (%s, None).\", mime) res = (mime, '') except Exception:", "pass if realfile != None: try: return self._doIndexFile(realfile) except NhException: pass fp =", "fobj = self.mimes[content_type] elif filename: bname,ext = os.path.splitext(filename) if self.exts.has_key(ext): fobj = self.exts[ext]", "# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free", "If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import os import tempfile from" ]
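
# --- Usage sketch (added for illustration; not part of the original module).
# A minimal concrete indexer and one round trip through the cntIndex registry.
# PlainTextIndexer and the sample payload are hypothetical names, shown only
# to demonstrate the register()/doIndex() flow; real indexers override
# _doIndexContent()/_doIndexFile() with format-specific extraction logic.

class PlainTextIndexer(indexer):
    def _getMimeTypes(self):
        return ['text/plain']

    def _getExtensions(self):
        return ['.txt']

    def _doIndexContent(self, content):
        # Plain text is already indexable; return it unchanged.
        return content


if __name__ == '__main__':
    cntIndex.register(PlainTextIndexer())
    # Matched by the '.txt' extension; _getDefMime() supplies the mime type.
    print cntIndex.doIndex('hello world', filename='note.txt')
    # -> ('text/plain', 'hello world')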
#!/usr/bin/env python3

from __future__ import print_function
import morfessor
import sys
import logging
import lzma
import os
import math


def main(allowed_chars_file, model):
    # Single characters that may appear in words (one character per line).
    allowed_chars = {line.strip() for line in open(allowed_chars_file, encoding='utf-8')
                     if len(line.strip()) == 1}
    model = morfessor.MorfessorIO().read_any_model(model)
    for line in sys.stdin:
        word = line.strip()
        parts = model.viterbi_segment(word)[0]
        print(word, end=' ')
        print(" ".join(parts).replace("<unk>", "<UNK>"))


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    if len(sys.argv) != 3:
        print("usage: python3 create_morf_wordmap.py <allowed-characters-file> <morfessor-model> < word-list")
        print("e.g.: python3 create_morf_wordmap.py data/kws_prep/allowed_chars data/kws_prep/morf/model.bin < data/kws_prep/dev.words")
        print("This script prints, to stdout, each word and its morpheme constituents according to the Morfessor model.")
        sys.exit(-1)
    main(sys.argv[1], sys.argv[2])
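
# --- Usage sketch (added for illustration; not part of the original script).
# How a model.bin like the one read above can be produced with the Morfessor
# 2.0 Python API; 'corpus.txt' and train_toy_model are hypothetical names.
import morfessor


def train_toy_model(corpus_path='corpus.txt', model_path='model.bin'):
    io = morfessor.MorfessorIO()
    train_data = list(io.read_corpus_file(corpus_path))
    model = morfessor.BaselineModel()
    model.load_data(train_data)
    model.train_batch()
    io.write_binary_model_file(model_path, model)
    # viterbi_segment() returns (constructions, log_probability);
    # the wordmap script above keeps only the constructions.
    return model.viterbi_segment('uncommonly')[0]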
from __future__ import print_function
from create_tree import *
import numpy as np
import random

DATA_DIR = "../data/"


def curriculum_depth(i, num_examples, max_depth):
    # Depth cap grows linearly with the example index i.
    curriculum_max_depth = int((max_depth * i) / num_examples)
    #print(i, curriculum_max_depth,)
    if curriculum_max_depth > 0:
        random_depth = 2 + np.random.randint(curriculum_max_depth)
    else:
        random_depth = 2
    #print("DEPTH = ", random_depth)
    return random_depth


def copy_t2t(depth):
    my_tree = generate_data(depth - 1)
    change_nts(my_tree)
    my_list = convert_to_list_inorder(my_tree, [])
    infix_tree = ' '.join(str(e) for e in my_list)
    #print(my_tree)
    return ([infix_tree, infix_tree])


def create_examples(num_examples, max_depth, function):
    data = []
    for i in range(num_examples):
        depth = max_depth
        if np.random.randint(2) == 0:
            depth = curriculum_depth(i, num_examples, max_depth)
        data.append(function(depth))
    return data


if __name__ == "__main__":
    num_examples = 1000
    max_depth = 5
    data_subset = "train"
    t2t_operation = "COPY"
    seed = 0
    # NOTE: we need both -- for reproducible trees...
    #np.random.seed(seed)
    #random.seed(seed)

    if t2t_operation == "COPY":
        data = create_examples(num_examples, max_depth, function=copy_t2t)
        trans = open(DATA_DIR + data_subset + '.copy', 'w')
    elif t2t_operation == "RELABEL_1":
        # Currently mirrors the COPY task.
        data = create_examples(num_examples, max_depth, function=copy_t2t)
        trans = open(DATA_DIR + data_subset + '.copy', 'w')

    orig = open(DATA_DIR + data_subset + '.orig', 'w')
    for i in range(num_examples):
        print(data[i][0], file=orig)
        print(data[i][1], file=trans)
"256 #operators = ['+','-','*','/'] #for i in range(1, max_num+1): # print >> orig_vocab,", "e in my_list) #print my_tree return ([infix_tree, infix_tree]) def create_examples(num_examples, max_depth, function): data", "+ 1 #print >> orig_vocab, ')', max_num + len(operators) + 2 #print >>", "\"../data/\" def curriculum_depth(i, num_examples, max_depth): curriculum_max_depth= int((max_depth*i)/num_examples) #print(i, curriculum_max_depth,) if curriculum_max_depth > 0:", "data = create_examples(num_examples,max_depth, function=copy_t2t) trans = open(DATA_DIR + data_subset + '.copy', 'w') orig", "0: random_depth = 2 + np.random.randint(curriculum_max_depth) else: random_depth = 2 #print(\"DEPTH = \",", "data = create_examples(num_examples,max_depth, function=copy_t2t) trans = open(DATA_DIR + data_subset + '.copy', 'w') elif", "operators[i], max_num+i+1 # print >> trans_vocab, operators[i], max_num+i+1 #print >> orig_vocab, '(', max_num", "+ len(operators) + 1 #print >> trans_vocab, ')', max_num + len(operators) + 2", "= max_depth if np.random.randint(2) == 0: depth = curriculum_depth(i, num_examples, max_depth) data.append(function(depth)) return", "convert_to_list_inorder(my_tree,[]) infix_tree = ' '.join(str(e) for e in my_list) #print my_tree return ([infix_tree,", "for e in my_list) #print my_tree return ([infix_tree, infix_tree]) def create_examples(num_examples, max_depth, function):", "= convert_to_list_inorder(my_tree,[]) infix_tree = ' '.join(str(e) for e in my_list) #print my_tree return", "[] for i in range(num_examples): depth = max_depth if np.random.randint(2) == 0: depth", "curriculum_depth(i, num_examples, max_depth): curriculum_max_depth= int((max_depth*i)/num_examples) #print(i, curriculum_max_depth,) if curriculum_max_depth > 0: random_depth =", "def copy_t2t(depth): my_tree = generate_data(depth-1) change_nts(my_tree) my_list = convert_to_list_inorder(my_tree,[]) infix_tree = ' '.join(str(e)", "= ['+','-','*','/'] #for i in range(1, max_num+1): # print >> orig_vocab, i, i", "max_num + len(operators) + 2 #print >> trans_vocab, '(', max_num + len(operators) +", "t2t_operation == \"COPY\": data = create_examples(num_examples,max_depth, function=copy_t2t) trans = open(DATA_DIR + data_subset +", "i, i #for i in range(len(operators)): # print >> orig_vocab, operators[i], max_num+i+1 #", "print >> orig_vocab, i, i # print >> trans_vocab, i, i #for i", "data.append(function(depth)) return data if __name__ == \"__main__\": num_examples = 1000 max_depth = 5", "import numpy as np import random DATA_DIR = \"../data/\" def curriculum_depth(i, num_examples, max_depth):", "t2t_operation = \"COPY\" seed = 0 #NOTE: we need both -- for reproducible", "= open(DATA_DIR + 'vocab.train.copy', 'w') #max_num = 256 #operators = ['+','-','*','/'] #for i", "#NOTE: we need both -- for reproducible trees... #numpy.random.seed(seed) #random.seed(seed) if t2t_operation ==", "both -- for reproducible trees... 
#numpy.random.seed(seed) #random.seed(seed) if t2t_operation == \"COPY\": data =", "i #for i in range(len(operators)): # print >> orig_vocab, operators[i], max_num+i+1 # print", "create_examples(num_examples, max_depth, function): data = [] for i in range(num_examples): depth = max_depth", "def create_examples(num_examples, max_depth, function): data = [] for i in range(num_examples): depth =", "max_num+1): # print >> orig_vocab, i, i # print >> trans_vocab, i, i", "# print >> trans_vocab, i, i #for i in range(len(operators)): # print >>", "'w') for i in range(num_examples): print(data[i][0], file=orig) print(data[i][1], file=trans) #orig_vocab = open(DATA_DIR +", "\"train\" t2t_operation = \"COPY\" seed = 0 #NOTE: we need both -- for", "open(DATA_DIR + data_subset + '.copy', 'w') orig = open(DATA_DIR + data_subset + '.orig',", "= ' '.join(str(e) for e in my_list) #print my_tree return ([infix_tree, infix_tree]) def", "for reproducible trees... #numpy.random.seed(seed) #random.seed(seed) if t2t_operation == \"COPY\": data = create_examples(num_examples,max_depth, function=copy_t2t)", "orig_vocab, operators[i], max_num+i+1 # print >> trans_vocab, operators[i], max_num+i+1 #print >> orig_vocab, '(',", "if curriculum_max_depth > 0: random_depth = 2 + np.random.randint(curriculum_max_depth) else: random_depth = 2", "'vocab.train.copy', 'w') #max_num = 256 #operators = ['+','-','*','/'] #for i in range(1, max_num+1):", "# print >> trans_vocab, operators[i], max_num+i+1 #print >> orig_vocab, '(', max_num + len(operators)", "#print(\"DEPTH = \", random_depth) return random_depth def copy_t2t(depth): my_tree = generate_data(depth-1) change_nts(my_tree) my_list", "create_examples(num_examples,max_depth, function=copy_t2t) trans = open(DATA_DIR + data_subset + '.copy', 'w') elif t2t_operation ==", "random_depth def copy_t2t(depth): my_tree = generate_data(depth-1) change_nts(my_tree) my_list = convert_to_list_inorder(my_tree,[]) infix_tree = '", "seed = 0 #NOTE: we need both -- for reproducible trees... 
#numpy.random.seed(seed) #random.seed(seed)", "#print >> orig_vocab, '(', max_num + len(operators) + 1 #print >> orig_vocab, ')',", "+ 2 #print >> trans_vocab, '(', max_num + len(operators) + 1 #print >>", "np.random.randint(2) == 0: depth = curriculum_depth(i, num_examples, max_depth) data.append(function(depth)) return data if __name__", "['+','-','*','/'] #for i in range(1, max_num+1): # print >> orig_vocab, i, i #", ">> trans_vocab, '(', max_num + len(operators) + 1 #print >> trans_vocab, ')', max_num", "#print(i, curriculum_max_depth,) if curriculum_max_depth > 0: random_depth = 2 + np.random.randint(curriculum_max_depth) else: random_depth", "= \"COPY\" seed = 0 #NOTE: we need both -- for reproducible trees...", "infix_tree = ' '.join(str(e) for e in my_list) #print my_tree return ([infix_tree, infix_tree])", "return ([infix_tree, infix_tree]) def create_examples(num_examples, max_depth, function): data = [] for i in", ">> trans_vocab, i, i #for i in range(len(operators)): # print >> orig_vocab, operators[i],", "open(DATA_DIR + data_subset + '.orig', 'w') for i in range(num_examples): print(data[i][0], file=orig) print(data[i][1],", "\"__main__\": num_examples = 1000 max_depth = 5 data_subset = \"train\" t2t_operation = \"COPY\"", "data_subset + '.copy', 'w') orig = open(DATA_DIR + data_subset + '.orig', 'w') for", "= \"../data/\" def curriculum_depth(i, num_examples, max_depth): curriculum_max_depth= int((max_depth*i)/num_examples) #print(i, curriculum_max_depth,) if curriculum_max_depth >", "range(num_examples): depth = max_depth if np.random.randint(2) == 0: depth = curriculum_depth(i, num_examples, max_depth)", "in range(num_examples): depth = max_depth if np.random.randint(2) == 0: depth = curriculum_depth(i, num_examples,", "#for i in range(1, max_num+1): # print >> orig_vocab, i, i # print", "= [] for i in range(num_examples): depth = max_depth if np.random.randint(2) == 0:", "elif t2t_operation == \"RELABEL_1\": data = create_examples(num_examples,max_depth, function=copy_t2t) trans = open(DATA_DIR + data_subset", "in range(1, max_num+1): # print >> orig_vocab, i, i # print >> trans_vocab,", "for i in range(num_examples): depth = max_depth if np.random.randint(2) == 0: depth =", "from create_tree import * import numpy as np import random DATA_DIR = \"../data/\"", "from __future__ import print_function from create_tree import * import numpy as np import", "if np.random.randint(2) == 0: depth = curriculum_depth(i, num_examples, max_depth) data.append(function(depth)) return data if", "2 #print(\"DEPTH = \", random_depth) return random_depth def copy_t2t(depth): my_tree = generate_data(depth-1) change_nts(my_tree)", "# print >> orig_vocab, i, i # print >> trans_vocab, i, i #for", "curriculum_depth(i, num_examples, max_depth) data.append(function(depth)) return data if __name__ == \"__main__\": num_examples = 1000", ">> orig_vocab, '(', max_num + len(operators) + 1 #print >> orig_vocab, ')', max_num", "curriculum_max_depth > 0: random_depth = 2 + np.random.randint(curriculum_max_depth) else: random_depth = 2 #print(\"DEPTH", "curriculum_max_depth,) if curriculum_max_depth > 0: random_depth = 2 + np.random.randint(curriculum_max_depth) else: random_depth =", "= 1000 max_depth = 5 data_subset = \"train\" t2t_operation = \"COPY\" seed =", "= open(DATA_DIR + data_subset + '.copy', 'w') elif t2t_operation == \"RELABEL_1\": data =", "generate_data(depth-1) change_nts(my_tree) my_list = convert_to_list_inorder(my_tree,[]) infix_tree = ' '.join(str(e) for e in my_list)", "i in 
range(num_examples): print(data[i][0], file=orig) print(data[i][1], file=trans) #orig_vocab = open(DATA_DIR + 'vocab.train.orig', 'w')", "#orig_vocab = open(DATA_DIR + 'vocab.train.orig', 'w') #trans_vocab = open(DATA_DIR + 'vocab.train.copy', 'w') #max_num", "import print_function from create_tree import * import numpy as np import random DATA_DIR", "i in range(num_examples): depth = max_depth if np.random.randint(2) == 0: depth = curriculum_depth(i,", "max_depth if np.random.randint(2) == 0: depth = curriculum_depth(i, num_examples, max_depth) data.append(function(depth)) return data", "print(data[i][0], file=orig) print(data[i][1], file=trans) #orig_vocab = open(DATA_DIR + 'vocab.train.orig', 'w') #trans_vocab = open(DATA_DIR", "orig_vocab, i, i # print >> trans_vocab, i, i #for i in range(len(operators)):", "'.copy', 'w') orig = open(DATA_DIR + data_subset + '.orig', 'w') for i in", "'.join(str(e) for e in my_list) #print my_tree return ([infix_tree, infix_tree]) def create_examples(num_examples, max_depth,", "([infix_tree, infix_tree]) def create_examples(num_examples, max_depth, function): data = [] for i in range(num_examples):", "'(', max_num + len(operators) + 1 #print >> trans_vocab, ')', max_num + len(operators)", "print >> trans_vocab, operators[i], max_num+i+1 #print >> orig_vocab, '(', max_num + len(operators) +", "reproducible trees... #numpy.random.seed(seed) #random.seed(seed) if t2t_operation == \"COPY\": data = create_examples(num_examples,max_depth, function=copy_t2t) trans", "max_num+i+1 # print >> trans_vocab, operators[i], max_num+i+1 #print >> orig_vocab, '(', max_num +", "need both -- for reproducible trees... #numpy.random.seed(seed) #random.seed(seed) if t2t_operation == \"COPY\": data", "\"RELABEL_1\": data = create_examples(num_examples,max_depth, function=copy_t2t) trans = open(DATA_DIR + data_subset + '.copy', 'w')", "infix_tree]) def create_examples(num_examples, max_depth, function): data = [] for i in range(num_examples): depth", "1 #print >> orig_vocab, ')', max_num + len(operators) + 2 #print >> trans_vocab,", "__name__ == \"__main__\": num_examples = 1000 max_depth = 5 data_subset = \"train\" t2t_operation", "\"COPY\" seed = 0 #NOTE: we need both -- for reproducible trees... #numpy.random.seed(seed)", "print >> trans_vocab, i, i #for i in range(len(operators)): # print >> orig_vocab,", "max_num+i+1 #print >> orig_vocab, '(', max_num + len(operators) + 1 #print >> orig_vocab,", "open(DATA_DIR + 'vocab.train.copy', 'w') #max_num = 256 #operators = ['+','-','*','/'] #for i in", "'w') orig = open(DATA_DIR + data_subset + '.orig', 'w') for i in range(num_examples):", "t2t_operation == \"RELABEL_1\": data = create_examples(num_examples,max_depth, function=copy_t2t) trans = open(DATA_DIR + data_subset +", "max_num + len(operators) + 1 #print >> orig_vocab, ')', max_num + len(operators) +", "else: random_depth = 2 #print(\"DEPTH = \", random_depth) return random_depth def copy_t2t(depth): my_tree", "my_tree = generate_data(depth-1) change_nts(my_tree) my_list = convert_to_list_inorder(my_tree,[]) infix_tree = ' '.join(str(e) for e" ]
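# Illustrative sketch (not part of the original script): curriculum_depth()
# ramps the sampled depth linearly with the example index, so early examples
# stay shallow and later ones approach max_depth. For max_depth=5 and
# num_examples=1000, the reachable range of random_depth per index is:
#
#   for i in (0, 250, 500, 750, 999):
#       cur = int((5 * i) / 1000)                # curriculum_max_depth
#       print(i, cur, (2, 2 + max(cur - 1, 0)))  # min/max of random_depth
#
# np.random.randint(cur) draws from [0, cur), so random_depth lies in
# [2, cur + 1]; at i=999 this reaches (2, 5), i.e. the full max_depth.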
<reponame>daconex/GroupMeNotifier<filename>setup.py
from setuptools import setup

setup(
    name='GroupMe Notifier',
    version='0.1',
    description='GroupMe Notifier is a tool for getting notifications on desktop',
    author='<NAME>',
    author_email='<EMAIL>',
    url='http://daconex.me',
    install_requires=['requests']
)
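# Usage sketch (standard setuptools workflow, not part of the original file):
# install the package together with its 'requests' dependency via
#
#   pip install .
#
# or build a source distribution with
#
#   python setup.py sdist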
from sys import maxsize

def check_bst_util(root, min_value, max_value):
    if root is None:
        return True
    if not (min_value < root.value < max_value):
        return False
    # recurse with tightened bounds: everything in the left subtree must stay
    # below root.value, everything in the right subtree above it
    return (check_bst_util(root.left, min_value, root.value) and
            check_bst_util(root.right, root.value, max_value))

def check_bst(root):
    return check_bst_util(root, -maxsize, maxsize)
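# Minimal usage sketch. The original snippet defines no node type, so the
# Node class below is a hypothetical stand-in exposing the .value/.left/.right
# attributes that check_bst_util() expects:
class Node:
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

if __name__ == '__main__':
    # 2 < 4 < 6 and both subtrees respect their (min, max) windows -> True
    valid = Node(4, Node(2, Node(1), Node(3)), Node(6, Node(5), Node(7)))
    # 5 sits in the left subtree of 4, violating the upper bound -> False
    invalid = Node(4, Node(2, Node(1), Node(5)), Node(6))
    print(check_bst(valid), check_bst(invalid))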
[ "for js in js_list: self._page_js_list.append(js) self._page_updated = True self._page_lock.off() return self def head(self,", "head(self, html): self._page_lock.on() self._page_head = html self._page_updated = True self._page_lock.off() return self def", "key_low = key.lower() own_attrs[key_low] = '%s %s' % (val, own_attrs[key_low]) if key_low in", "return self def __rshift__(self, elems): if elems: if isinstance(elems, tuple) or isinstance(elems, list):", "'/': self.url = url else: self.url = '/%s/%s' % (mod_name, url) if not", "view = self._page_view[name] return {'id' : view['id'], 'name' : name, 'url' : '%s/%s'", "= file_path @property def name(self): return self.file_path def read(self): return self.data def close(self):", "self._event_id = createVid() self._event_attr = {'class' : self._event_id, 'page_url' : self._view_url, 'page_view' :", "get(req, *argv, **kargs): return func(req, *argv, **kargs) if 'c' in crud or '*'", "self['elems'].append(elem) return self def __rshift__(self, elems): if elems: if isinstance(elems, tuple) or isinstance(elems,", "= '/%s' % mod_name elif url[0] == '/': self.url = url else: self.url", "self._page_rendered.encode('utf-8') self._page_updated = False self._page_lock.off() return self._page_rendered else: return self._page_rendered def meta(self, *meta_list):", "'%s%s' % (mod_path, static) else: self.static = '%s/%s' % (mod_path, static) self._static_cache =", "= favicon self._page_meta_list = [] self._page_css_list = [] self._page_js_list = [] self._page_head =", "= {'class' : self._data_id} self._event_attr['page_data'] = self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_post($(this));});});' % self._event_id) def data(self): return", "def delete(req, *argv, **kargs): return func(req, *argv, **kargs) self._page_lock.on() self._page_init = url self._page_updated", "Elements (a.k.a : children) #=========================================================================== def html(self, *elems): for elem in elems: self['elems'].append(elem)", "& links) #=========================================================================== def attr(self, **attrs): own_attrs = self['attrs'] for key, val in", "event & links) #=========================================================================== def attr(self, **attrs): own_attrs = self['attrs'] for key, val", "in css_list: self._page_css_list.append(css) self._page_updated = True self._page_lock.off() return self def js(self, *js_list): self._page_lock.on()", "meta.items(): meta_str += '%s=\"%s\"' % (key, val) self._page_meta_list.append(meta_str) self._page_updated = True self._page_lock.off() return", "return self def footer(self, html): self._page_lock.on() self._page_footer = html self._page_updated = True self._page_lock.off()", "== '/': self.static = '%s%s' % (mod_path, static) else: self.static = '%s/%s' %", ": self._view_id} def event(self): return self._event_attr def get(self, name, *argv): class Get(Page.InteractiveTag): def", "self._data_id} self._event_attr['page_data'] = self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_put($(this));});});' % self._event_id) def data(self): return self._data_attr return Put(self._page_view[name],", "put(self, name, *argv): class Put(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self._data_id", "if not os.path.exists(file_path): raise Exception('could not find %s' % file_path) cache = CacheDescriptor(file_path)", "= cache 
@export('GET', self.url) def send_static(req, *argv): path = '/'.join(argv) file_path = '%s/%s'", "__lshift__(self, opts): if opts: return self.attr(**opts) return self def baseattr(self, **attrs): own_attrs =", "if self._page_cache: return Cache.getCache(file_path) else: if not os.path.exists(file_path): raise Exception('could not find %s'", "**attrs): own_attrs = self['attrs'] for key, val in attrs.items(): key_low = key.lower() own_attrs[key_low]", "if 'c' in crud or '*' in crud: @rest('POST', url, **opts) def post(req,", "view['url'] self._event_id = createVid() self._event_attr = {'class' : self._event_id, 'page_url' : self._view_url, 'page_view'", "else: if not os.path.exists(file_path): raise Exception('could not find %s' % path) return open(file_path,", "'%s/%s' % (self.url if self.url != '/' else '', name) self._page_view[name] = {'id'", "url, **opts) def delete(req, *argv, **kargs): return func(req, *argv, **kargs) self._page_lock.on() self._page_init =", "def html(self, *elems): for elem in elems: self['elems'].append(elem) return self def __rshift__(self, elems):", "% self._event_id) def data(self): return self._data_attr return Post(self._page_view[name], *argv) def put(self, name, *argv):", "def meta(self, *meta_list): self._page_lock.on() for meta in meta_list: meta_str = ' ' for", "self.url, content_type=ContentType.TextHtml) def send_template(req): return self.__render__() @export('GET', self.static_url) def send_static(req, *argv): path =", "= [] self._page_head = '' self._page_header = '' self._page_footer = '' self._page_cache =", "self._data_id} self._event_attr['page_data'] = self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_post($(this));});});' % self._event_id) def data(self): return self._data_attr return Post(self._page_view[name],", "*argv): return self.patch(name, *argv) def reload(self, *names): reload = [] for name in", "else view['url'] self._event_id = createVid() self._event_attr = {'class' : self._event_id, 'page_url' : self._view_url,", "= pmd() mod_name = mod_name.replace('.', '/') if not url: self.url = '/%s' %", "self._page_footer = html self._page_updated = True self._page_lock.off() return self #=========================================================================== # View Definition", "Cache._CACHE_DATA[file_path] else: class CacheDescriptor(types.FileType): def __init__(self, file_path): with open(file_path, 'rb') as fd: self.data", "+ '-data' self._data_attr = {'class' : self._data_id} self._event_attr['page_data'] = self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_post($(this));});});' % self._event_id)", "fd.read() self.file_path = file_path @property def name(self): return self.file_path def read(self): return self.data", "__init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_get($(this));});});' % self._event_id) return Get(self._page_view[name], *argv) def", "self.html(*elems) else: return self.html(*(elems,)) return self class Cache: _CACHE_DATA = {} @classmethod def", "= '%s%s' % (mod_path, static) else: self.static = '%s/%s' % (mod_path, static) self._static_cache", "unicode(elem) ret += '</%s>' % self['tag'] return ret #=========================================================================== # Attributes (a.k.a :", "= '%s/%s' % (mod_path, static) if not static: self.static_url = '%s/static' % self.url", "if not static: self.static = mod_path elif static[0] == 
'/': self.static = '%s%s'", "self._view_id = view['id'] self._view_url = '%s/%s' % (view['url'] + '/'.join(argv)) if argv else", "self._page_rendered = self._page_template.render({ 'init' : self._page_init, 'title' : self._page_title, 'favicon' : self._page_favicon, 'meta_list'", "self._page_view[name] = {'id' : id, 'name' : name, 'url' : url} if 'r'", "@export('GET', '/page/empty', content_type=ContentType.AppJson) def empty_page(req): return {'error' : 'Page Empty'} @export('GET', '/favicon.ico', content_type=ContentType.AppStream)", "val in meta.items(): meta_str += '%s=\"%s\"' % (key, val) self._page_meta_list.append(meta_str) self._page_updated = True", ": view['id'], 'name' : name, 'url' : '%s/%s' % (view['url'], '/'.join(path)) if path", "def getCache(cls, file_path): if file_path in Cache._CACHE_DATA: return Cache._CACHE_DATA[file_path] else: class CacheDescriptor(types.FileType): def", "name in names: reload.append(self._page_view[name]['id']) return {'reload' : reload} def __getitem__(self, names): if isinstance(names,", "'-data' self._data_attr = {'class' : self._data_id} self._event_attr['page_data'] = self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_post($(this));});});' % self._event_id) def", "getCache(cls, file_path): if file_path in Cache._CACHE_DATA: return Cache._CACHE_DATA[file_path] else: class CacheDescriptor(types.FileType): def __init__(self,", "init(self, method='r', **opts): def wrapper(func): crud = method.lower() id = createVid() name =", "*path): view = self._page_view[name] return {'id' : view['id'], 'name' : name, 'url' :", "= html self._page_updated = True self._page_lock.off() return self #=========================================================================== # View Definition #===========================================================================", "= '/'.join(argv) file_path = '%s/%s' % (self.static, path) if self._static_cache: return Cache.getCache(file_path) else:", "= view['id'] self._view_url = '%s/%s' % (view['url'] + '/'.join(argv)) if argv else view['url']", "# Elements (a.k.a : children) #=========================================================================== def html(self, *elems): for elem in elems:", "Delete(self._page_view[name], *argv) #=============================================================================== # Page Statics #=============================================================================== Page(url='/page', cache=True) @export('GET', '/page/empty', content_type=ContentType.AppJson) def", "Cache.getCache(file_path) else: if not os.path.exists(file_path): raise Exception('could not find %s' % path) return", "attrs={}) for key, val in attrs.items(): self['attrs'][key.lower()] = val def __len__(self, *args, **kwargs):", ": self._page_meta_list, 'css_list' : self._page_css_list, 'js_list' : self._page_js_list, 'head' : unicode(self._page_head), 'header' :", "crud: @rest('POST', url, **opts) def post(req, *argv, **kargs): return func(req, *argv, **kargs) if", "self.attr(**opts) return self def baseattr(self, **attrs): own_attrs = self['attrs'] for key, val in", "return Delete(self._page_view[name], *argv) #=============================================================================== # Page Statics #=============================================================================== Page(url='/page', cache=True) @export('GET', '/page/empty', content_type=ContentType.AppJson)", "self._page_favicon, 'meta_list' : self._page_meta_list, 'css_list' : self._page_css_list, 'js_list' : 
self._page_js_list, 'head' : unicode(self._page_head),", "v in self['attrs'].items(): ret += ' %s=\"%s\"' % (k, v) ret += '>'", "= [] self._page_js_list = [] self._page_head = '' self._page_header = '' self._page_footer =", ": self._page_favicon, 'meta_list' : self._page_meta_list, 'css_list' : self._page_css_list, 'js_list' : self._page_js_list, 'head' :", "or isinstance(elems, list): return self.html(*elems) else: return self.html(*(elems,)) return self class Cache: _CACHE_DATA", "'rb') class Page: def __init__(self, url=None, title='', favicon='/page/static/image/favicon.ico', static='static', cache=True): mod_path, mod_name =", "*argv, **kargs): return func(req, *argv, **kargs) if 'd' in crud or '*' in", "for name in names: reload.append(self._page_view[name]['id']) return {'reload' : reload} def __getitem__(self, names): if", "else: self.static_url = '/%s' % static self._page_init = '/page/empty' self._page_view = {} self._page_title", "'-data' self._data_attr = {'class' : self._data_id} self._event_attr['page_data'] = self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_put($(this));});});' % self._event_id) def", "[] for name in names: reload.append(self._page_view[name]['id']) return {'reload' : reload} def __getitem__(self, names):", "own_attrs[key_low] = '%s %s' % (val, own_attrs[key_low]) if key_low in own_attrs else val", "crud: @rest('GET', url, **opts) def get(req, *argv, **kargs): return func(req, *argv, **kargs) if", "func(req, *argv, **kargs) self._page_lock.on() self._page_init = url self._page_updated = True self._page_lock.off() return wrapper", "return self['elems'].__len__() def __str__(self): ret = '<%s' % self['tag'] for k, v in", "send_static(req, *argv): path = '/'.join(argv) file_path = '%s/%s' % (self.static, path) if self._static_cache:", "open(file_path, 'rb') as fd: self.data = fd.read() self.file_path = file_path @property def name(self):", "path = '/'.join(argv) file_path = '%s/%s' % (self.static_path, path) if self._page_cache: return Cache.getCache(file_path)", "'%s=\"%s\"' % (key, val) self._page_meta_list.append(meta_str) self._page_updated = True self._page_lock.off() return self def css(self,", "__init__(self, file_path): with open(file_path, 'rb') as fd: self.data = fd.read() self.file_path = file_path", "def __render__(self): if self._page_updated: self._page_lock.on() self._page_rendered = self._page_template.render({ 'init' : self._page_init, 'title' :", "self['attrs'][key.lower()] = val def __len__(self, *args, **kwargs): return self['elems'].__len__() def __str__(self): ret =", "!= '/': self.static_url = '%s/%s' % (self.url, static) else: self.static_url = '/%s' %", "return Cache.getCache(file_path) else: if not os.path.exists(file_path): raise Exception('could not find %s' % path)", "list): return self.reload(*names) else: return self.reload(*(names,)) #=========================================================================== # Interactive Functions #=========================================================================== class InteractiveTag(Tag):", "= '%s/%s' % (view['url'], '/'.join(argv)) if argv else view['url'] return Tag('script', Id=id, Page_Url=url).html(", "= '' self._page_footer = '' self._page_cache = cache self._page_cache_data = {} self._page_updated =", "crud: @rest('DELETE', url, **opts) def delete(req, *argv, **kargs): return func(req, *argv, **kargs) self._page_lock.on()", "% mod_name elif url[0] == '/': self.url = url else: self.url = '/%s/%s'", "return self def js(self, *js_list): 
self._page_lock.on() for js in js_list: self._page_js_list.append(js) self._page_updated =", "'/template.html') as fd: self._page_template = jinja2.Template(fd.read()) @export('GET', self.url, content_type=ContentType.TextHtml) def send_template(req): return self.__render__()", "else val return self #=========================================================================== # Elements (a.k.a : children) #=========================================================================== def html(self,", "class Get(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_get($(this));});});' % self._event_id) return", "def read(self): return self.data def close(self): return None if not os.path.exists(file_path): raise Exception('could", "val return self #=========================================================================== # Elements (a.k.a : children) #=========================================================================== def html(self, *elems):", "unicode(self._page_head), 'header' : unicode(self._page_header), 'footer' : unicode(self._page_footer) }) self._page_rendered = self._page_rendered.encode('utf-8') self._page_updated =", "for css in css_list: self._page_css_list.append(css) self._page_updated = True self._page_lock.off() return self def js(self,", "id, 'name' : name, 'url' : url} if 'r' in crud or '*'", "id ) def __call__(self, name, *argv): return self.patch(name, *argv) def reload(self, *names): reload", "**kargs) if 'u' in crud or '*' in crud: @rest('PUT', url, **opts) def", "jinja2 from pygics import Lock, ContentType, export, rest def createVid(): return 'v-' +", ": self._event_id, 'page_url' : self._view_url, 'page_view' : self._view_id} def event(self): return self._event_attr def", ": self._page_title, 'favicon' : self._page_favicon, 'meta_list' : self._page_meta_list, 'css_list' : self._page_css_list, 'js_list' :", "% (view['url'], '/'.join(path)) if path else view['url']} #=========================================================================== # View Functions #=========================================================================== def", "'rb') as fd: self.data = fd.read() self.file_path = file_path @property def name(self): return", "footer(self, html): self._page_lock.on() self._page_footer = html self._page_updated = True self._page_lock.off() return self #===========================================================================", "'page_url' : self._view_url, 'page_view' : self._view_id} def event(self): return self._event_attr def get(self, name,", "class Put(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self._data_id = self._event_id +", "return Get(self._page_view[name], *argv) def post(self, name, *argv): class Post(Page.InteractiveTag): def __init__(self, view, *argv):", "= '/%s/%s' % (mod_name, url) if static[0] == '/': static = static[1:] if", "None with open(pwd() + '/template.html') as fd: self._page_template = jinja2.Template(fd.read()) @export('GET', self.url, content_type=ContentType.TextHtml)", "% self._event_id) def data(self): return self._data_attr return Put(self._page_view[name], *argv) def delete(self, name, *argv):", "static) else: self.static = '%s/%s' % (mod_path, static) self._static_cache = cache @export('GET', self.url)", "(mod_name, url) if static[0] == '/': static = static[1:] if not static: self.static_path", "__render__(self): if self._page_updated: 
self._page_lock.on() self._page_rendered = self._page_template.render({ 'init' : self._page_init, 'title' : self._page_title,", "def baseattr(self, **attrs): own_attrs = self['attrs'] for key, val in attrs.items(): key_low =", "name, 'url' : url} if 'r' in crud or '*' in crud: @rest('GET',", "def __lshift__(self, opts): if opts: return self.attr(**opts) return self def baseattr(self, **attrs): own_attrs", "@rest('PUT', url, **opts) def put(req, *argv, **kargs): return func(req, *argv, **kargs) if 'd'", "@export('GET', self.url) def send_static(req, *argv): path = '/'.join(argv) file_path = '%s/%s' % (self.static,", "''' Created on 2017. 10. 25. @author: HyechurnJang ''' import os import uuid", "self def __lshift__(self, opts): if opts: return self.attr(**opts) return self def baseattr(self, **attrs):", "*argv): path = '/'.join(argv) file_path = '%s/%s' % (self.static, path) if self._static_cache: return", "'script') self._view_id = view['id'] self._view_url = '%s/%s' % (view['url'] + '/'.join(argv)) if argv", "*argv) def put(self, name, *argv): class Put(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view,", "**opts): def wrapper(func): crud = method.lower() id = createVid() name = func.__name__ url", "return self.attr(**opts) return self def baseattr(self, **attrs): own_attrs = self['attrs'] for key, val", "= self._page_view[name] id = view['id'] url = '%s/%s' % (view['url'], '/'.join(argv)) if argv", ": self._data_id} self._event_attr['page_data'] = self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_put($(this));});});' % self._event_id) def data(self): return self._data_attr return", "if self._page_updated: self._page_lock.on() self._page_rendered = self._page_template.render({ 'init' : self._page_init, 'title' : self._page_title, 'favicon'", "uuid import types import jinja2 from pygics import Lock, ContentType, export, rest def", "if self._static_cache: return Cache.getCache(file_path) else: if not os.path.exists(file_path): raise Exception('could not find %s'", "title='', favicon='/page/static/image/favicon.ico', static='static', cache=True): mod_path, mod_name = pmd() mod_name = mod_name.replace('.', '/') if", "file_path): with open(file_path, 'rb') as fd: self.data = fd.read() self.file_path = file_path @property", "attrs.items(): key_low = key.lower() own_attrs[key_low] = '%s %s' % (own_attrs[key_low], val) if key_low", "self._page_updated = True self._page_lock.off() return wrapper def view(self, method='r', **opts): def wrapper(func): crud", "self._event_id) return Get(self._page_view[name], *argv) def post(self, name, *argv): class Post(Page.InteractiveTag): def __init__(self, view,", "raise Exception('could not find %s' % file_path) cache = CacheDescriptor(file_path) Cache._CACHE_DATA[file_path] = cache", "= self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_post($(this));});});' % self._event_id) def data(self): return self._data_attr return Post(self._page_view[name], *argv) def", "if 'd' in crud or '*' in crud: @rest('DELETE', url, **opts) def delete(req,", "self.url != '/': self.static_url = '%s/%s' % (self.url, static) else: self.static_url = '/%s'", "path) if self._static_cache: return Cache.getCache(file_path) else: if not os.path.exists(file_path): raise Exception('could not find", "= '%s/%s' % (self.url, static) else: self.static_url = '/%s' % static self._page_init =", "def view(self, method='r', **opts): def wrapper(func): crud = method.lower() id = 
createVid() name", "(self.static, path) if self._static_cache: return Cache.getCache(file_path) else: if not os.path.exists(file_path): raise Exception('could not", "(view['url'], '/'.join(argv)) if argv else view['url'] return Tag('script', Id=id, Page_Url=url).html( '$(document).ready(function(){page_patch(\"%s\")});' % id", "+ str(uuid.uuid4()) class Tag(dict): def __init__(self, tag, **attrs): dict.__init__(self, tag=tag, elems=[], attrs={}) for", "def data(self): return self._data_attr return Put(self._page_view[name], *argv) def delete(self, name, *argv): class Delete(Page.InteractiveTag):", "import Lock, ContentType, export, rest def createVid(): return 'v-' + str(uuid.uuid4()) class Tag(dict):", "own_attrs[key_low] = '%s %s' % (own_attrs[key_low], val) if key_low in own_attrs else val", "def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self._data_id = self._event_id + '-data' self._data_attr", "-*- ''' Created on 2017. 10. 25. @author: HyechurnJang ''' import os import", "os import uuid import types import jinja2 from pygics import Lock, ContentType, export,", "in crud: @rest('DELETE', url, **opts) def delete(req, *argv, **kargs): return func(req, *argv, **kargs)", "url, static='static', cache=True): mod_path, mod_name = pmd() mod_name = mod_name.replace('.', '/') if not", "createVid(): return 'v-' + str(uuid.uuid4()) class Tag(dict): def __init__(self, tag, **attrs): dict.__init__(self, tag=tag,", "= {} @classmethod def getCache(cls, file_path): if file_path in Cache._CACHE_DATA: return Cache._CACHE_DATA[file_path] else:", "self.reload(*names) else: return self.reload(*(names,)) #=========================================================================== # Interactive Functions #=========================================================================== class InteractiveTag(Tag): def __init__(self,", "if not static: self.static_path = mod_path else: self.static_path = '%s/%s' % (mod_path, static)", "key_low = key.lower() own_attrs[key_low] = '%s %s' % (own_attrs[key_low], val) if key_low in", "*argv): class Delete(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_delete($(this));});});' % self._event_id)", "' ' for key, val in meta.items(): meta_str += '%s=\"%s\"' % (key, val)", "def put(self, name, *argv): class Put(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv)", "= True self._page_lock.off() return self def css(self, *css_list): self._page_lock.on() for css in css_list:", "*argv) def post(self, name, *argv): class Post(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view,", "# View Definition #=========================================================================== def init(self, method='r', **opts): def wrapper(func): crud = method.lower()", "meta_list: meta_str = ' ' for key, val in meta.items(): meta_str += '%s=\"%s\"'", "= '%s/%s' % (view['url'] + '/'.join(argv)) if argv else view['url'] self._event_id = createVid()", "return self class Cache: _CACHE_DATA = {} @classmethod def getCache(cls, file_path): if file_path", "find %s' % path) return open(file_path, 'rb') class Page: def __init__(self, url=None, title='',", "title self._page_favicon = favicon self._page_meta_list = [] self._page_css_list = [] self._page_js_list = []", "static self._page_init = '/page/empty' self._page_view = {} self._page_title = title 
self._page_favicon = favicon", "'/': self.static_url = '%s/%s' % (self.url, static) else: self.static_url = '/%s' % static", "**kargs): return func(req, *argv, **kargs) if 'u' in crud or '*' in crud:", "self._page_lock.off() return wrapper def view(self, method='r', **opts): def wrapper(func): crud = method.lower() id", "self._page_head = '' self._page_header = '' self._page_footer = '' self._page_cache = cache self._page_cache_data", "else: self.static = '%s/%s' % (mod_path, static) self._static_cache = cache @export('GET', self.url) def", "find %s' % path) return open(file_path, 'rb') def __render__(self): if self._page_updated: self._page_lock.on() self._page_rendered", "#=========================================================================== # Interactive Functions #=========================================================================== class InteractiveTag(Tag): def __init__(self, view, *argv): Tag.__init__(self, 'script')", "+= ' %s=\"%s\"' % (k, v) ret += '>' for elem in self['elems']:", "self.file_path = file_path @property def name(self): return self.file_path def read(self): return self.data def", "Cache._CACHE_DATA[file_path] = cache return cache class Static: def __init__(self, url, static='static', cache=True): mod_path,", "if not os.path.exists(file_path): raise Exception('could not find %s' % path) return open(file_path, 'rb')", "self._page_lock.off() return self def js(self, *js_list): self._page_lock.on() for js in js_list: self._page_js_list.append(js) self._page_updated", "%s' % (own_attrs[key_low], val) if key_low in own_attrs else val return self def", "static[0] == '/': self.static = '%s%s' % (mod_path, static) else: self.static = '%s/%s'", "crud or '*' in crud: @rest('PUT', url, **opts) def put(req, *argv, **kargs): return", "__getitem__(self, names): if isinstance(names, tuple) or isinstance(names, list): return self.reload(*names) else: return self.reload(*(names,))", "createVid() self._event_attr = {'class' : self._event_id, 'page_url' : self._view_url, 'page_view' : self._view_id} def", "self._page_js_list = [] self._page_head = '' self._page_header = '' self._page_footer = '' self._page_cache", "self._page_cache_data = {} self._page_updated = True self._page_lock = Lock() self._page_rendered = None with", "str(uuid.uuid4()) class Tag(dict): def __init__(self, tag, **attrs): dict.__init__(self, tag=tag, elems=[], attrs={}) for key,", "@rest('POST', url, **opts) def post(req, *argv, **kargs): return func(req, *argv, **kargs) if 'u'", "def __init__(self, url, static='static', cache=True): mod_path, mod_name = pmd() mod_name = mod_name.replace('.', '/')", "val def __len__(self, *args, **kwargs): return self['elems'].__len__() def __str__(self): ret = '<%s' %", "if file_path in Cache._CACHE_DATA: return Cache._CACHE_DATA[file_path] else: class CacheDescriptor(types.FileType): def __init__(self, file_path): with", "= html self._page_updated = True self._page_lock.off() return self def header(self, html): self._page_lock.on() self._page_header", "view, *argv): Tag.__init__(self, 'script') self._view_id = view['id'] self._view_url = '%s/%s' % (view['url'] +", "'%s/%s' % (self.static_path, path) if self._page_cache: return Cache.getCache(file_path) else: if not os.path.exists(file_path): raise", "= self._page_rendered.encode('utf-8') self._page_updated = False self._page_lock.off() return self._page_rendered else: return self._page_rendered def meta(self,", "def __len__(self, *args, **kwargs): return self['elems'].__len__() def __str__(self): ret = '<%s' % 
self['tag']", "self._page_lock.on() self._page_rendered = self._page_template.render({ 'init' : self._page_init, 'title' : self._page_title, 'favicon' : self._page_favicon,", "Lock() self._page_rendered = None with open(pwd() + '/template.html') as fd: self._page_template = jinja2.Template(fd.read())", "self._data_attr return Post(self._page_view[name], *argv) def put(self, name, *argv): class Put(Page.InteractiveTag): def __init__(self, view,", "}) self._page_rendered = self._page_rendered.encode('utf-8') self._page_updated = False self._page_lock.off() return self._page_rendered else: return self._page_rendered", "'favicon' : self._page_favicon, 'meta_list' : self._page_meta_list, 'css_list' : self._page_css_list, 'js_list' : self._page_js_list, 'head'", "reload} def __getitem__(self, names): if isinstance(names, tuple) or isinstance(names, list): return self.reload(*names) else:", "self._page_lock.off() return self #=========================================================================== # View Definition #=========================================================================== def init(self, method='r', **opts): def", "(self.url if self.url != '/' else '', func.__name__) self._page_view[name] = {'id' : id,", "'/%s/%s' % (mod_name, url) if static[0] == '/': static = static[1:] if not", "func(req, *argv, **kargs) if 'u' in crud or '*' in crud: @rest('PUT', url,", "data(self): return self._data_attr return Post(self._page_view[name], *argv) def put(self, name, *argv): class Put(Page.InteractiveTag): def", "mod_path else: self.static_path = '%s/%s' % (mod_path, static) if not static: self.static_url =", "in self['elems']: ret += unicode(elem) ret += '</%s>' % self['tag'] return ret #===========================================================================", "self._page_init = '/page/empty' self._page_view = {} self._page_title = title self._page_favicon = favicon self._page_meta_list", "return self.reload(*names) else: return self.reload(*(names,)) #=========================================================================== # Interactive Functions #=========================================================================== class InteractiveTag(Tag): def", "name, *argv): class Put(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self._data_id =", "opts): if opts: return self.attr(**opts) return self def baseattr(self, **attrs): own_attrs = self['attrs']", "isinstance(names, tuple) or isinstance(names, list): return self.reload(*names) else: return self.reload(*(names,)) #=========================================================================== # Interactive", "'/' else '', func.__name__) self._page_view[name] = {'id' : id, 'name' : name, 'url'", "*argv): view = self._page_view[name] id = view['id'] url = '%s/%s' % (view['url'], '/'.join(argv))", "in meta.items(): meta_str += '%s=\"%s\"' % (key, val) self._page_meta_list.append(meta_str) self._page_updated = True self._page_lock.off()", "Tag('script', Id=id, Page_Url=url).html( '$(document).ready(function(){page_patch(\"%s\")});' % id ) def __call__(self, name, *argv): return self.patch(name,", "url} if 'r' in crud or '*' in crud: @rest('GET', url, **opts) def", "= '%s/static' % self.url elif self.url != '/': self.static_url = '%s/%s' % (self.url,", "**kargs): return func(req, *argv, **kargs) if 'd' in crud or '*' in crud:", "'/page/empty' self._page_view = {} self._page_title = title self._page_favicon = favicon self._page_meta_list = []", "url self._page_updated = True 
self._page_lock.off() return wrapper def view(self, method='r', **opts): def wrapper(func):", "(k, v) ret += '>' for elem in self['elems']: ret += unicode(elem) ret", "reload = [] for name in names: reload.append(self._page_view[name]['id']) return {'reload' : reload} def", "Attributes (a.k.a : event & links) #=========================================================================== def attr(self, **attrs): own_attrs = self['attrs']", "*argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_get($(this));});});' % self._event_id) return Get(self._page_view[name], *argv) def post(self, name, *argv): class Post(Page.InteractiveTag):", "self._page_view = {} self._page_title = title self._page_favicon = favicon self._page_meta_list = [] self._page_css_list", "= self._page_view[name] return {'id' : view['id'], 'name' : name, 'url' : '%s/%s' %", "self.html('$(document).ready(function(){$(\".%s\").click(function(){page_put($(this));});});' % self._event_id) def data(self): return self._data_attr return Put(self._page_view[name], *argv) def delete(self, name,", "{} self._page_updated = True self._page_lock = Lock() self._page_rendered = None with open(pwd() +", "close(self): return None if not os.path.exists(file_path): raise Exception('could not find %s' % file_path)", "return func(req, *argv, **kargs) if 'c' in crud or '*' in crud: @rest('POST',", "Tag(dict): def __init__(self, tag, **attrs): dict.__init__(self, tag=tag, elems=[], attrs={}) for key, val in", ": self._page_init, 'title' : self._page_title, 'favicon' : self._page_favicon, 'meta_list' : self._page_meta_list, 'css_list' :", "post(req, *argv, **kargs): return func(req, *argv, **kargs) if 'u' in crud or '*'", "__init__(self, url=None, title='', favicon='/page/static/image/favicon.ico', static='static', cache=True): mod_path, mod_name = pmd() mod_name = mod_name.replace('.',", "self._event_id) def data(self): return self._data_attr return Put(self._page_view[name], *argv) def delete(self, name, *argv): class", "view['id'] self._view_url = '%s/%s' % (view['url'] + '/'.join(argv)) if argv else view['url'] self._event_id", "'%s/static' % self.url elif self.url != '/': self.static_url = '%s/%s' % (self.url, static)", "in crud or '*' in crud: @rest('POST', url, **opts) def post(req, *argv, **kargs):", "self._event_id) return Delete(self._page_view[name], *argv) #=============================================================================== # Page Statics #=============================================================================== Page(url='/page', cache=True) @export('GET', '/page/empty',", "% (key, val) self._page_meta_list.append(meta_str) self._page_updated = True self._page_lock.off() return self def css(self, *css_list):", "% path) return open(file_path, 'rb') class Page: def __init__(self, url=None, title='', favicon='/page/static/image/favicon.ico', static='static',", "True self._page_lock.off() return self #=========================================================================== # View Definition #=========================================================================== def init(self, method='r', **opts):", "wrapper(func): crud = method.lower() id = createVid() name = func.__name__ url = '%s/%s'", "= jinja2.Template(fd.read()) @export('GET', self.url, content_type=ContentType.TextHtml) def send_template(req): return self.__render__() @export('GET', self.static_url) def send_static(req,", "**kargs): return func(req, *argv, **kargs) self._page_lock.on() self._page_init = url self._page_updated = True 
self._page_lock.off()", "return self def baseattr(self, **attrs): own_attrs = self['attrs'] for key, val in attrs.items():", "val in attrs.items(): key_low = key.lower() own_attrs[key_low] = '%s %s' % (val, own_attrs[key_low])", "self._page_header = html self._page_updated = True self._page_lock.off() return self def footer(self, html): self._page_lock.on()", "open(pwd() + '/template.html') as fd: self._page_template = jinja2.Template(fd.read()) @export('GET', self.url, content_type=ContentType.TextHtml) def send_template(req):", "or '*' in crud: @rest('DELETE', url, **opts) def delete(req, *argv, **kargs): return func(req,", "def close(self): return None if not os.path.exists(file_path): raise Exception('could not find %s' %", "key_low in own_attrs else val return self #=========================================================================== # Elements (a.k.a : children)", "= '%s/%s' % (self.static, path) if self._static_cache: return Cache.getCache(file_path) else: if not os.path.exists(file_path):", "self._static_cache = cache @export('GET', self.url) def send_static(req, *argv): path = '/'.join(argv) file_path =", "' %s=\"%s\"' % (k, v) ret += '>' for elem in self['elems']: ret", "if not static: self.static_url = '%s/static' % self.url elif self.url != '/': self.static_url", "'/' else '', name) self._page_view[name] = {'id' : id, 'name' : name, 'url'", "cache = CacheDescriptor(file_path) Cache._CACHE_DATA[file_path] = cache return cache class Static: def __init__(self, url,", "self def head(self, html): self._page_lock.on() self._page_head = html self._page_updated = True self._page_lock.off() return", "self._page_footer = '' self._page_cache = cache self._page_cache_data = {} self._page_updated = True self._page_lock", "% self._event_id) return Delete(self._page_view[name], *argv) #=============================================================================== # Page Statics #=============================================================================== Page(url='/page', cache=True) @export('GET',", "*argv) #=============================================================================== # Page Statics #=============================================================================== Page(url='/page', cache=True) @export('GET', '/page/empty', content_type=ContentType.AppJson) def empty_page(req):", "path) return open(file_path, 'rb') def __render__(self): if self._page_updated: self._page_lock.on() self._page_rendered = self._page_template.render({ 'init'", "elem in elems: self['elems'].append(elem) return self def __rshift__(self, elems): if elems: if isinstance(elems,", "static[1:] if not static: self.static_path = mod_path else: self.static_path = '%s/%s' % (mod_path,", "return self def header(self, html): self._page_lock.on() self._page_header = html self._page_updated = True self._page_lock.off()", "% (view['url'], '/'.join(argv)) if argv else view['url'] return Tag('script', Id=id, Page_Url=url).html( '$(document).ready(function(){page_patch(\"%s\")});' %", "getView(self, name, *path): view = self._page_view[name] return {'id' : view['id'], 'name' : name,", "*argv) def reload(self, *names): reload = [] for name in names: reload.append(self._page_view[name]['id']) return", ": self._view_url, 'page_view' : self._view_id} def event(self): return self._event_attr def get(self, name, *argv):", "self._page_lock.on() self._page_init = url self._page_updated = True self._page_lock.off() return wrapper def view(self, method='r',", "= None with open(pwd() + '/template.html') as fd: 

class Tag(dict):

    def __init__(self, tag, **attrs):
        dict.__init__(self, tag=tag, elems=[], attrs={})
        for key, val in attrs.items(): self['attrs'][key.lower()] = val

    def __len__(self, *args, **kwargs):
        return self['elems'].__len__()

    def __str__(self):
        ret = '<%s' % self['tag']
        for k, v in self['attrs'].items(): ret += ' %s="%s"' % (k, v)
        ret += '>'
        for elem in self['elems']: ret += unicode(elem)
        ret += '</%s>' % self['tag']
        return ret

    #===========================================================================
    # Attributes (a.k.a : event & links)
    #===========================================================================
    def attr(self, **attrs):
        own_attrs = self['attrs']
        for key, val in attrs.items():
            key_low = key.lower()
            own_attrs[key_low] = '%s %s' % (own_attrs[key_low], val) if key_low in own_attrs else val
        return self

    def __lshift__(self, opts):
        if opts: return self.attr(**opts)
        return self

    def baseattr(self, **attrs):
        own_attrs = self['attrs']
        for key, val in attrs.items():
            key_low = key.lower()
            own_attrs[key_low] = '%s %s' % (val, own_attrs[key_low]) if key_low in own_attrs else val
        return self

    #===========================================================================
    # Elements (a.k.a : children)
    #===========================================================================
    def html(self, *elems):
        for elem in elems: self['elems'].append(elem)
        return self

    def __rshift__(self, elems):
        if elems:
            if isinstance(elems, tuple) or isinstance(elems, list): return self.html(*elems)
            else: return self.html(*(elems,))
        return self

class Cache:

    _CACHE_DATA = {}

    @classmethod
    def getCache(cls, file_path):
        if file_path in Cache._CACHE_DATA: return Cache._CACHE_DATA[file_path]
        else:
            # file-like wrapper that keeps the payload in memory after the first
            # read; close() is a no-op so the same object can be served repeatedly
            class CacheDescriptor(types.FileType):
                def __init__(self, file_path):
                    with open(file_path, 'rb') as fd: self.data = fd.read()
                    self.file_path = file_path
                @property
                def name(self): return self.file_path
                def read(self): return self.data
                def close(self): return None
            if not os.path.exists(file_path): raise Exception('could not find %s' % file_path)
            cache = CacheDescriptor(file_path)
            Cache._CACHE_DATA[file_path] = cache
            return cache

class Static:

    def __init__(self, url, static='static', cache=True):
        mod_path, mod_name = pmd()
        mod_name = mod_name.replace('.', '/')
        if not url: self.url = '/%s' % mod_name
        elif url[0] == '/': self.url = url
        else: self.url = '/%s/%s' % (mod_name, url)
        if not static: self.static = mod_path
        elif static[0] == '/': self.static = '%s%s' % (mod_path, static)
        else: self.static = '%s/%s' % (mod_path, static)
        self._static_cache = cache

        @export('GET', self.url)
        def send_static(req, *argv):
            path = '/'.join(argv)
            file_path = '%s/%s' % (self.static, path)
            if self._static_cache: return Cache.getCache(file_path)
            else:
                if not os.path.exists(file_path): raise Exception('could not find %s' % path)
                return open(file_path, 'rb')
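
#===============================================================================
# Usage sketch (added for illustration, not framework code): publishing a
# module-local directory. The names 'files' and 'assets' are hypothetical;
# pmd() resolves the calling module, so paths are relative to that module.
#===============================================================================
def _example_static():
    # serves <module_dir>/assets/* at GET /<module>/files/*; cache=False
    # bypasses the in-memory Cache so file edits are picked up immediately
    return Static('files', static='assets', cache=False)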

class Page:

    def __init__(self, url=None, title='', favicon='/page/static/image/favicon.ico', static='static', cache=True):
        mod_path, mod_name = pmd()
        mod_name = mod_name.replace('.', '/')
        if not url: self.url = '/%s' % mod_name
        elif url[0] == '/': self.url = url
        else: self.url = '/%s/%s' % (mod_name, url)
        if static[0] == '/': static = static[1:]
        if not static: self.static_path = mod_path
        else: self.static_path = '%s/%s' % (mod_path, static)
        if not static: self.static_url = '%s/static' % self.url
        elif self.url != '/': self.static_url = '%s/%s' % (self.url, static)
        else: self.static_url = '/%s' % static
        self._page_init = '/page/empty'
        self._page_view = {}
        self._page_title = title
        self._page_favicon = favicon
        self._page_meta_list = []
        self._page_css_list = []
        self._page_js_list = []
        self._page_head = ''
        self._page_header = ''
        self._page_footer = ''
        self._page_cache = cache
        self._page_cache_data = {}
        self._page_updated = True
        self._page_lock = Lock()
        self._page_rendered = None
        with open(pwd() + '/template.html') as fd: self._page_template = jinja2.Template(fd.read())

        # the page itself is served at self.url as rendered HTML
        @export('GET', self.url, content_type=ContentType.TextHtml)
        def send_template(req): return self.__render__()

        # static assets are served beneath self.static_url, optionally cached
        @export('GET', self.static_url)
        def send_static(req, *argv):
            path = '/'.join(argv)
            file_path = '%s/%s' % (self.static_path, path)
            if self._page_cache: return Cache.getCache(file_path)
            else:
                if not os.path.exists(file_path): raise Exception('could not find %s' % path)
                return open(file_path, 'rb')

    def __render__(self):
        if self._page_updated:
            self._page_lock.on()
            self._page_rendered = self._page_template.render({
                'init' : self._page_init,
                'title' : self._page_title,
                'favicon' : self._page_favicon,
                'meta_list' : self._page_meta_list,
                'css_list' : self._page_css_list,
                'js_list' : self._page_js_list,
                'head' : unicode(self._page_head),
                'header' : unicode(self._page_header),
                'footer' : unicode(self._page_footer)
            })
            self._page_rendered = self._page_rendered.encode('utf-8')
            self._page_updated = False
            self._page_lock.off()
            return self._page_rendered
        else: return self._page_rendered

    def meta(self, *meta_list):
        self._page_lock.on()
        for meta in meta_list:
            meta_str = ' '
            for key, val in meta.items(): meta_str += '%s="%s"' % (key, val)
            self._page_meta_list.append(meta_str)
        self._page_updated = True
        self._page_lock.off()
        return self

    def css(self, *css_list):
        self._page_lock.on()
        for css in css_list: self._page_css_list.append(css)
        self._page_updated = True
        self._page_lock.off()
        return self

    def js(self, *js_list):
        self._page_lock.on()
        for js in js_list: self._page_js_list.append(js)
        self._page_updated = True
        self._page_lock.off()
        return self

    def head(self, html):
        self._page_lock.on()
        self._page_head = html
        self._page_updated = True
        self._page_lock.off()
        return self

    def header(self, html):
        self._page_lock.on()
        self._page_header = html
        self._page_updated = True
        self._page_lock.off()
        return self

    def footer(self, html):
        self._page_lock.on()
        self._page_footer = html
        self._page_updated = True
        self._page_lock.off()
        return self
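
    #===========================================================================
    # Usage sketch (added for illustration, not framework code): meta(), css(),
    # js(), head(), header() and footer() all return self, so page chrome can
    # be configured fluently; the paths below are hypothetical.
    #===========================================================================
    def _example_chrome(self):
        return self.meta({'charset' : 'utf-8'}
                   ).css('/page/static/css/main.css'
                   ).js('/page/static/js/main.js'
                   ).header(Tag('h1') >> 'My Site')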

    #===========================================================================
    # View Definition
    #===========================================================================
    def init(self, method='r', **opts):
        # register the decorated function as a view and make it the page's
        # entry view; method is a CRUD string ('c'=POST, 'r'=GET, 'u'=PUT,
        # 'd'=DELETE, '*'=all)
        def wrapper(func):
            crud = method.lower()
            id = createVid()
            name = func.__name__
            url = '%s/%s' % (self.url if self.url != '/' else '', func.__name__)
            self._page_view[name] = {'id' : id, 'name' : name, 'url' : url}
            if 'r' in crud or '*' in crud:
                @rest('GET', url, **opts)
                def get(req, *argv, **kargs): return func(req, *argv, **kargs)
            if 'c' in crud or '*' in crud:
                @rest('POST', url, **opts)
                def post(req, *argv, **kargs): return func(req, *argv, **kargs)
            if 'u' in crud or '*' in crud:
                @rest('PUT', url, **opts)
                def put(req, *argv, **kargs): return func(req, *argv, **kargs)
            if 'd' in crud or '*' in crud:
                @rest('DELETE', url, **opts)
                def delete(req, *argv, **kargs): return func(req, *argv, **kargs)
            self._page_lock.on()
            self._page_init = url
            self._page_updated = True
            self._page_lock.off()
        return wrapper

    def view(self, method='r', **opts):
        def wrapper(func):
            crud = method.lower()
            id = createVid()
            name = func.__name__
            url = '%s/%s' % (self.url if self.url != '/' else '', name)
            self._page_view[name] = {'id' : id, 'name' : name, 'url' : url}
            if 'r' in crud or '*' in crud:
                @rest('GET', url, **opts)
                def get(req, *argv, **kargs): return func(req, *argv, **kargs)
            if 'c' in crud or '*' in crud:
                @rest('POST', url, **opts)
                def post(req, *argv, **kargs): return func(req, *argv, **kargs)
            if 'u' in crud or '*' in crud:
                @rest('PUT', url, **opts)
                def put(req, *argv, **kargs): return func(req, *argv, **kargs)
            if 'd' in crud or '*' in crud:
                @rest('DELETE', url, **opts)
                def delete(req, *argv, **kargs): return func(req, *argv, **kargs)
        return wrapper

    def getView(self, name, *path):
        view = self._page_view[name]
        return {'id' : view['id'], 'name' : name, 'url' : '%s/%s' % (view['url'], '/'.join(path)) if path else view['url']}

    #===========================================================================
    # View Functions
    #===========================================================================
    def patch(self, name, *argv):
        view = self._page_view[name]
        id = view['id']
        url = '%s/%s' % (view['url'], '/'.join(argv)) if argv else view['url']
        return Tag('script', Id=id, Page_Url=url).html(
            '$(document).ready(function(){page_patch("%s")});' % id
        )

    def __call__(self, name, *argv):
        return self.patch(name, *argv)

    def reload(self, *names):
        reload = []
        for name in names: reload.append(self._page_view[name]['id'])
        return {'reload' : reload}

    def __getitem__(self, names):
        if isinstance(names, tuple) or isinstance(names, list): return self.reload(*names)
        else: return self.reload(*(names,))
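
    #===========================================================================
    # Usage sketch (added for illustration, not framework code): a mutating
    # view can return reload() so the client re-renders other views; 'table'
    # and 'summary' are hypothetical view names registered via @view above.
    #===========================================================================
    def _example_reload_response(self):
        return self['table', 'summary']   # same as self.reload('table', 'summary')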

    #===========================================================================
    # Interactive Functions
    #===========================================================================
    class InteractiveTag(Tag):

        def __init__(self, view, *argv):
            Tag.__init__(self, 'script')
            self._view_id = view['id']
            self._view_url = '%s/%s' % (view['url'], '/'.join(argv)) if argv else view['url']
            self._event_id = createVid()
            self._event_attr = {'class' : self._event_id, 'page_url' : self._view_url, 'page_view' : self._view_id}

        def event(self): return self._event_attr

    def get(self, name, *argv):
        class Get(Page.InteractiveTag):
            def __init__(self, view, *argv):
                Page.InteractiveTag.__init__(self, view, *argv)
                self.html('$(document).ready(function(){$(".%s").click(function(){page_get($(this));});});' % self._event_id)
        return Get(self._page_view[name], *argv)

    def post(self, name, *argv):
        class Post(Page.InteractiveTag):
            def __init__(self, view, *argv):
                Page.InteractiveTag.__init__(self, view, *argv)
                self._data_id = self._event_id + '-data'
                self._data_attr = {'class' : self._data_id}
                self._event_attr['page_data'] = self._data_id
                self.html('$(document).ready(function(){$(".%s").click(function(){page_post($(this));});});' % self._event_id)
            def data(self): return self._data_attr
        return Post(self._page_view[name], *argv)

    def put(self, name, *argv):
        class Put(Page.InteractiveTag):
            def __init__(self, view, *argv):
                Page.InteractiveTag.__init__(self, view, *argv)
                self._data_id = self._event_id + '-data'
                self._data_attr = {'class' : self._data_id}
                self._event_attr['page_data'] = self._data_id
                self.html('$(document).ready(function(){$(".%s").click(function(){page_put($(this));});});' % self._event_id)
            def data(self): return self._data_attr
        return Put(self._page_view[name], *argv)

    def delete(self, name, *argv):
        class Delete(Page.InteractiveTag):
            def __init__(self, view, *argv):
                Page.InteractiveTag.__init__(self, view, *argv)
                self.html('$(document).ready(function(){$(".%s").click(function(){page_delete($(this));});});' % self._event_id)
        return Delete(self._page_view[name], *argv)
{'id' : id, 'name' : name, 'url' : url} if", "Statics #=============================================================================== Page(url='/page', cache=True) @export('GET', '/page/empty', content_type=ContentType.AppJson) def empty_page(req): return {'error' : 'Page", "own_attrs else val return self #=========================================================================== # Elements (a.k.a : children) #=========================================================================== def", "'' self._page_footer = '' self._page_cache = cache self._page_cache_data = {} self._page_updated = True", "'*' in crud: @rest('GET', url, **opts) def get(req, *argv, **kargs): return func(req, *argv,", "not os.path.exists(file_path): raise Exception('could not find %s' % path) return open(file_path, 'rb') def", "elif static[0] == '/': self.static = '%s%s' % (mod_path, static) else: self.static =", "self.file_path def read(self): return self.data def close(self): return None if not os.path.exists(file_path): raise", "% id ) def __call__(self, name, *argv): return self.patch(name, *argv) def reload(self, *names):", "view['url'] return Tag('script', Id=id, Page_Url=url).html( '$(document).ready(function(){page_patch(\"%s\")});' % id ) def __call__(self, name, *argv):", "view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self._data_id = self._event_id + '-data' self._data_attr = {'class'", "'css_list' : self._page_css_list, 'js_list' : self._page_js_list, 'head' : unicode(self._page_head), 'header' : unicode(self._page_header), 'footer'", "'%s %s' % (val, own_attrs[key_low]) if key_low in own_attrs else val return self", "self._page_updated = False self._page_lock.off() return self._page_rendered else: return self._page_rendered def meta(self, *meta_list): self._page_lock.on()", "__init__(self, tag, **attrs): dict.__init__(self, tag=tag, elems=[], attrs={}) for key, val in attrs.items(): self['attrs'][key.lower()]", "raise Exception('could not find %s' % path) return open(file_path, 'rb') class Page: def", "post(self, name, *argv): class Post(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self._data_id", "@rest('GET', url, **opts) def get(req, *argv, **kargs): return func(req, *argv, **kargs) if 'c'", "= mod_name.replace('.', '/') if not url: self.url = '/%s' % mod_name elif url[0]", "if elems: if isinstance(elems, tuple) or isinstance(elems, list): return self.html(*elems) else: return self.html(*(elems,))", "= True self._page_lock.off() return self def footer(self, html): self._page_lock.on() self._page_footer = html self._page_updated", "Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_delete($(this));});});' % self._event_id) return Delete(self._page_view[name], *argv) #=============================================================================== # Page Statics", "self._page_js_list.append(js) self._page_updated = True self._page_lock.off() return self def head(self, html): self._page_lock.on() self._page_head =", "else '', name) self._page_view[name] = {'id' : id, 'name' : name, 'url' :", "self._page_init, 'title' : self._page_title, 'favicon' : self._page_favicon, 'meta_list' : self._page_meta_list, 'css_list' : self._page_css_list,", "return open(file_path, 'rb') def __render__(self): if self._page_updated: self._page_lock.on() self._page_rendered = self._page_template.render({ 'init' :", ": name, 'url' : url} if 'r' in crud or '*' in crud:", "= True 
self._page_lock.off() return wrapper def view(self, method='r', **opts): def wrapper(func): crud =", "crud = method.lower() id = createVid() name = func.__name__ url = '%s/%s' %", "'/'.join(argv)) if argv else view['url'] self._event_id = createVid() self._event_attr = {'class' : self._event_id,", "self._page_lock.on() self._page_header = html self._page_updated = True self._page_lock.off() return self def footer(self, html):", "self._data_attr return Put(self._page_view[name], *argv) def delete(self, name, *argv): class Delete(Page.InteractiveTag): def __init__(self, view,", "'/': static = static[1:] if not static: self.static_path = mod_path else: self.static_path =", "empty_page(req): return {'error' : 'Page Empty'} @export('GET', '/favicon.ico', content_type=ContentType.AppStream) def default_favicon(req, *argv): return", "static='static', cache=True): mod_path, mod_name = pmd() mod_name = mod_name.replace('.', '/') if not url:", "'title' : self._page_title, 'favicon' : self._page_favicon, 'meta_list' : self._page_meta_list, 'css_list' : self._page_css_list, 'js_list'", "**opts) def put(req, *argv, **kargs): return func(req, *argv, **kargs) if 'd' in crud", ": '%s/%s' % (view['url'], '/'.join(path)) if path else view['url']} #=========================================================================== # View Functions", "def reload(self, *names): reload = [] for name in names: reload.append(self._page_view[name]['id']) return {'reload'", "= self._event_id + '-data' self._data_attr = {'class' : self._data_id} self._event_attr['page_data'] = self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_post($(this));});});'", "10. 25. @author: HyechurnJang ''' import os import uuid import types import jinja2", "header(self, html): self._page_lock.on() self._page_header = html self._page_updated = True self._page_lock.off() return self def", ") def __call__(self, name, *argv): return self.patch(name, *argv) def reload(self, *names): reload =", "(a.k.a : children) #=========================================================================== def html(self, *elems): for elem in elems: self['elems'].append(elem) return", "file_path = '%s/%s' % (self.static, path) if self._static_cache: return Cache.getCache(file_path) else: if not", "crud or '*' in crud: @rest('POST', url, **opts) def post(req, *argv, **kargs): return", "(view['url'] + '/'.join(argv)) if argv else view['url'] self._event_id = createVid() self._event_attr = {'class'", "url else: self.url = '/%s/%s' % (mod_name, url) if not static: self.static =", "self._page_view[name] return {'id' : view['id'], 'name' : name, 'url' : '%s/%s' % (view['url'],", "file_path = '%s/%s' % (self.static_path, path) if self._page_cache: return Cache.getCache(file_path) else: if not", "Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_get($(this));});});' % self._event_id) return Get(self._page_view[name], *argv) def post(self, name, *argv):", "**kargs) if 'c' in crud or '*' in crud: @rest('POST', url, **opts) def", "% (view['url'] + '/'.join(argv)) if argv else view['url'] self._event_id = createVid() self._event_attr =", "return Put(self._page_view[name], *argv) def delete(self, name, *argv): class Delete(Page.InteractiveTag): def __init__(self, view, *argv):", "baseattr(self, **attrs): own_attrs = self['attrs'] for key, val in attrs.items(): key_low = key.lower()", "pmd() mod_name = mod_name.replace('.', '/') if not url: self.url = '/%s' % mod_name", "% 
self.url elif self.url != '/': self.static_url = '%s/%s' % (self.url, static) else:", "{'error' : 'Page Empty'} @export('GET', '/favicon.ico', content_type=ContentType.AppStream) def default_favicon(req, *argv): return Cache.getCache(pwd() +", "tuple) or isinstance(names, list): return self.reload(*names) else: return self.reload(*(names,)) #=========================================================================== # Interactive Functions", "name, *argv): class Get(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_get($(this));});});' %", "True self._page_lock = Lock() self._page_rendered = None with open(pwd() + '/template.html') as fd:", "tag, **attrs): dict.__init__(self, tag=tag, elems=[], attrs={}) for key, val in attrs.items(): self['attrs'][key.lower()] =", "% self['tag'] return ret #=========================================================================== # Attributes (a.k.a : event & links) #===========================================================================", "'/%s' % static self._page_init = '/page/empty' self._page_view = {} self._page_title = title self._page_favicon", "'/'.join(argv) file_path = '%s/%s' % (self.static_path, path) if self._page_cache: return Cache.getCache(file_path) else: if", ": name, 'url' : '%s/%s' % (view['url'], '/'.join(path)) if path else view['url']} #===========================================================================", "(view['url'], '/'.join(path)) if path else view['url']} #=========================================================================== # View Functions #=========================================================================== def patch(self,", "path else view['url']} #=========================================================================== # View Functions #=========================================================================== def patch(self, name, *argv): view", "None if not os.path.exists(file_path): raise Exception('could not find %s' % file_path) cache =", "return self._page_rendered else: return self._page_rendered def meta(self, *meta_list): self._page_lock.on() for meta in meta_list:", "id = view['id'] url = '%s/%s' % (view['url'], '/'.join(argv)) if argv else view['url']", "' for key, val in meta.items(): meta_str += '%s=\"%s\"' % (key, val) self._page_meta_list.append(meta_str)", "crud: @rest('DELETE', url, **opts) def delete(req, *argv, **kargs): return func(req, *argv, **kargs) return", "return wrapper def getView(self, name, *path): view = self._page_view[name] return {'id' : view['id'],", "[] self._page_css_list = [] self._page_js_list = [] self._page_head = '' self._page_header = ''", "*argv): class Put(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self._data_id = self._event_id", "= mod_path else: self.static_path = '%s/%s' % (mod_path, static) if not static: self.static_url", "'', func.__name__) self._page_view[name] = {'id' : id, 'name' : name, 'url' : url}", "self.static_url = '%s/static' % self.url elif self.url != '/': self.static_url = '%s/%s' %", "for key, val in attrs.items(): self['attrs'][key.lower()] = val def __len__(self, *args, **kwargs): return", "'page_view' : self._view_id} def event(self): return self._event_attr def get(self, name, *argv): class Get(Page.InteractiveTag):", "self._page_meta_list = [] self._page_css_list = [] self._page_js_list = [] self._page_head = '' self._page_header", "if 
path else view['url']} #=========================================================================== # View Functions #=========================================================================== def patch(self, name, *argv):", "HyechurnJang ''' import os import uuid import types import jinja2 from pygics import", "{'class' : self._data_id} self._event_attr['page_data'] = self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_put($(this));});});' % self._event_id) def data(self): return self._data_attr", "{} self._page_title = title self._page_favicon = favicon self._page_meta_list = [] self._page_css_list = []", "__str__(self): ret = '<%s' % self['tag'] for k, v in self['attrs'].items(): ret +=", "not find %s' % file_path) cache = CacheDescriptor(file_path) Cache._CACHE_DATA[file_path] = cache return cache", "dict.__init__(self, tag=tag, elems=[], attrs={}) for key, val in attrs.items(): self['attrs'][key.lower()] = val def", "in elems: self['elems'].append(elem) return self def __rshift__(self, elems): if elems: if isinstance(elems, tuple)", "Functions #=========================================================================== def patch(self, name, *argv): view = self._page_view[name] id = view['id'] url", "= True self._page_lock.off() return self def js(self, *js_list): self._page_lock.on() for js in js_list:", "@classmethod def getCache(cls, file_path): if file_path in Cache._CACHE_DATA: return Cache._CACHE_DATA[file_path] else: class CacheDescriptor(types.FileType):", "css in css_list: self._page_css_list.append(css) self._page_updated = True self._page_lock.off() return self def js(self, *js_list):", "html self._page_updated = True self._page_lock.off() return self def header(self, html): self._page_lock.on() self._page_header =", "**kwargs): return self['elems'].__len__() def __str__(self): ret = '<%s' % self['tag'] for k, v", "Lock, ContentType, export, rest def createVid(): return 'v-' + str(uuid.uuid4()) class Tag(dict): def", "class Tag(dict): def __init__(self, tag, **attrs): dict.__init__(self, tag=tag, elems=[], attrs={}) for key, val", "elif url[0] == '/': self.url = url else: self.url = '/%s/%s' % (mod_name,", "else: self.url = '/%s/%s' % (mod_name, url) if static[0] == '/': static =", "self.static_url) def send_static(req, *argv): path = '/'.join(argv) file_path = '%s/%s' % (self.static_path, path)", "% self['tag'] for k, v in self['attrs'].items(): ret += ' %s=\"%s\"' % (k,", "'name' : name, 'url' : url} if 'r' in crud or '*' in", "= self['attrs'] for key, val in attrs.items(): key_low = key.lower() own_attrs[key_low] = '%s", "= False self._page_lock.off() return self._page_rendered else: return self._page_rendered def meta(self, *meta_list): self._page_lock.on() for", "Post(self._page_view[name], *argv) def put(self, name, *argv): class Put(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self,", "*argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_delete($(this));});});' % self._event_id) return Delete(self._page_view[name], *argv) #=============================================================================== # Page Statics #=============================================================================== Page(url='/page',", "(mod_name, url) if not static: self.static = mod_path elif static[0] == '/': self.static", "isinstance(names, list): return self.reload(*names) else: return self.reload(*(names,)) #=========================================================================== 
# Interactive Functions #=========================================================================== class", "<reponame>pygics-mod/page<gh_stars>0 # -*- coding: utf-8 -*- ''' Created on 2017. 10. 25. @author:", "@property def name(self): return self.file_path def read(self): return self.data def close(self): return None", "'/%s' % mod_name elif url[0] == '/': self.url = url else: self.url =", "self._page_rendered else: return self._page_rendered def meta(self, *meta_list): self._page_lock.on() for meta in meta_list: meta_str", "**kargs) return wrapper def getView(self, name, *path): view = self._page_view[name] return {'id' :", "*meta_list): self._page_lock.on() for meta in meta_list: meta_str = ' ' for key, val", "self.url) def send_static(req, *argv): path = '/'.join(argv) file_path = '%s/%s' % (self.static, path)", "fd: self._page_template = jinja2.Template(fd.read()) @export('GET', self.url, content_type=ContentType.TextHtml) def send_template(req): return self.__render__() @export('GET', self.static_url)", "True self._page_lock.off() return self def footer(self, html): self._page_lock.on() self._page_footer = html self._page_updated =", "+= '</%s>' % self['tag'] return ret #=========================================================================== # Attributes (a.k.a : event &", "static) if not static: self.static_url = '%s/static' % self.url elif self.url != '/':", "as fd: self._page_template = jinja2.Template(fd.read()) @export('GET', self.url, content_type=ContentType.TextHtml) def send_template(req): return self.__render__() @export('GET',", "as fd: self.data = fd.read() self.file_path = file_path @property def name(self): return self.file_path", "'d' in crud or '*' in crud: @rest('DELETE', url, **opts) def delete(req, *argv,", "def __init__(self, view, *argv): Tag.__init__(self, 'script') self._view_id = view['id'] self._view_url = '%s/%s' %", "in attrs.items(): key_low = key.lower() own_attrs[key_low] = '%s %s' % (own_attrs[key_low], val) if", "% (mod_path, static) else: self.static = '%s/%s' % (mod_path, static) self._static_cache = cache", "attrs.items(): self['attrs'][key.lower()] = val def __len__(self, *args, **kwargs): return self['elems'].__len__() def __str__(self): ret", "class Static: def __init__(self, url, static='static', cache=True): mod_path, mod_name = pmd() mod_name =", "unicode(self._page_footer) }) self._page_rendered = self._page_rendered.encode('utf-8') self._page_updated = False self._page_lock.off() return self._page_rendered else: return", "return self #=========================================================================== # View Definition #=========================================================================== def init(self, method='r', **opts): def wrapper(func):", "func(req, *argv, **kargs) return wrapper def getView(self, name, *path): view = self._page_view[name] return", ": unicode(self._page_footer) }) self._page_rendered = self._page_rendered.encode('utf-8') self._page_updated = False self._page_lock.off() return self._page_rendered else:", "static: self.static_path = mod_path else: self.static_path = '%s/%s' % (mod_path, static) if not", "self._data_id = self._event_id + '-data' self._data_attr = {'class' : self._data_id} self._event_attr['page_data'] = self._data_id", "'%s/%s' % (view['url'] + '/'.join(argv)) if argv else view['url'] self._event_id = createVid() self._event_attr", "self._page_css_list = [] self._page_js_list = [] self._page_head = '' self._page_header = '' self._page_footer", "name, *argv): class 
Delete(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_delete($(this));});});' %", "= True self._page_lock.off() return self def header(self, html): self._page_lock.on() self._page_header = html self._page_updated", "def name(self): return self.file_path def read(self): return self.data def close(self): return None if", "ret += '>' for elem in self['elems']: ret += unicode(elem) ret += '</%s>'", "else: return self._page_rendered def meta(self, *meta_list): self._page_lock.on() for meta in meta_list: meta_str =", "= url self._page_updated = True self._page_lock.off() return wrapper def view(self, method='r', **opts): def", "self._page_updated = True self._page_lock.off() return self #=========================================================================== # View Definition #=========================================================================== def init(self,", "for k, v in self['attrs'].items(): ret += ' %s=\"%s\"' % (k, v) ret", "func(req, *argv, **kargs) if 'd' in crud or '*' in crud: @rest('DELETE', url,", "*argv, **kargs): return func(req, *argv, **kargs) self._page_lock.on() self._page_init = url self._page_updated = True", "mod_name elif url[0] == '/': self.url = url else: self.url = '/%s/%s' %", "= method.lower() id = createVid() name = func.__name__ url = '%s/%s' % (self.url", "delete(self, name, *argv): class Delete(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_delete($(this));});});'", ": self._page_css_list, 'js_list' : self._page_js_list, 'head' : unicode(self._page_head), 'header' : unicode(self._page_header), 'footer' :", "in crud or '*' in crud: @rest('DELETE', url, **opts) def delete(req, *argv, **kargs):", "(key, val) self._page_meta_list.append(meta_str) self._page_updated = True self._page_lock.off() return self def css(self, *css_list): self._page_lock.on()", "name, *argv): class Post(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self._data_id =", ": event & links) #=========================================================================== def attr(self, **attrs): own_attrs = self['attrs'] for key,", "return self.__render__() @export('GET', self.static_url) def send_static(req, *argv): path = '/'.join(argv) file_path = '%s/%s'", "cache=True): mod_path, mod_name = pmd() mod_name = mod_name.replace('.', '/') if not url: self.url", "= True self._page_lock.off() return self #=========================================================================== # View Definition #=========================================================================== def init(self, method='r',", "not static: self.static_path = mod_path else: self.static_path = '%s/%s' % (mod_path, static) if", "in js_list: self._page_js_list.append(js) self._page_updated = True self._page_lock.off() return self def head(self, html): self._page_lock.on()", "def init(self, method='r', **opts): def wrapper(func): crud = method.lower() id = createVid() name", "= True self._page_lock = Lock() self._page_rendered = None with open(pwd() + '/template.html') as", "'*' in crud: @rest('PUT', url, **opts) def put(req, *argv, **kargs): return func(req, *argv,", "= url else: self.url = '/%s/%s' % (mod_name, url) if not static: self.static", "static = static[1:] if not static: self.static_path = mod_path else: 
self.static_path = '%s/%s'", "self._event_id) def data(self): return self._data_attr return Post(self._page_view[name], *argv) def put(self, name, *argv): class", "CacheDescriptor(types.FileType): def __init__(self, file_path): with open(file_path, 'rb') as fd: self.data = fd.read() self.file_path", "open(file_path, 'rb') def __render__(self): if self._page_updated: self._page_lock.on() self._page_rendered = self._page_template.render({ 'init' : self._page_init,", "attrs.items(): key_low = key.lower() own_attrs[key_low] = '%s %s' % (val, own_attrs[key_low]) if key_low", "self.url = '/%s' % mod_name elif url[0] == '/': self.url = url else:", "'url' : '%s/%s' % (view['url'], '/'.join(path)) if path else view['url']} #=========================================================================== # View", "return self._event_attr def get(self, name, *argv): class Get(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self,", "= ' ' for key, val in meta.items(): meta_str += '%s=\"%s\"' % (key,", "func.__name__ url = '%s/%s' % (self.url if self.url != '/' else '', func.__name__)", "name, 'url' : '%s/%s' % (view['url'], '/'.join(path)) if path else view['url']} #=========================================================================== #", "patch(self, name, *argv): view = self._page_view[name] id = view['id'] url = '%s/%s' %", "{'reload' : reload} def __getitem__(self, names): if isinstance(names, tuple) or isinstance(names, list): return", "key.lower() own_attrs[key_low] = '%s %s' % (own_attrs[key_low], val) if key_low in own_attrs else", "return func(req, *argv, **kargs) if 'd' in crud or '*' in crud: @rest('DELETE',", "%s' % path) return open(file_path, 'rb') class Page: def __init__(self, url=None, title='', favicon='/page/static/image/favicon.ico',", "not static: self.static = mod_path elif static[0] == '/': self.static = '%s%s' %", "= html self._page_updated = True self._page_lock.off() return self def footer(self, html): self._page_lock.on() self._page_footer", "attr(self, **attrs): own_attrs = self['attrs'] for key, val in attrs.items(): key_low = key.lower()", "def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_get($(this));});});' % self._event_id) return Get(self._page_view[name], *argv)", "{'id' : view['id'], 'name' : name, 'url' : '%s/%s' % (view['url'], '/'.join(path)) if", "*names): reload = [] for name in names: reload.append(self._page_view[name]['id']) return {'reload' : reload}", "ret += unicode(elem) ret += '</%s>' % self['tag'] return ret #=========================================================================== # Attributes", "name, *path): view = self._page_view[name] return {'id' : view['id'], 'name' : name, 'url'", "= mod_path elif static[0] == '/': self.static = '%s%s' % (mod_path, static) else:", "func.__name__ url = '%s/%s' % (self.url if self.url != '/' else '', name)", "#=========================================================================== # Elements (a.k.a : children) #=========================================================================== def html(self, *elems): for elem in", "= func.__name__ url = '%s/%s' % (self.url if self.url != '/' else '',", "'footer' : unicode(self._page_footer) }) self._page_rendered = self._page_rendered.encode('utf-8') self._page_updated = False self._page_lock.off() return self._page_rendered", "= createVid() name = func.__name__ url = '%s/%s' % (self.url if self.url !=", "'', name) 
self._page_view[name] = {'id' : id, 'name' : name, 'url' : url}", "coding: utf-8 -*- ''' Created on 2017. 10. 25. @author: HyechurnJang ''' import", "isinstance(elems, list): return self.html(*elems) else: return self.html(*(elems,)) return self class Cache: _CACHE_DATA =", "return {'reload' : reload} def __getitem__(self, names): if isinstance(names, tuple) or isinstance(names, list):", "crud: @rest('PUT', url, **opts) def put(req, *argv, **kargs): return func(req, *argv, **kargs) if", "#=========================================================================== def attr(self, **attrs): own_attrs = self['attrs'] for key, val in attrs.items(): key_low", "or isinstance(names, list): return self.reload(*names) else: return self.reload(*(names,)) #=========================================================================== # Interactive Functions #===========================================================================", "= createVid() self._event_attr = {'class' : self._event_id, 'page_url' : self._view_url, 'page_view' : self._view_id}", "content_type=ContentType.AppJson) def empty_page(req): return {'error' : 'Page Empty'} @export('GET', '/favicon.ico', content_type=ContentType.AppStream) def default_favicon(req,", "key.lower() own_attrs[key_low] = '%s %s' % (val, own_attrs[key_low]) if key_low in own_attrs else", "self._page_lock.on() for meta in meta_list: meta_str = ' ' for key, val in", "(val, own_attrs[key_low]) if key_low in own_attrs else val return self #=========================================================================== # Elements", "else view['url']} #=========================================================================== # View Functions #=========================================================================== def patch(self, name, *argv): view =", "self._event_attr def get(self, name, *argv): class Get(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view,", "= Lock() self._page_rendered = None with open(pwd() + '/template.html') as fd: self._page_template =", "url) if static[0] == '/': static = static[1:] if not static: self.static_path =", "self._page_title = title self._page_favicon = favicon self._page_meta_list = [] self._page_css_list = [] self._page_js_list", "def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_delete($(this));});});' % self._event_id) return Delete(self._page_view[name], *argv)", "self class Cache: _CACHE_DATA = {} @classmethod def getCache(cls, file_path): if file_path in", "= self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_put($(this));});});' % self._event_id) def data(self): return self._data_attr return Put(self._page_view[name], *argv) def", "meta(self, *meta_list): self._page_lock.on() for meta in meta_list: meta_str = ' ' for key,", "self._page_lock.on() for css in css_list: self._page_css_list.append(css) self._page_updated = True self._page_lock.off() return self def", "else: self.url = '/%s/%s' % (mod_name, url) if not static: self.static = mod_path", ": url} if 'r' in crud or '*' in crud: @rest('GET', url, **opts)", "self._page_meta_list, 'css_list' : self._page_css_list, 'js_list' : self._page_js_list, 'head' : unicode(self._page_head), 'header' : unicode(self._page_header),", "self._page_rendered def meta(self, *meta_list): self._page_lock.on() for meta in meta_list: meta_str = ' '", "= url else: self.url = '/%s/%s' % (mod_name, url) if 
static[0] == '/':", "if argv else view['url'] return Tag('script', Id=id, Page_Url=url).html( '$(document).ready(function(){page_patch(\"%s\")});' % id ) def", "+= '>' for elem in self['elems']: ret += unicode(elem) ret += '</%s>' %", "self.data = fd.read() self.file_path = file_path @property def name(self): return self.file_path def read(self):", "mod_name.replace('.', '/') if not url: self.url = '/%s' % mod_name elif url[0] ==", "key, val in meta.items(): meta_str += '%s=\"%s\"' % (key, val) self._page_meta_list.append(meta_str) self._page_updated =", "Delete(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_delete($(this));});});' % self._event_id) return Delete(self._page_view[name],", "own_attrs[key_low]) if key_low in own_attrs else val return self #=========================================================================== # Elements (a.k.a", "method='r', **opts): def wrapper(func): crud = method.lower() id = createVid() name = func.__name__", "*args, **kwargs): return self['elems'].__len__() def __str__(self): ret = '<%s' % self['tag'] for k,", "tuple) or isinstance(elems, list): return self.html(*elems) else: return self.html(*(elems,)) return self class Cache:", "self._page_template = jinja2.Template(fd.read()) @export('GET', self.url, content_type=ContentType.TextHtml) def send_template(req): return self.__render__() @export('GET', self.static_url) def", "True self._page_lock.off() return self def css(self, *css_list): self._page_lock.on() for css in css_list: self._page_css_list.append(css)", "def js(self, *js_list): self._page_lock.on() for js in js_list: self._page_js_list.append(js) self._page_updated = True self._page_lock.off()", "return 'v-' + str(uuid.uuid4()) class Tag(dict): def __init__(self, tag, **attrs): dict.__init__(self, tag=tag, elems=[],", "for key, val in attrs.items(): key_low = key.lower() own_attrs[key_low] = '%s %s' %", "%s' % path) return open(file_path, 'rb') def __render__(self): if self._page_updated: self._page_lock.on() self._page_rendered =", "names: reload.append(self._page_view[name]['id']) return {'reload' : reload} def __getitem__(self, names): if isinstance(names, tuple) or", "self._page_lock.off() return self._page_rendered else: return self._page_rendered def meta(self, *meta_list): self._page_lock.on() for meta in", "view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_delete($(this));});});' % self._event_id) return Delete(self._page_view[name], *argv) #=============================================================================== #", "# Attributes (a.k.a : event & links) #=========================================================================== def attr(self, **attrs): own_attrs =", "(own_attrs[key_low], val) if key_low in own_attrs else val return self def __lshift__(self, opts):", "% (val, own_attrs[key_low]) if key_low in own_attrs else val return self #=========================================================================== #", "Static: def __init__(self, url, static='static', cache=True): mod_path, mod_name = pmd() mod_name = mod_name.replace('.',", "ret #=========================================================================== # Attributes (a.k.a : event & links) #=========================================================================== def attr(self, **attrs):", "= key.lower() own_attrs[key_low] = '%s %s' % (val, 
own_attrs[key_low]) if key_low in own_attrs", "self.static = mod_path elif static[0] == '/': self.static = '%s%s' % (mod_path, static)", "ret = '<%s' % self['tag'] for k, v in self['attrs'].items(): ret += '", "@rest('DELETE', url, **opts) def delete(req, *argv, **kargs): return func(req, *argv, **kargs) return wrapper", "#=============================================================================== # Page Statics #=============================================================================== Page(url='/page', cache=True) @export('GET', '/page/empty', content_type=ContentType.AppJson) def empty_page(req): return", "or '*' in crud: @rest('POST', url, **opts) def post(req, *argv, **kargs): return func(req,", "self.static = '%s/%s' % (mod_path, static) self._static_cache = cache @export('GET', self.url) def send_static(req,", "delete(req, *argv, **kargs): return func(req, *argv, **kargs) return wrapper def getView(self, name, *path):", "self._static_cache: return Cache.getCache(file_path) else: if not os.path.exists(file_path): raise Exception('could not find %s' %", "Post(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self._data_id = self._event_id + '-data'", "static) self._static_cache = cache @export('GET', self.url) def send_static(req, *argv): path = '/'.join(argv) file_path", "= {} self._page_updated = True self._page_lock = Lock() self._page_rendered = None with open(pwd()", "@rest('DELETE', url, **opts) def delete(req, *argv, **kargs): return func(req, *argv, **kargs) self._page_lock.on() self._page_init", "meta_str += '%s=\"%s\"' % (key, val) self._page_meta_list.append(meta_str) self._page_updated = True self._page_lock.off() return self", "url = '%s/%s' % (self.url if self.url != '/' else '', name) self._page_view[name]", "'%s/%s' % (view['url'], '/'.join(path)) if path else view['url']} #=========================================================================== # View Functions #===========================================================================", "children) #=========================================================================== def html(self, *elems): for elem in elems: self['elems'].append(elem) return self def", "Cache._CACHE_DATA: return Cache._CACHE_DATA[file_path] else: class CacheDescriptor(types.FileType): def __init__(self, file_path): with open(file_path, 'rb') as", "return Cache._CACHE_DATA[file_path] else: class CacheDescriptor(types.FileType): def __init__(self, file_path): with open(file_path, 'rb') as fd:", "**opts) def get(req, *argv, **kargs): return func(req, *argv, **kargs) if 'c' in crud", "jinja2.Template(fd.read()) @export('GET', self.url, content_type=ContentType.TextHtml) def send_template(req): return self.__render__() @export('GET', self.static_url) def send_static(req, *argv):", "put(req, *argv, **kargs): return func(req, *argv, **kargs) if 'd' in crud or '*'", "#=========================================================================== def patch(self, name, *argv): view = self._page_view[name] id = view['id'] url =", "def event(self): return self._event_attr def get(self, name, *argv): class Get(Page.InteractiveTag): def __init__(self, view,", "get(self, name, *argv): class Get(Page.InteractiveTag): def __init__(self, view, *argv): Page.InteractiveTag.__init__(self, view, *argv) self.html('$(document).ready(function(){$(\".%s\").click(function(){page_get($(this));});});'", "return open(file_path, 'rb') class Page: def __init__(self, url=None, title='', 
favicon='/page/static/image/favicon.ico', static='static', cache=True): mod_path,", "meta in meta_list: meta_str = ' ' for key, val in meta.items(): meta_str", "#=============================================================================== Page(url='/page', cache=True) @export('GET', '/page/empty', content_type=ContentType.AppJson) def empty_page(req): return {'error' : 'Page Empty'}", "= view['id'] url = '%s/%s' % (view['url'], '/'.join(argv)) if argv else view['url'] return", "else: return self.html(*(elems,)) return self class Cache: _CACHE_DATA = {} @classmethod def getCache(cls,", "tag=tag, elems=[], attrs={}) for key, val in attrs.items(): self['attrs'][key.lower()] = val def __len__(self,", "self def __rshift__(self, elems): if elems: if isinstance(elems, tuple) or isinstance(elems, list): return", "name, *argv): return self.patch(name, *argv) def reload(self, *names): reload = [] for name", "view['url']} #=========================================================================== # View Functions #=========================================================================== def patch(self, name, *argv): view = self._page_view[name]", "(self.static_path, path) if self._page_cache: return Cache.getCache(file_path) else: if not os.path.exists(file_path): raise Exception('could not", "self._page_lock.off() return self def css(self, *css_list): self._page_lock.on() for css in css_list: self._page_css_list.append(css) self._page_updated", "'c' in crud or '*' in crud: @rest('POST', url, **opts) def post(req, *argv,", "self def header(self, html): self._page_lock.on() self._page_header = html self._page_updated = True self._page_lock.off() return", "self._data_id self.html('$(document).ready(function(){$(\".%s\").click(function(){page_put($(this));});});' % self._event_id) def data(self): return self._data_attr return Put(self._page_view[name], *argv) def delete(self,", "return ret #=========================================================================== # Attributes (a.k.a : event & links) #=========================================================================== def attr(self,", "= '%s/%s' % (self.url if self.url != '/' else '', func.__name__) self._page_view[name] =", "fd: self.data = fd.read() self.file_path = file_path @property def name(self): return self.file_path def", "% (mod_name, url) if static[0] == '/': static = static[1:] if not static:", "self._page_cache: return Cache.getCache(file_path) else: if not os.path.exists(file_path): raise Exception('could not find %s' %", "return self._data_attr return Put(self._page_view[name], *argv) def delete(self, name, *argv): class Delete(Page.InteractiveTag): def __init__(self,", "in names: reload.append(self._page_view[name]['id']) return {'reload' : reload} def __getitem__(self, names): if isinstance(names, tuple)", "@export('GET', self.url, content_type=ContentType.TextHtml) def send_template(req): return self.__render__() @export('GET', self.static_url) def send_static(req, *argv): path", "createVid() name = func.__name__ url = '%s/%s' % (self.url if self.url != '/'", "*argv, **kargs): return func(req, *argv, **kargs) if 'u' in crud or '*' in", "func.__name__) self._page_view[name] = {'id' : id, 'name' : name, 'url' : url} if", "'*' in crud: @rest('DELETE', url, **opts) def delete(req, *argv, **kargs): return func(req, *argv,", "self._page_lock.off() return self def head(self, html): self._page_lock.on() self._page_head = html self._page_updated = True", "self._view_url, 'page_view' : self._view_id} def event(self): return 
self._event_attr def get(self, name, *argv): class", "on 2017. 10. 25. @author: HyechurnJang ''' import os import uuid import types", "True self._page_lock.off() return self def js(self, *js_list): self._page_lock.on() for js in js_list: self._page_js_list.append(js)", "self._page_updated = True self._page_lock.off() return self def js(self, *js_list): self._page_lock.on() for js in", "def wrapper(func): crud = method.lower() id = createVid() name = func.__name__ url =", "self['attrs'] for key, val in attrs.items(): key_low = key.lower() own_attrs[key_low] = '%s %s'", "else: class CacheDescriptor(types.FileType): def __init__(self, file_path): with open(file_path, 'rb') as fd: self.data =", "wrapper def getView(self, name, *path): view = self._page_view[name] return {'id' : view['id'], 'name'", "self.patch(name, *argv) def reload(self, *names): reload = [] for name in names: reload.append(self._page_view[name]['id'])", "'/page/empty', content_type=ContentType.AppJson) def empty_page(req): return {'error' : 'Page Empty'} @export('GET', '/favicon.ico', content_type=ContentType.AppStream) def", "*argv): Tag.__init__(self, 'script') self._view_id = view['id'] self._view_url = '%s/%s' % (view['url'] + '/'.join(argv))", "self #=========================================================================== # View Definition #=========================================================================== def init(self, method='r', **opts): def wrapper(func): crud", "def attr(self, **attrs): own_attrs = self['attrs'] for key, val in attrs.items(): key_low =", "self._page_head = html self._page_updated = True self._page_lock.off() return self def header(self, html): self._page_lock.on()", "self._page_lock.on() for js in js_list: self._page_js_list.append(js) self._page_updated = True self._page_lock.off() return self def", "or '*' in crud: @rest('PUT', url, **opts) def put(req, *argv, **kargs): return func(req,", "**opts) def delete(req, *argv, **kargs): return func(req, *argv, **kargs) self._page_lock.on() self._page_init = url", "content_type=ContentType.TextHtml) def send_template(req): return self.__render__() @export('GET', self.static_url) def send_static(req, *argv): path = '/'.join(argv)", "Page(url='/page', cache=True) @export('GET', '/page/empty', content_type=ContentType.AppJson) def empty_page(req): return {'error' : 'Page Empty'} @export('GET',", "def getView(self, name, *path): view = self._page_view[name] return {'id' : view['id'], 'name' :", "*argv, **kargs) return wrapper def getView(self, name, *path): view = self._page_view[name] return {'id'", "= '%s/%s' % (self.url if self.url != '/' else '', name) self._page_view[name] =", "in attrs.items(): self['attrs'][key.lower()] = val def __len__(self, *args, **kwargs): return self['elems'].__len__() def __str__(self):", "= cache self._page_cache_data = {} self._page_updated = True self._page_lock = Lock() self._page_rendered =", "url, **opts) def put(req, *argv, **kargs): return func(req, *argv, **kargs) if 'd' in", "return self.html(*elems) else: return self.html(*(elems,)) return self class Cache: _CACHE_DATA = {} @classmethod", "static[0] == '/': static = static[1:] if not static: self.static_path = mod_path else:", "_CACHE_DATA = {} @classmethod def getCache(cls, file_path): if file_path in Cache._CACHE_DATA: return Cache._CACHE_DATA[file_path]", "import types import jinja2 from pygics import Lock, ContentType, export, rest def createVid():" ]
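A minimal usage sketch of the Tag/Page API above. Everything here is hypothetical (the page URL, view name, and markup are illustrative only), and it assumes a running pygics server supplying the export/rest machinery:

# Hypothetical usage of the reconstructed Page/Tag API -- a sketch, not part of the module.
from page import Page, Tag

page = Page(url='/demo', title='Demo')

@page.init()                     # registers GET /demo/main and makes it the page's initial view
def main(req):
    div = Tag('div', Class='container')          # attribute keys are lower-cased ('class')
    div.html(Tag('h1').html('Hello'))            # append children with html()
    div >> Tag('p').html('rendered by a view')   # ...or with the >> operator
    return str(div)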
[ "solver.solve_issues() # plot overall weighted census score over time (password is not required", "Rights\",), }, policy={ \"No Internet\": -10 }) # solve issues for the nation:", "here>\" CONTACT = \"<Nationstates demands that the User Agent contain a method of", "\"Averageness\", \"Death Rate\", \"Taxation\"), 0 : (\"Rudeness\", \"Ignorance\", \"Corruption\", \"Government Size\", \"Political Apathy\",", "Internet\": -10 }) # solve issues for the nation: solver.solve_issues() # plot overall", "solver = cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census = { -1 : (\"Wealth Gaps\", \"Obesity\", \"Crime\",", "address is fine>\" cm.init(CONTACT) solver = cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census = { -1 :", "Script's owner. An email address is fine>\" cm.init(CONTACT) solver = cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census", "plt # if not installed, run `pip install matplotlib` # CONFIG USER =", "as plt # if not installed, run `pip install matplotlib` # CONFIG USER", "3 : (\"Civil Rights\",), }, policy={ \"No Internet\": -10 }) # solve issues", ": (\"Wealth Gaps\", \"Obesity\", \"Crime\", \"Charmlessness\", \"Primitiveness\", \"Averageness\", \"Death Rate\", \"Taxation\"), 0 :", "matplotlib.pyplot as plt # if not installed, run `pip install matplotlib` # CONFIG", "\"Political Freedom\"), 3 : (\"Civil Rights\",), }, policy={ \"No Internet\": -10 }) #", "here>\" PASSWORD = \"<<PASSWORD> here>\" CONTACT = \"<Nationstates demands that the User Agent", "<filename>example.py<gh_stars>1-10 import census_maximizer as cm import matplotlib.pyplot as plt # if not installed,", "An email address is fine>\" cm.init(CONTACT) solver = cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census = {", ": (\"Civil Rights\",), }, policy={ \"No Internet\": -10 }) # solve issues for", "{ -1 : (\"Wealth Gaps\", \"Obesity\", \"Crime\", \"Charmlessness\", \"Primitiveness\", \"Averageness\", \"Death Rate\", \"Taxation\"),", "(\"Wealth Gaps\", \"Obesity\", \"Crime\", \"Charmlessness\", \"Primitiveness\", \"Averageness\", \"Death Rate\", \"Taxation\"), 0 : (\"Rudeness\",", ": (\"Rudeness\", \"Ignorance\", \"Corruption\", \"Government Size\", \"Political Apathy\", \"Authoritarianism\"), 2 : (\"Economy\", \"Political", "}, policy={ \"No Internet\": -10 }) # solve issues for the nation: solver.solve_issues()", "if not installed, run `pip install matplotlib` # CONFIG USER = \"<Insert nation", "census_maximizer as cm import matplotlib.pyplot as plt # if not installed, run `pip", "run `pip install matplotlib` # CONFIG USER = \"<Insert nation name here>\" PASSWORD", "USER = \"<Insert nation name here>\" PASSWORD = \"<<PASSWORD> here>\" CONTACT = \"<Nationstates", "= \"<<PASSWORD> here>\" CONTACT = \"<Nationstates demands that the User Agent contain a", "name here>\" PASSWORD = \"<<PASSWORD> here>\" CONTACT = \"<Nationstates demands that the User", "\"Government Size\", \"Political Apathy\", \"Authoritarianism\"), 2 : (\"Economy\", \"Political Freedom\"), 3 : (\"Civil", "matplotlib` # CONFIG USER = \"<Insert nation name here>\" PASSWORD = \"<<PASSWORD> here>\"", "Freedom\"), 3 : (\"Civil Rights\",), }, policy={ \"No Internet\": -10 }) # solve", "overall weighted census score over time (password is not required for this) plt.plot(*solver.census_score_history())", ": (\"Economy\", \"Political Freedom\"), 3 : (\"Civil Rights\",), }, policy={ \"No Internet\": -10", "2 : (\"Economy\", \"Political Freedom\"), 3 : (\"Civil Rights\",), }, policy={ \"No Internet\":", "nation name 
here>\" PASSWORD = \"<<PASSWORD> here>\" CONTACT = \"<Nationstates demands that the", "installed, run `pip install matplotlib` # CONFIG USER = \"<Insert nation name here>\"", "is fine>\" cm.init(CONTACT) solver = cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census = { -1 : (\"Wealth", "Agent contain a method of contacting the Script's owner. An email address is", "# CONFIG USER = \"<Insert nation name here>\" PASSWORD = \"<<PASSWORD> here>\" CONTACT", "method of contacting the Script's owner. An email address is fine>\" cm.init(CONTACT) solver", "cm.init(CONTACT) solver = cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census = { -1 : (\"Wealth Gaps\", \"Obesity\",", "contacting the Script's owner. An email address is fine>\" cm.init(CONTACT) solver = cm.CensusMaximizer(USER,", "\"Death Rate\", \"Taxation\"), 0 : (\"Rudeness\", \"Ignorance\", \"Corruption\", \"Government Size\", \"Political Apathy\", \"Authoritarianism\"),", "for the nation: solver.solve_issues() # plot overall weighted census score over time (password", "nation: solver.solve_issues() # plot overall weighted census score over time (password is not", "\"Obesity\", \"Crime\", \"Charmlessness\", \"Primitiveness\", \"Averageness\", \"Death Rate\", \"Taxation\"), 0 : (\"Rudeness\", \"Ignorance\", \"Corruption\",", "demands that the User Agent contain a method of contacting the Script's owner.", "import matplotlib.pyplot as plt # if not installed, run `pip install matplotlib` #", "(\"Civil Rights\",), }, policy={ \"No Internet\": -10 }) # solve issues for the", "\"Ignorance\", \"Corruption\", \"Government Size\", \"Political Apathy\", \"Authoritarianism\"), 2 : (\"Economy\", \"Political Freedom\"), 3", "`pip install matplotlib` # CONFIG USER = \"<Insert nation name here>\" PASSWORD =", "a method of contacting the Script's owner. An email address is fine>\" cm.init(CONTACT)", "not installed, run `pip install matplotlib` # CONFIG USER = \"<Insert nation name", "= \"<Insert nation name here>\" PASSWORD = \"<<PASSWORD> here>\" CONTACT = \"<Nationstates demands", "\"Primitiveness\", \"Averageness\", \"Death Rate\", \"Taxation\"), 0 : (\"Rudeness\", \"Ignorance\", \"Corruption\", \"Government Size\", \"Political", "\"Taxation\"), 0 : (\"Rudeness\", \"Ignorance\", \"Corruption\", \"Government Size\", \"Political Apathy\", \"Authoritarianism\"), 2 :", "PASSWORD = \"<<PASSWORD> here>\" CONTACT = \"<Nationstates demands that the User Agent contain", "\"No Internet\": -10 }) # solve issues for the nation: solver.solve_issues() # plot", "Gaps\", \"Obesity\", \"Crime\", \"Charmlessness\", \"Primitiveness\", \"Averageness\", \"Death Rate\", \"Taxation\"), 0 : (\"Rudeness\", \"Ignorance\",", "Rate\", \"Taxation\"), 0 : (\"Rudeness\", \"Ignorance\", \"Corruption\", \"Government Size\", \"Political Apathy\", \"Authoritarianism\"), 2", "-1 : (\"Wealth Gaps\", \"Obesity\", \"Crime\", \"Charmlessness\", \"Primitiveness\", \"Averageness\", \"Death Rate\", \"Taxation\"), 0", "the Script's owner. An email address is fine>\" cm.init(CONTACT) solver = cm.CensusMaximizer(USER, PASSWORD)", "User Agent contain a method of contacting the Script's owner. An email address", "of contacting the Script's owner. 
An email address is fine>\" cm.init(CONTACT) solver =", "\"<Insert nation name here>\" PASSWORD = \"<<PASSWORD> here>\" CONTACT = \"<Nationstates demands that", "PASSWORD) solver.adjust_weights(census = { -1 : (\"Wealth Gaps\", \"Obesity\", \"Crime\", \"Charmlessness\", \"Primitiveness\", \"Averageness\",", "\"Corruption\", \"Government Size\", \"Political Apathy\", \"Authoritarianism\"), 2 : (\"Economy\", \"Political Freedom\"), 3 :", "Apathy\", \"Authoritarianism\"), 2 : (\"Economy\", \"Political Freedom\"), 3 : (\"Civil Rights\",), }, policy={", "install matplotlib` # CONFIG USER = \"<Insert nation name here>\" PASSWORD = \"<<PASSWORD>", "\"<Nationstates demands that the User Agent contain a method of contacting the Script's", "as cm import matplotlib.pyplot as plt # if not installed, run `pip install", "fine>\" cm.init(CONTACT) solver = cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census = { -1 : (\"Wealth Gaps\",", "policy={ \"No Internet\": -10 }) # solve issues for the nation: solver.solve_issues() #", "solver.adjust_weights(census = { -1 : (\"Wealth Gaps\", \"Obesity\", \"Crime\", \"Charmlessness\", \"Primitiveness\", \"Averageness\", \"Death", "the nation: solver.solve_issues() # plot overall weighted census score over time (password is", "# solve issues for the nation: solver.solve_issues() # plot overall weighted census score", "email address is fine>\" cm.init(CONTACT) solver = cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census = { -1", "(\"Economy\", \"Political Freedom\"), 3 : (\"Civil Rights\",), }, policy={ \"No Internet\": -10 })", "Size\", \"Political Apathy\", \"Authoritarianism\"), 2 : (\"Economy\", \"Political Freedom\"), 3 : (\"Civil Rights\",),", "import census_maximizer as cm import matplotlib.pyplot as plt # if not installed, run", "}) # solve issues for the nation: solver.solve_issues() # plot overall weighted census", "weighted census score over time (password is not required for this) plt.plot(*solver.census_score_history()) plt.show()", "= \"<Nationstates demands that the User Agent contain a method of contacting the", "issues for the nation: solver.solve_issues() # plot overall weighted census score over time", "CONFIG USER = \"<Insert nation name here>\" PASSWORD = \"<<PASSWORD> here>\" CONTACT =", "(\"Rudeness\", \"Ignorance\", \"Corruption\", \"Government Size\", \"Political Apathy\", \"Authoritarianism\"), 2 : (\"Economy\", \"Political Freedom\"),", "= { -1 : (\"Wealth Gaps\", \"Obesity\", \"Crime\", \"Charmlessness\", \"Primitiveness\", \"Averageness\", \"Death Rate\",", "CONTACT = \"<Nationstates demands that the User Agent contain a method of contacting", "\"Authoritarianism\"), 2 : (\"Economy\", \"Political Freedom\"), 3 : (\"Civil Rights\",), }, policy={ \"No", "cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census = { -1 : (\"Wealth Gaps\", \"Obesity\", \"Crime\", \"Charmlessness\", \"Primitiveness\",", "0 : (\"Rudeness\", \"Ignorance\", \"Corruption\", \"Government Size\", \"Political Apathy\", \"Authoritarianism\"), 2 : (\"Economy\",", "\"Crime\", \"Charmlessness\", \"Primitiveness\", \"Averageness\", \"Death Rate\", \"Taxation\"), 0 : (\"Rudeness\", \"Ignorance\", \"Corruption\", \"Government", "cm import matplotlib.pyplot as plt # if not installed, run `pip install matplotlib`", "solve issues for the nation: solver.solve_issues() # plot overall weighted census score over", "plot overall weighted census score over time (password is not required for this)", "\"<<PASSWORD> here>\" CONTACT = \"<Nationstates demands 
that the User Agent contain a method", "-10 }) # solve issues for the nation: solver.solve_issues() # plot overall weighted", "owner. An email address is fine>\" cm.init(CONTACT) solver = cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census =", "# plot overall weighted census score over time (password is not required for", "the User Agent contain a method of contacting the Script's owner. An email", "\"Political Apathy\", \"Authoritarianism\"), 2 : (\"Economy\", \"Political Freedom\"), 3 : (\"Civil Rights\",), },", "# if not installed, run `pip install matplotlib` # CONFIG USER = \"<Insert", "\"Charmlessness\", \"Primitiveness\", \"Averageness\", \"Death Rate\", \"Taxation\"), 0 : (\"Rudeness\", \"Ignorance\", \"Corruption\", \"Government Size\",", "= cm.CensusMaximizer(USER, PASSWORD) solver.adjust_weights(census = { -1 : (\"Wealth Gaps\", \"Obesity\", \"Crime\", \"Charmlessness\",", "contain a method of contacting the Script's owner. An email address is fine>\"", "that the User Agent contain a method of contacting the Script's owner. An" ]
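For intuition, the weight map above is a linear weighting over census scales: negative weights penalize a statistic, zero ignores it, and larger positive weights favor it. A toy illustration with made-up numbers (not the library's actual scoring code):

# Toy illustration of weighted census scoring (hypothetical values).
weights = {"Wealth Gaps": -1, "Economy": 2, "Civil Rights": 3}
scores = {"Wealth Gaps": 40.0, "Economy": 75.0, "Civil Rights": 60.0}
overall = sum(weights[k] * scores[k] for k in weights)
print(overall)  # -1*40 + 2*75 + 3*60 = 290.0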
[ "# Generic release markers: # X.Y # X.Y.Z # For bugfix releases #", "markers: # X.YaN # Alpha release # X.YbN # Beta release # X.YrcN", "Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. #", "example! \"\"\" # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic", "# Release Candidate # X.Y # Final release # # Dev branch marker", "# X.Y # Final release # # Dev branch marker is: 'X.Y.dev' or", "branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0'", "'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical", "'X.Y.dev0' is the canonical version of 'X.Y.dev' __version__ = \"0.2\" __all__ = ['row_your_boat']", "'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of", "For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release", "coding: utf-8 -*- # Copyright (c) 2015 <NAME> (http://www.jdhp.org) \"\"\" A great package", "great package example! \"\"\" # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ #", "Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate #", "X.YaN # Alpha release # X.YbN # Beta release # X.YrcN # Release", "# Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate", "compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y", "# X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers:", "-*- # Copyright (c) 2015 <NAME> (http://www.jdhp.org) \"\"\" A great package example! \"\"\"", "# 'X.Y.dev0' is the canonical version of 'X.Y.dev' __version__ = \"0.2\" __all__ =", "version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z", "or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version", "Copyright (c) 2015 <NAME> (http://www.jdhp.org) \"\"\" A great package example! \"\"\" # PEP0440", "# Beta release # X.YrcN # Release Candidate # X.Y # Final release", "# PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers:", "Beta release # X.YrcN # Release Candidate # X.Y # Final release #", "see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z #", "formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y #", "is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' __version__ =", "release # X.YrcN # Release Candidate # X.Y # Final release # #", "X.Y # Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN'", "releases # # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN", "2015 <NAME> (http://www.jdhp.org) \"\"\" A great package example! \"\"\" # PEP0440 compatible formatted", "<NAME> (http://www.jdhp.org) \"\"\" A great package example! \"\"\" # PEP0440 compatible formatted version,", "(c) 2015 <NAME> (http://www.jdhp.org) \"\"\" A great package example! \"\"\" # PEP0440 compatible", "(http://www.jdhp.org) \"\"\" A great package example! 
\"\"\" # PEP0440 compatible formatted version, see:", "pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release #", "X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: #", "# X.YaN # Alpha release # X.YbN # Beta release # X.YrcN #", "release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is", "\"\"\" # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release", "# # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN #", "# -*- coding: utf-8 -*- # Copyright (c) 2015 <NAME> (http://www.jdhp.org) \"\"\" A", "Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release", "markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release", "X.YbN # Beta release # X.YrcN # Release Candidate # X.Y # Final", "# Copyright (c) 2015 <NAME> (http://www.jdhp.org) \"\"\" A great package example! \"\"\" #", "PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: #", "A great package example! \"\"\" # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/", "package example! \"\"\" # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # #", "where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev'", "X.YrcN # Release Candidate # X.Y # Final release # # Dev branch", "\"\"\" A great package example! \"\"\" # PEP0440 compatible formatted version, see: #", "bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release #", "integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' __version__ = \"0.2\" __all__", "Candidate # X.Y # Final release # # Dev branch marker is: 'X.Y.dev'", "# # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an", "-*- coding: utf-8 -*- # Copyright (c) 2015 <NAME> (http://www.jdhp.org) \"\"\" A great", "Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N", "is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the", "# For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha", "https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For bugfix", "# X.YbN # Beta release # X.YrcN # Release Candidate # X.Y #", "utf-8 -*- # Copyright (c) 2015 <NAME> (http://www.jdhp.org) \"\"\" A great package example!", "# https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For", "an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' __version__ = \"0.2\"", "# Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where", "X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN #", "# # Generic release markers: # X.Y # X.Y.Z # For bugfix releases", "# X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN", "Release Candidate # X.Y # Final release # # Dev branch marker is:", "Generic release markers: # X.Y # X.Y.Z # For bugfix releases # #", "# X.YrcN # Release Candidate # X.Y # Final release # # Dev", "marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is", "release # X.YbN # Beta release # X.YrcN # Release Candidate # X.Y", "# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.", "# Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta", "N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' __version__", "release markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "from StringIO import StringIO except ImportError: from io import StringIO from distutils.version import", "LooseVersion from requests.exceptions import HTTPError pytestmark = pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'), reason='Needs v12", "task class TestPackageManagementTasks(object): def test_create_task(self, pkg_task): assert pkg_task.operation == \"INSTALL\" assert pkg_task.kind ==", "break assert pkg_task.__dict__['deleted'] def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx): col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col, list)", "0 def test_create_query_task(self, pkg_query_task): assert pkg_query_task.operation == \"QUERY\" assert pkg_query_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate'", "KIND, either express or implied. # See the License for the specific language", "iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield task @pytest.fixture(scope='function')", "Unless required by applicable law or agreed to in writing, software # distributed", "\\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA def test_load_no_task(self, mgmt_root): with pytest.raises(HTTPError) as err: collection =", "and # limitations under the License. # import pytest try: from StringIO import", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield task @pytest.fixture(scope='function') def pkg_query_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task =", "assert pkg_task.selfLink == resource.selfLink def test_exists(self, mgmt_root, pkg_task): pid = str(pkg_task.id) collection =", "distutils.version import LooseVersion from requests.exceptions import HTTPError pytestmark = pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'),", "True def test_cancel(self, pkg_task): pkg_task.cancel() assert pkg_task.__dict__['canceled'] def test_delete(self, pkg_task): pkg_task.cancel() while True:", "License. # You may obtain a copy of the License at # #", "io import StringIO from distutils.version import LooseVersion from requests.exceptions import HTTPError pytestmark =", "collection = mgmt_root.shared.iapp.package_management_tasks_s resource = collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id == resource.id assert pkg_task.selfLink ==", "'foo-iapp.rpm' sio = StringIO(80*'a') ftu = mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20) yield fake_iapp_name tpath_name", "pkg_task): pkg_task.cancel() assert pkg_task.__dict__['canceled'] def test_delete(self, pkg_task): pkg_task.cancel() while True: pkg_task.refresh() if pkg_task.status", "permissions and # limitations under the License. 
# import pytest try: from StringIO", "err: collection = mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd' ) assert err.value.response.status_code == 404 def test_load(self,", "language governing permissions and # limitations under the License. # import pytest try:", "< LooseVersion('12.0.0'), reason='Needs v12 TMOS or greater to pass.' ) @pytest.fixture(scope='function') def iapp_lx(mgmt_root):", "assert pkg_task.operation == \"INSTALL\" assert pkg_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA def test_load_no_task(self,", "mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield task @pytest.fixture(scope='function') def pkg_query_task(mgmt_root, iapp_lx):", "law or agreed to in writing, software # distributed under the License is", "@pytest.fixture(scope='function') def iapp_lx(mgmt_root): fake_iapp_name = 'foo-iapp.rpm' sio = StringIO(80*'a') ftu = mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio,", "the License for the specific language governing permissions and # limitations under the", "collection.package_management_task.exists(id=pid) assert exists is True def test_cancel(self, pkg_task): pkg_task.cancel() assert pkg_task.__dict__['canceled'] def test_delete(self,", "StringIO from distutils.version import LooseVersion from requests.exceptions import HTTPError pytestmark = pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release'))", "compliance with the License. # You may obtain a copy of the License", "pytest try: from StringIO import StringIO except ImportError: from io import StringIO from", "to pass.' ) @pytest.fixture(scope='function') def iapp_lx(mgmt_root): fake_iapp_name = 'foo-iapp.rpm' sio = StringIO(80*'a') ftu", "@pytest.fixture(scope='function') def pkg_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' )", "def test_exists(self, mgmt_root, pkg_task): pid = str(pkg_task.id) collection = mgmt_root.shared.iapp.package_management_tasks_s exists = collection.package_management_task.exists(id=pid)", "or greater to pass.' ) @pytest.fixture(scope='function') def iapp_lx(mgmt_root): fake_iapp_name = 'foo-iapp.rpm' sio =", "pass.' ) @pytest.fixture(scope='function') def iapp_lx(mgmt_root): fake_iapp_name = 'foo-iapp.rpm' sio = StringIO(80*'a') ftu =", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "== resource.selfLink def test_exists(self, mgmt_root, pkg_task): pid = str(pkg_task.id) collection = mgmt_root.shared.iapp.package_management_tasks_s exists", "this file except in compliance with the License. 
# You may obtain a", "ImportError: from io import StringIO from distutils.version import LooseVersion from requests.exceptions import HTTPError", "test_load(self, mgmt_root, pkg_task): collection = mgmt_root.shared.iapp.package_management_tasks_s resource = collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id == resource.id", "def test_delete(self, pkg_task): pkg_task.cancel() while True: pkg_task.refresh() if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']:", "def pkg_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield", "pkg_task.cancel() while True: pkg_task.refresh() if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']: pkg_task.delete() break assert", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "assert pkg_task.__dict__['canceled'] def test_delete(self, pkg_task): pkg_task.cancel() while True: pkg_task.refresh() if pkg_task.status in ['CANCELED',", "you may not use this file except in compliance with the License. #", "specific language governing permissions and # limitations under the License. # import pytest", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "# Copyright 2015 F5 Networks Inc. # # Licensed under the Apache License,", "import LooseVersion from requests.exceptions import HTTPError pytestmark = pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'), reason='Needs", "= mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd' ) assert err.value.response.status_code == 404 def test_load(self, mgmt_root, pkg_task):", "from distutils.version import LooseVersion from requests.exceptions import HTTPError pytestmark = pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) <", "pkg_task.refresh() if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']: pkg_task.delete() break assert pkg_task.__dict__['deleted'] def test_package_mgmt_tasks_collection(self,", "under the License. # import pytest try: from StringIO import StringIO except ImportError:", "ANY KIND, either express or implied. # See the License for the specific", "operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield task @pytest.fixture(scope='function') def pkg_query_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task", "pkg_task): collection = mgmt_root.shared.iapp.package_management_tasks_s resource = collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id == resource.id assert pkg_task.selfLink", "utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def pkg_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm'", "assert exists is True def test_cancel(self, pkg_task): pkg_task.cancel() assert pkg_task.__dict__['canceled'] def test_delete(self, pkg_task):", "iapp_lx(mgmt_root): fake_iapp_name = 'foo-iapp.rpm' sio = StringIO(80*'a') ftu = mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20)", "# limitations under the License. # import pytest try: from StringIO import StringIO", "in compliance with the License. 
# You may obtain a copy of the", "collection = mgmt_root.shared.iapp.package_management_tasks_s exists = collection.package_management_task.exists(id=pid) assert exists is True def test_cancel(self, pkg_task):", "def test_cancel(self, pkg_task): pkg_task.cancel() assert pkg_task.__dict__['canceled'] def test_delete(self, pkg_task): pkg_task.cancel() while True: pkg_task.refresh()", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the", "# NOQA def test_load_no_task(self, mgmt_root): with pytest.raises(HTTPError) as err: collection = mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load(", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "test_create_query_task(self, pkg_query_task): assert pkg_query_task.operation == \"QUERY\" assert pkg_query_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA", "use this file except in compliance with the License. # You may obtain", "= mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='QUERY' ) yield task class TestPackageManagementTasks(object): def test_create_task(self,", "fake_iapp_name, chunk_size=20) yield fake_iapp_name tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def pkg_task(mgmt_root, iapp_lx):", "= '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def pkg_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']: pkg_task.delete() break assert pkg_task.__dict__['deleted'] def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx):", "'FINISHED']: pkg_task.delete() break assert pkg_task.__dict__['deleted'] def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx): col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert", "not use this file except in compliance with the License. # You may", "pkg_task.__dict__['deleted'] def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx): col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col, list) assert len(col)", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "License. 
# import pytest try: from StringIO import StringIO except ImportError: from io", "fake_iapp_name tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def pkg_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s", "mgmt_root, iapp_lx): col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col, list) assert len(col) > 0 def", "pkg_task): assert pkg_task.operation == \"INSTALL\" assert pkg_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA def", "pytestmark = pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'), reason='Needs v12 TMOS or greater to pass.'", "with pytest.raises(HTTPError) as err: collection = mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd' ) assert err.value.response.status_code ==", "StringIO except ImportError: from io import StringIO from distutils.version import LooseVersion from requests.exceptions", "\"INSTALL\" assert pkg_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA def test_load_no_task(self, mgmt_root): with pytest.raises(HTTPError)", "See the License for the specific language governing permissions and # limitations under", "assert pkg_task.id == resource.id assert pkg_task.selfLink == resource.selfLink def test_exists(self, mgmt_root, pkg_task): pid", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "import StringIO from distutils.version import LooseVersion from requests.exceptions import HTTPError pytestmark = pytest.mark.skipif(", "collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield task @pytest.fixture(scope='function') def", "License, Version 2.0 (the \"License\"); # you may not use this file except", "operation='QUERY' ) yield task class TestPackageManagementTasks(object): def test_create_task(self, pkg_task): assert pkg_task.operation == \"INSTALL\"", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA def test_load_no_task(self, mgmt_root): with pytest.raises(HTTPError) as err: collection = mgmt_root.shared.iapp.package_management_tasks_s", "= collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id == resource.id assert pkg_task.selfLink == resource.selfLink def test_exists(self, mgmt_root,", "pkg_task.cancel() assert pkg_task.__dict__['canceled'] def test_delete(self, pkg_task): pkg_task.cancel() while True: pkg_task.refresh() if pkg_task.status in", "mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col, list) assert len(col) > 0 def test_create_query_task(self, pkg_query_task): assert pkg_query_task.operation", "exists = collection.package_management_task.exists(id=pid) assert exists is True def test_cancel(self, pkg_task): pkg_task.cancel() assert pkg_task.__dict__['canceled']", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "limitations under the License. 
# import pytest try: from StringIO import StringIO except", "mgmt_root, pkg_task): pid = str(pkg_task.id) collection = mgmt_root.shared.iapp.package_management_tasks_s exists = collection.package_management_task.exists(id=pid) assert exists", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "the License. # import pytest try: from StringIO import StringIO except ImportError: from", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "yield task class TestPackageManagementTasks(object): def test_create_task(self, pkg_task): assert pkg_task.operation == \"INSTALL\" assert pkg_task.kind", "pkg_query_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='QUERY' ) yield task class", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "pkg_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield task", "if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']: pkg_task.delete() break assert pkg_task.__dict__['deleted'] def test_package_mgmt_tasks_collection(self, mgmt_root,", "yield fake_iapp_name tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def pkg_task(mgmt_root, iapp_lx): collection =", "Networks Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "assert pkg_task.__dict__['deleted'] def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx): col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col, list) assert", "OF ANY KIND, either express or implied. # See the License for the", "as err: collection = mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd' ) assert err.value.response.status_code == 404 def", "pkg_task.id == resource.id assert pkg_task.selfLink == resource.selfLink def test_exists(self, mgmt_root, pkg_task): pid =", "2.0 (the \"License\"); # you may not use this file except in compliance", "# you may not use this file except in compliance with the License.", "test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx): col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col, list) assert len(col) > 0", "= 'foo-iapp.rpm' sio = StringIO(80*'a') ftu = mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20) yield fake_iapp_name", "try: from StringIO import StringIO except ImportError: from io import StringIO from distutils.version", "tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def pkg_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task", "for the specific language governing permissions and # limitations under the License. 
#", "TestPackageManagementTasks(object): def test_create_task(self, pkg_task): assert pkg_task.operation == \"INSTALL\" assert pkg_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate'", "agreed to in writing, software # distributed under the License is distributed on", "greater to pass.' ) @pytest.fixture(scope='function') def iapp_lx(mgmt_root): fake_iapp_name = 'foo-iapp.rpm' sio = StringIO(80*'a')", "collection.package_management_task.load( id='asdasdasd' ) assert err.value.response.status_code == 404 def test_load(self, mgmt_root, pkg_task): collection =", "col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col, list) assert len(col) > 0 def test_create_query_task(self, pkg_query_task):", "the specific language governing permissions and # limitations under the License. # import", "StringIO(80*'a') ftu = mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20) yield fake_iapp_name tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run',", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "except ImportError: from io import StringIO from distutils.version import LooseVersion from requests.exceptions import", "resource = collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id == resource.id assert pkg_task.selfLink == resource.selfLink def test_exists(self,", "str(pkg_task.id) collection = mgmt_root.shared.iapp.package_management_tasks_s exists = collection.package_management_task.exists(id=pid) assert exists is True def test_cancel(self,", "= mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield task @pytest.fixture(scope='function') def pkg_query_task(mgmt_root,", "(the \"License\"); # you may not use this file except in compliance with", "iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='QUERY' ) yield task class TestPackageManagementTasks(object):", "LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'), reason='Needs v12 TMOS or greater to pass.' ) @pytest.fixture(scope='function') def", "= mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col, list) assert len(col) > 0 def test_create_query_task(self, pkg_query_task): assert", "pkg_task.__dict__['canceled'] def test_delete(self, pkg_task): pkg_task.cancel() while True: pkg_task.refresh() if pkg_task.status in ['CANCELED', 'FAILED',", "# # Unless required by applicable law or agreed to in writing, software", "def pkg_query_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='QUERY' ) yield task", "pkg_task): pid = str(pkg_task.id) collection = mgmt_root.shared.iapp.package_management_tasks_s exists = collection.package_management_task.exists(id=pid) assert exists is", "= pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'), reason='Needs v12 TMOS or greater to pass.' 
)", "NOQA def test_load_no_task(self, mgmt_root): with pytest.raises(HTTPError) as err: collection = mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd'", "== \"INSTALL\" assert pkg_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA def test_load_no_task(self, mgmt_root): with", "= mgmt_root.shared.iapp.package_management_tasks_s exists = collection.package_management_task.exists(id=pid) assert exists is True def test_cancel(self, pkg_task): pkg_task.cancel()", "ftu = mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20) yield fake_iapp_name tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name)", "express or implied. # See the License for the specific language governing permissions", "pkg_task.selfLink == resource.selfLink def test_exists(self, mgmt_root, pkg_task): pid = str(pkg_task.id) collection = mgmt_root.shared.iapp.package_management_tasks_s", "is True def test_cancel(self, pkg_task): pkg_task.cancel() assert pkg_task.__dict__['canceled'] def test_delete(self, pkg_task): pkg_task.cancel() while", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "mgmt_root.shared.iapp.package_management_tasks_s resource = collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id == resource.id assert pkg_task.selfLink == resource.selfLink def", "except in compliance with the License. # You may obtain a copy of", "mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='QUERY' ) yield task class TestPackageManagementTasks(object): def test_create_task(self, pkg_task):", "by applicable law or agreed to in writing, software # distributed under the", "collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield task @pytest.fixture(scope='function') def pkg_query_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx): col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col, list) assert len(col) >", "assert pkg_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA def test_load_no_task(self, mgmt_root): with pytest.raises(HTTPError) as", "either express or implied. # See the License for the specific language governing", "reason='Needs v12 TMOS or greater to pass.' ) @pytest.fixture(scope='function') def iapp_lx(mgmt_root): fake_iapp_name =", "err.value.response.status_code == 404 def test_load(self, mgmt_root, pkg_task): collection = mgmt_root.shared.iapp.package_management_tasks_s resource = collection.package_management_task.load(id=pkg_task.id)", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "import HTTPError pytestmark = pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'), reason='Needs v12 TMOS or greater", "Inc. 
# # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "resource.id assert pkg_task.selfLink == resource.selfLink def test_exists(self, mgmt_root, pkg_task): pid = str(pkg_task.id) collection", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'), reason='Needs v12 TMOS or greater to pass.' ) @pytest.fixture(scope='function')", "collection = mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd' ) assert err.value.response.status_code == 404 def test_load(self, mgmt_root,", "404 def test_load(self, mgmt_root, pkg_task): collection = mgmt_root.shared.iapp.package_management_tasks_s resource = collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id", "in ['CANCELED', 'FAILED', 'FINISHED']: pkg_task.delete() break assert pkg_task.__dict__['deleted'] def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx): col", "= mgmt_root.shared.iapp.package_management_tasks_s resource = collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id == resource.id assert pkg_task.selfLink == resource.selfLink", "task = collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield task @pytest.fixture(scope='function') def pkg_query_task(mgmt_root, iapp_lx): collection", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "mgmt_root.shared.iapp.package_management_tasks_s exists = collection.package_management_task.exists(id=pid) assert exists is True def test_cancel(self, pkg_task): pkg_task.cancel() assert", "test_create_task(self, pkg_task): assert pkg_task.operation == \"INSTALL\" assert pkg_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA", "pkg_task): pkg_task.cancel() while True: pkg_task.refresh() if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']: pkg_task.delete() break", "file except in compliance with the License. # You may obtain a copy", "TMOS or greater to pass.' ) @pytest.fixture(scope='function') def iapp_lx(mgmt_root): fake_iapp_name = 'foo-iapp.rpm' sio", "from io import StringIO from distutils.version import LooseVersion from requests.exceptions import HTTPError pytestmark", "sio = StringIO(80*'a') ftu = mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20) yield fake_iapp_name tpath_name =", "task @pytest.fixture(scope='function') def pkg_query_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='QUERY' )", "task = collection.package_management_task.create( operation='QUERY' ) yield task class TestPackageManagementTasks(object): def test_create_task(self, pkg_task): assert", "= collection.package_management_task.create( operation='QUERY' ) yield task class TestPackageManagementTasks(object): def test_create_task(self, pkg_task): assert pkg_task.operation", "v12 TMOS or greater to pass.' 
) @pytest.fixture(scope='function') def iapp_lx(mgmt_root): fake_iapp_name = 'foo-iapp.rpm'", "iapp_lx): col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col, list) assert len(col) > 0 def test_create_query_task(self,", "mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20) yield fake_iapp_name tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def", "test_cancel(self, pkg_task): pkg_task.cancel() assert pkg_task.__dict__['canceled'] def test_delete(self, pkg_task): pkg_task.cancel() while True: pkg_task.refresh() if", "True: pkg_task.refresh() if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']: pkg_task.delete() break assert pkg_task.__dict__['deleted'] def", "import pytest try: from StringIO import StringIO except ImportError: from io import StringIO", "fake_iapp_name = 'foo-iapp.rpm' sio = StringIO(80*'a') ftu = mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20) yield", ") yield task @pytest.fixture(scope='function') def pkg_query_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create(", "@pytest.fixture(scope='function') def pkg_query_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='QUERY' ) yield", ") assert err.value.response.status_code == 404 def test_load(self, mgmt_root, pkg_task): collection = mgmt_root.shared.iapp.package_management_tasks_s resource", "assert len(col) > 0 def test_create_query_task(self, pkg_query_task): assert pkg_query_task.operation == \"QUERY\" assert pkg_query_task.kind", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "from requests.exceptions import HTTPError pytestmark = pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'), reason='Needs v12 TMOS", "collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id == resource.id assert pkg_task.selfLink == resource.selfLink def test_exists(self, mgmt_root, pkg_task):", "def test_create_task(self, pkg_task): assert pkg_task.operation == \"INSTALL\" assert pkg_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' #", "License for the specific language governing permissions and # limitations under the License.", "= mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20) yield fake_iapp_name tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function')", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "test_exists(self, mgmt_root, pkg_task): pid = str(pkg_task.id) collection = mgmt_root.shared.iapp.package_management_tasks_s exists = collection.package_management_task.exists(id=pid) assert", "ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20) yield fake_iapp_name tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def pkg_task(mgmt_root,", "['CANCELED', 'FAILED', 'FINISHED']: pkg_task.delete() break assert pkg_task.__dict__['deleted'] def 
test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx): col =", "the License. # You may obtain a copy of the License at #", "collection.package_management_task.create( operation='QUERY' ) yield task class TestPackageManagementTasks(object): def test_create_task(self, pkg_task): assert pkg_task.operation ==", "pkg_task.operation == \"INSTALL\" assert pkg_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA def test_load_no_task(self, mgmt_root):", "mgmt_root, pkg_task): collection = mgmt_root.shared.iapp.package_management_tasks_s resource = collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id == resource.id assert", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "assert isinstance(col, list) assert len(col) > 0 def test_create_query_task(self, pkg_query_task): assert pkg_query_task.operation ==", "len(col) > 0 def test_create_query_task(self, pkg_query_task): assert pkg_query_task.operation == \"QUERY\" assert pkg_query_task.kind ==", "def test_load(self, mgmt_root, pkg_task): collection = mgmt_root.shared.iapp.package_management_tasks_s resource = collection.package_management_task.load(id=pkg_task.id) assert pkg_task.id ==", "mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def pkg_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='INSTALL',", "yield task @pytest.fixture(scope='function') def pkg_query_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='QUERY'", "= StringIO(80*'a') ftu = mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20) yield fake_iapp_name tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name)", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "'/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def pkg_task(mgmt_root, iapp_lx): collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create(", "implied. # See the License for the specific language governing permissions and #", "while True: pkg_task.refresh() if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']: pkg_task.delete() break assert pkg_task.__dict__['deleted']", "Copyright 2015 F5 Networks Inc. # # Licensed under the Apache License, Version", "list) assert len(col) > 0 def test_create_query_task(self, pkg_query_task): assert pkg_query_task.operation == \"QUERY\" assert", "\"License\"); # you may not use this file except in compliance with the", "# import pytest try: from StringIO import StringIO except ImportError: from io import", "pid = str(pkg_task.id) collection = mgmt_root.shared.iapp.package_management_tasks_s exists = collection.package_management_task.exists(id=pid) assert exists is True", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "LooseVersion('12.0.0'), reason='Needs v12 TMOS or greater to pass.' 
) @pytest.fixture(scope='function') def iapp_lx(mgmt_root): fake_iapp_name", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "test_delete(self, pkg_task): pkg_task.cancel() while True: pkg_task.refresh() if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']: pkg_task.delete()", "required by applicable law or agreed to in writing, software # distributed under", "def test_load_no_task(self, mgmt_root): with pytest.raises(HTTPError) as err: collection = mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd' )", "pkg_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA def test_load_no_task(self, mgmt_root): with pytest.raises(HTTPError) as err:", "== \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA def test_load_no_task(self, mgmt_root): with pytest.raises(HTTPError) as err: collection", "mgmt_root): with pytest.raises(HTTPError) as err: collection = mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd' ) assert err.value.response.status_code", "== resource.id assert pkg_task.selfLink == resource.selfLink def test_exists(self, mgmt_root, pkg_task): pid = str(pkg_task.id)", "applicable law or agreed to in writing, software # distributed under the License", "chunk_size=20) yield fake_iapp_name tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name) mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name) @pytest.fixture(scope='function') def pkg_task(mgmt_root, iapp_lx): collection", "resource.selfLink def test_exists(self, mgmt_root, pkg_task): pid = str(pkg_task.id) collection = mgmt_root.shared.iapp.package_management_tasks_s exists =", "id='asdasdasd' ) assert err.value.response.status_code == 404 def test_load(self, mgmt_root, pkg_task): collection = mgmt_root.shared.iapp.package_management_tasks_s", "mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd' ) assert err.value.response.status_code == 404 def test_load(self, mgmt_root, pkg_task): collection", "StringIO import StringIO except ImportError: from io import StringIO from distutils.version import LooseVersion", "== 404 def test_load(self, mgmt_root, pkg_task): collection = mgmt_root.shared.iapp.package_management_tasks_s resource = collection.package_management_task.load(id=pkg_task.id) assert", "def test_create_query_task(self, pkg_query_task): assert pkg_query_task.operation == \"QUERY\" assert pkg_query_task.kind == \\ 'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' #", "> 0 def test_create_query_task(self, pkg_query_task): assert pkg_query_task.operation == \"QUERY\" assert pkg_query_task.kind == \\", "isinstance(col, list) assert len(col) > 0 def test_create_query_task(self, pkg_query_task): assert pkg_query_task.operation == \"QUERY\"", "or agreed to in writing, software # distributed under the License is distributed", "pytest.raises(HTTPError) as err: collection = mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd' ) assert err.value.response.status_code == 404", "'FAILED', 'FINISHED']: pkg_task.delete() break assert pkg_task.__dict__['deleted'] def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx): col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection()", "or implied. 
# See the License for the specific language governing permissions and", "= collection.package_management_task.create( operation='INSTALL', packageFilePath='/var/config/rest/downloads/foo-iapp.rpm' ) yield task @pytest.fixture(scope='function') def pkg_query_task(mgmt_root, iapp_lx): collection =", "= str(pkg_task.id) collection = mgmt_root.shared.iapp.package_management_tasks_s exists = collection.package_management_task.exists(id=pid) assert exists is True def", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "import StringIO except ImportError: from io import StringIO from distutils.version import LooseVersion from", "HTTPError pytestmark = pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'), reason='Needs v12 TMOS or greater to", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "governing permissions and # limitations under the License. # import pytest try: from", "collection = mgmt_root.shared.iapp.package_management_tasks_s task = collection.package_management_task.create( operation='QUERY' ) yield task class TestPackageManagementTasks(object): def", "test_load_no_task(self, mgmt_root): with pytest.raises(HTTPError) as err: collection = mgmt_root.shared.iapp.package_management_tasks_s collection.package_management_task.load( id='asdasdasd' ) assert", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "requests.exceptions import HTTPError pytestmark = pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'), reason='Needs v12 TMOS or", "= collection.package_management_task.exists(id=pid) assert exists is True def test_cancel(self, pkg_task): pkg_task.cancel() assert pkg_task.__dict__['canceled'] def", "exists is True def test_cancel(self, pkg_task): pkg_task.cancel() assert pkg_task.__dict__['canceled'] def test_delete(self, pkg_task): pkg_task.cancel()", ") @pytest.fixture(scope='function') def iapp_lx(mgmt_root): fake_iapp_name = 'foo-iapp.rpm' sio = StringIO(80*'a') ftu = mgmt_root.shared.file_transfer.uploads", "2015 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0", "with the License. # You may obtain a copy of the License at", "assert err.value.response.status_code == 404 def test_load(self, mgmt_root, pkg_task): collection = mgmt_root.shared.iapp.package_management_tasks_s resource =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "def iapp_lx(mgmt_root): fake_iapp_name = 'foo-iapp.rpm' sio = StringIO(80*'a') ftu = mgmt_root.shared.file_transfer.uploads ftu.upload_stringio(sio, fake_iapp_name,", ") yield task class TestPackageManagementTasks(object): def test_create_task(self, pkg_task): assert pkg_task.operation == \"INSTALL\" assert", "pkg_task.delete() break assert pkg_task.__dict__['deleted'] def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx): col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection() assert isinstance(col,", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "class TestPackageManagementTasks(object): def test_create_task(self, pkg_task): assert pkg_task.operation == \"INSTALL\" assert pkg_task.kind == \\" ]
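The pytestmark above reads a custom --release command-line option, which pytest only knows about if a conftest.py registers it. A hedged sketch of such a registration (the option name comes from the tests; the default and help text are assumptions):

# conftest.py (sketch)
def pytest_addoption(parser):
    parser.addoption(
        '--release',
        action='store',
        default='11.6.0',      # assumed default; the tests only need the flag to exist
        help='TMOS release of the BIG-IP under test.'
    )
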
[ "draw_branch(branch_length-10, pen_size) turtle.right(20) turtle.backward(branch_length) def main(): # 画笔起始位置 turtle.right(90) turtle.penup() turtle.forward(300) turtle.pendown() turtle.left(90)", "# 画笔起始位置 turtle.right(90) turtle.penup() turtle.forward(300) turtle.pendown() turtle.left(90) turtle.left(90) draw_branch(100, 5) turtle.exitonclick() if __name__", "画笔起始位置 turtle.right(90) turtle.penup() turtle.forward(300) turtle.pendown() turtle.left(90) turtle.left(90) draw_branch(100, 5) turtle.exitonclick() if __name__ ==", "turtle.right(90) turtle.penup() turtle.forward(300) turtle.pendown() turtle.left(90) turtle.left(90) draw_branch(100, 5) turtle.exitonclick() if __name__ == '__main__':", "日期:2018/08/19 \"\"\" import turtle def draw_branch(branch_length, pen_size): if(branch_length > 0): turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10,", "turtle.backward(branch_length) def main(): # 画笔起始位置 turtle.right(90) turtle.penup() turtle.forward(300) turtle.pendown() turtle.left(90) turtle.left(90) draw_branch(100, 5)", "def main(): # 画笔起始位置 turtle.right(90) turtle.penup() turtle.forward(300) turtle.pendown() turtle.left(90) turtle.left(90) draw_branch(100, 5) turtle.exitonclick()", "import turtle def draw_branch(branch_length, pen_size): if(branch_length > 0): turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10, pen_size) turtle.left(40)", "def draw_branch(branch_length, pen_size): if(branch_length > 0): turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10, pen_size) turtle.left(40) draw_branch(branch_length-10, pen_size)", "turtle.right(20) turtle.backward(branch_length) def main(): # 画笔起始位置 turtle.right(90) turtle.penup() turtle.forward(300) turtle.pendown() turtle.left(90) turtle.left(90) draw_branch(100,", "版本:1.0 日期:2018/08/19 \"\"\" import turtle def draw_branch(branch_length, pen_size): if(branch_length > 0): turtle.forward(branch_length) turtle.right(20)", "turtle.right(20) draw_branch(branch_length-10, pen_size) turtle.left(40) draw_branch(branch_length-10, pen_size) turtle.right(20) turtle.backward(branch_length) def main(): # 画笔起始位置 turtle.right(90)", "draw_branch(branch_length, pen_size): if(branch_length > 0): turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10, pen_size) turtle.left(40) draw_branch(branch_length-10, pen_size) turtle.right(20)", "pen_size) turtle.right(20) turtle.backward(branch_length) def main(): # 画笔起始位置 turtle.right(90) turtle.penup() turtle.forward(300) turtle.pendown() turtle.left(90) turtle.left(90)", "> 0): turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10, pen_size) turtle.left(40) draw_branch(branch_length-10, pen_size) turtle.right(20) turtle.backward(branch_length) def main():", "0): turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10, pen_size) turtle.left(40) draw_branch(branch_length-10, pen_size) turtle.right(20) turtle.backward(branch_length) def main(): #", "main(): # 画笔起始位置 turtle.right(90) turtle.penup() turtle.forward(300) turtle.pendown() turtle.left(90) turtle.left(90) draw_branch(100, 5) turtle.exitonclick() if", "if(branch_length > 0): turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10, pen_size) turtle.left(40) draw_branch(branch_length-10, pen_size) turtle.right(20) turtle.backward(branch_length) def", "pen_size): if(branch_length > 0): turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10, pen_size) turtle.left(40) 
draw_branch(branch_length-10, pen_size) turtle.right(20) turtle.backward(branch_length)", "draw_branch(branch_length-10, pen_size) turtle.left(40) draw_branch(branch_length-10, pen_size) turtle.right(20) turtle.backward(branch_length) def main(): # 画笔起始位置 turtle.right(90) turtle.penup()", "turtle def draw_branch(branch_length, pen_size): if(branch_length > 0): turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10, pen_size) turtle.left(40) draw_branch(branch_length-10,", "pen_size) turtle.left(40) draw_branch(branch_length-10, pen_size) turtle.right(20) turtle.backward(branch_length) def main(): # 画笔起始位置 turtle.right(90) turtle.penup() turtle.forward(300)", "功能:分形树 版本:1.0 日期:2018/08/19 \"\"\" import turtle def draw_branch(branch_length, pen_size): if(branch_length > 0): turtle.forward(branch_length)", "turtle.left(40) draw_branch(branch_length-10, pen_size) turtle.right(20) turtle.backward(branch_length) def main(): # 画笔起始位置 turtle.right(90) turtle.penup() turtle.forward(300) turtle.pendown()", "turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10, pen_size) turtle.left(40) draw_branch(branch_length-10, pen_size) turtle.right(20) turtle.backward(branch_length) def main(): # 画笔起始位置", "turtle.penup() turtle.forward(300) turtle.pendown() turtle.left(90) turtle.left(90) draw_branch(100, 5) turtle.exitonclick() if __name__ == '__main__': main()", "\"\"\" import turtle def draw_branch(branch_length, pen_size): if(branch_length > 0): turtle.forward(branch_length) turtle.right(20) draw_branch(branch_length-10, pen_size)", "\"\"\" 功能:分形树 版本:1.0 日期:2018/08/19 \"\"\" import turtle def draw_branch(branch_length, pen_size): if(branch_length > 0):" ]
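Since draw_branch accepts pen_size without consuming it, here is a small variation (an illustration, not part of the original script) that puts the parameter to work, tapering the pen width as the branches shorten:

import turtle

def draw_branch_tapered(branch_length, pen_size):
    # Same recursion as draw_branch above, with the pen thinning on shorter branches.
    if branch_length > 0:
        turtle.pensize(max(pen_size * branch_length / 100.0, 1))
        turtle.forward(branch_length)
        turtle.right(20)
        draw_branch_tapered(branch_length - 10, pen_size)
        turtle.left(40)
        draw_branch_tapered(branch_length - 10, pen_size)
        turtle.right(20)
        turtle.backward(branch_length)
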
[ "# # Permission is hereby granted, free of charge, to any person #", "= False if version < 8: self.mouse_move = False if version < 9:", "do a performance test on startup? self.performance_test = True # The language we", "anymore.) self.mute = { } # Joystick mappings. self.joymap = dict() # The", "False) def get_all_mixers(self): return renpy.audio.music.get_all_mixers() def __eq__(self, other): return vars(self) == vars(other) renpy.game.Preferences", "OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING", "= \"auto\" # Should we do a performance test on startup? self.performance_test =", "\"button_select\" ], \"pad_a_press\" : [ \"dismiss\", \"button_select\" ], \"pad_b_press\" : [ \"button_alternate\" ],", "# Permission is hereby granted, free of charge, to any person # obtaining", "(not mute) and (self.volumes.get(mixer, 1.0) == 0.0): self.volumes[mixer] = 1.0 def get_mute(self, mixer):", "to provided displayables if possible? self.video_image_fallback = False self.skip_after_choices = False # Mixer", "(the \"Software\"), to deal in the Software without restriction, # including without limitation", "[ \"game_menu\", ], \"pad_start_press\" : [ \"game_menu\", ], \"pad_y_press\" : [ \"hide_windows\", ],", "[ \"game_menu\", ], \"pad_y_press\" : [ \"hide_windows\", ], \"pad_rightshoulder_press\" : [ \"rollforward\", ],", "# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "\"pad_a_press\" : [ \"dismiss\", \"button_select\" ], \"pad_b_press\" : [ \"button_alternate\" ], \"pad_dleft_press\" :", "[ \"focus_right\", \"bar_right\" ], \"pad_rightx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_dpup_press\" : [", "# Should we self-voice? self.self_voicing = False # Should we emphasize audio? self.emphasize_audio", "# obtaining a copy of this software and associated documentation files # (the", "[ \"focus_down\", \"bar_down\" ], \"pad_lefty_pos\" : [ \"focus_down\", \"bar_down\" ], \"pad_righty_pos\" : [", "not in self.volumes: return False return self.mute[mixer] def init_mixers(self): for i in renpy.audio.music.get_all_mixers():", "# and to permit persons to whom the Software is furnished to do", "= 1.0 def get_mute(self, mixer): if mixer not in self.volumes: return False return", "= True if version < 13: self.self_voicing = False if version < 14:", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION #", "self.mute[mixer] = False self.volumes[mixer] = volume def get_volume(self, mixer): if mixer not in", "= True # Should we disengage auto-forward mode after a click? self.afm_after_click =", "= False if version < 15: self.pad_enabled = True if version < 17:", "transitions. # 1 - Only non-default transitions. # 0 - No transitions. 
self.transitions", "do so, # subject to the following conditions: # # The above copyright", "\"game_menu\", ], \"pad_y_press\" : [ \"hide_windows\", ], \"pad_rightshoulder_press\" : [ \"rollforward\", ], \"pad_righttrigger_press\"", "\"pad_lefty_pos\" : [ \"focus_down\", \"bar_down\" ], \"pad_righty_pos\" : [ \"focus_down\", \"bar_down\" ], }", "], \"pad_lefty_pos\" : [ \"focus_down\", \"bar_down\" ], \"pad_righty_pos\" : [ \"focus_down\", \"bar_down\" ],", "= None if version < 4: self.renderer = \"auto\" self.performance_test = True if", "self.volumes[mixer] def set_mute(self, mixer, mute): self.mute[mixer] = mute if (not mute) and (self.volumes.get(mixer,", ": [ \"focus_right\", \"bar_right\" ], \"pad_leftx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_rightx_pos\" :", "not in self.volumes: return 0.0 if self.mute.get(mixer, False): return 0.0 return self.volumes[mixer] def", "< 1: self.mute_volumes = 0 if version < 2: self.using_afm_enable = False if", "for translations. self.language = None # Should we self-voice? self.self_voicing = False #", "def __init__(self): self.fullscreen = False self.skip_unseen = False self.text_cps = 0 self.afm_time =", "set_volume(self, mixer, volume): if volume != 0: self.mute[mixer] = False self.volumes[mixer] = volume", "self.desktop_rollback_side = \"disable\" def set_volume(self, mixer, volume): if volume != 0: self.mute[mixer] =", "return renpy.audio.music.get_all_mixers() def __eq__(self, other): return vars(self) == vars(other) renpy.game.Preferences = Preferences renpy.game.preferences", "without restriction, # including without limitation the rights to use, copy, modify, merge,", "conditions: # # The above copyright notice and this permission notice shall be", "\"focus_right\", \"bar_right\" ], \"pad_leftx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_rightx_pos\" : [ \"focus_right\",", "without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense,", "\"pad_leftshoulder_press\" : [ \"rollback\", ], \"pad_lefttrigger_pos\" : [ \"rollback\", ], \"pad_back_press\" : [", "USE import renpy.audio pad_bindings = { \"pad_leftshoulder_press\" : [ \"rollback\", ], \"pad_lefttrigger_pos\" :", "or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "\"bar_up\" ], \"pad_lefty_neg\" : [ \"focus_up\", \"bar_up\" ], \"pad_righty_neg\" : [ \"focus_up\", \"bar_up\"", "we disengage auto-forward mode after a click? self.afm_after_click = False # 2 -", "All transitions. # 1 - Only non-default transitions. # 0 - No transitions.", "\"rollback\", ], \"pad_lefttrigger_pos\" : [ \"rollback\", ], \"pad_back_press\" : [ \"rollback\", ], \"pad_guide_press\"", "mixer, volume): if volume != 0: self.mute[mixer] = False self.volumes[mixer] = volume def", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR", "No transitions. self.transitions = 2 # Should video sprites always default to provided", "the current volume (between 0 and 1). self.volumes = { } # True", "def get_all_mixers(self): return renpy.audio.music.get_all_mixers() def __eq__(self, other): return vars(self) == vars(other) renpy.game.Preferences =", "KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "# WITH THE SOFTWARE OR THE USE import renpy.audio pad_bindings = { \"pad_leftshoulder_press\"", "this permission notice shall be # included in all copies or substantial portions", "virtual size at the time self.physical_size was set. 
self.virtual_size = None # The", "of charge, to any person # obtaining a copy of this software and", "\"bar_right\" ], \"pad_dpup_press\" : [ \"focus_up\", \"bar_up\" ], \"pad_lefty_neg\" : [ \"focus_up\", \"bar_up\"", "self.mute.setdefault(i, False) def get_all_mixers(self): return renpy.audio.music.get_all_mixers() def __eq__(self, other): return vars(self) == vars(other)", "the voice to stop? self.wait_voice = True # Should we disengage auto-forward mode", "\"dismiss\", \"button_select\" ], \"pad_b_press\" : [ \"button_alternate\" ], \"pad_dleft_press\" : [ \"focus_left\", \"bar_left\"", "in self.volumes: return 0.0 if self.mute.get(mixer, False): return 0.0 return self.volumes[mixer] def set_mute(self,", "= False if version < 9: self.afm_after_click = False if version < 11:", "for the voice to stop? self.wait_voice = True # Should we disengage auto-forward", "size of the window, or None if we don't know it yet. self.physical_size", ": [ \"focus_up\", \"bar_up\" ], \"pad_lefty_neg\" : [ \"focus_up\", \"bar_up\" ], \"pad_righty_neg\" :", "person # obtaining a copy of this software and associated documentation files #", "], \"pad_start_press\" : [ \"game_menu\", ], \"pad_y_press\" : [ \"hide_windows\", ], \"pad_rightshoulder_press\" :", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION #", "OR IN CONNECTION # WITH THE SOFTWARE OR THE USE import renpy.audio pad_bindings", "we use. self.renderer = \"auto\" # Should we do a performance test on", "\"bar_left\" ], \"pad_rightx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_dpright_press\" : [ \"focus_right\", \"bar_right\"", "\"bar_left\" ], \"pad_leftx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_rightx_neg\" : [ \"focus_left\", \"bar_left\"", "], \"pad_rightx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_dpup_press\" : [ \"focus_up\", \"bar_up\" ],", "= False if version < 14: self.emphasize_audio = False if version < 15:", "any person # obtaining a copy of this software and associated documentation files", "version < 7: self.voice_sustain = False if version < 8: self.mouse_move = False", "self.afm_after_click = False # 2 - All transitions. # 1 - Only non-default", "HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "self.mute[mixer] def init_mixers(self): for i in renpy.audio.music.get_all_mixers(): self.volumes.setdefault(i, 1.0) self.mute.setdefault(i, False) def get_all_mixers(self):", "False if version < 15: self.pad_enabled = True if version < 17: self.init_rollback_side()", "self.renderer = \"auto\" # Should we do a performance test on startup? self.performance_test", "we self-voice? self.self_voicing = False # Should we emphasize audio? self.emphasize_audio = False", "merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and", "to the following conditions: # # The above copyright notice and this permission", "\"pad_dpright_press\" : [ \"focus_right\", \"bar_right\" ], \"pad_leftx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_rightx_pos\"", "# Should we disengage auto-forward mode after a click? self.afm_after_click = False #", "= True if version < 17: self.init_rollback_side() if version < 18: self.virtual_size =", "\"button_select\" ], \"pad_b_press\" : [ \"button_alternate\" ], \"pad_dleft_press\" : [ \"focus_left\", \"bar_left\" ],", ": [ \"rollforward\", ], \"pad_righttrigger_press\" : [ \"dismiss\", \"button_select\" ], \"pad_a_press\" : [", "{ } # True if the channel should not play music. 
False #", "of the Software, # and to permit persons to whom the Software is", "translations. self.language = None # Should we self-voice? self.self_voicing = False # Should", "self.pad_enabled = True self.init_rollback_side() def init_rollback_side(self): self.mobile_rollback_side = \"disable\" self.desktop_rollback_side = \"disable\" def", "(self.volumes.get(mixer, 1.0) == 0.0): self.volumes[mixer] = 1.0 def get_mute(self, mixer): if mixer not", "mixer): if mixer not in self.volumes: return False return self.mute[mixer] def init_mixers(self): for", ": [ \"focus_up\", \"bar_up\" ], \"pad_dpdown_press\" : [ \"focus_down\", \"bar_down\" ], \"pad_lefty_pos\" :", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE", "False if version < 11: self.show_empty_window = True if version < 13: self.self_voicing", "emphasize audio? self.emphasize_audio = False # Is the gamepad enabled? self.pad_enabled = True", "< 9: self.afm_after_click = False if version < 11: self.show_empty_window = True if", "software and associated documentation files # (the \"Software\"), to deal in the Software", "\"pad_leftx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_rightx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_dpup_press\"", "0 and 1). self.volumes = { } # True if the channel should", "# Mixer channel info. # A map from channel name to the current", "None if version < 6: self.wait_voice = True if version < 7: self.voice_sustain", "in the Software without restriction, # including without limitation the rights to use,", "it yet. self.physical_size = None # The virtual size at the time self.physical_size", "copy of this software and associated documentation files # (the \"Software\"), to deal", "None if version < 4: self.renderer = \"auto\" self.performance_test = True if version", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM,", "5: self.language = None if version < 6: self.wait_voice = True if version", "we use for translations. self.language = None # Should we self-voice? self.self_voicing =", "self.volumes[mixer] = 1.0 def get_mute(self, mixer): if mixer not in self.volumes: return False", "(Not used anymore.) self.mute = { } # Joystick mappings. self.joymap = dict()", "play music. False # otherwise. (Not used anymore.) self.mute = { } #", "Permission is hereby granted, free of charge, to any person # obtaining a", "[ \"focus_down\", \"bar_down\" ], \"pad_righty_pos\" : [ \"focus_down\", \"bar_down\" ], } class Preferences(renpy.object.Object):", "# 2 - All transitions. # 1 - Only non-default transitions. # 0", "\"pad_rightx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_dpup_press\" : [ \"focus_up\", \"bar_up\" ], \"pad_lefty_neg\"", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY,", ": [ \"game_menu\", ], \"pad_start_press\" : [ \"game_menu\", ], \"pad_y_press\" : [ \"hide_windows\",", "True self.using_afm_enable = False self.voice_sustain = False self.mouse_move = False self.show_empty_window = True", ": [ \"rollback\", ], \"pad_guide_press\" : [ \"game_menu\", ], \"pad_start_press\" : [ \"game_menu\",", "that will one day be persisted. \"\"\" __version__ = 18 def after_upgrade(self, version):", "8: self.mouse_move = False if version < 9: self.afm_after_click = False if version", "sprites always default to provided displayables if possible? 
self.video_image_fallback = False self.skip_after_choices =", "sell copies of the Software, # and to permit persons to whom the", "deal in the Software without restriction, # including without limitation the rights to", "don't know it yet. self.physical_size = None # The virtual size at the", "on startup? self.performance_test = True # The language we use for translations. self.language", "< 4: self.renderer = \"auto\" self.performance_test = True if version < 5: self.language", "Should we wait for the voice to stop? self.wait_voice = True # Should", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR", "self.performance_test = True # The language we use for translations. self.language = None", "], \"pad_dpright_press\" : [ \"focus_right\", \"bar_right\" ], \"pad_leftx_pos\" : [ \"focus_right\", \"bar_right\" ],", "\"pad_dpdown_press\" : [ \"focus_down\", \"bar_down\" ], \"pad_lefty_pos\" : [ \"focus_down\", \"bar_down\" ], \"pad_righty_pos\"", "< 3: self.physical_size = None if version < 4: self.renderer = \"auto\" self.performance_test", "= { } # Joystick mappings. self.joymap = dict() # The size of", "copies of the Software, # and to permit persons to whom the Software", "\"rollback\", ], \"pad_guide_press\" : [ \"game_menu\", ], \"pad_start_press\" : [ \"game_menu\", ], \"pad_y_press\"", "WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "Should we self-voice? self.self_voicing = False # Should we emphasize audio? self.emphasize_audio =", "0: self.mute[mixer] = False self.volumes[mixer] = volume def get_volume(self, mixer): if mixer not", "# Should we do a performance test on startup? self.performance_test = True #", "audio? self.emphasize_audio = False # Is the gamepad enabled? self.pad_enabled = True self.init_rollback_side()", "self.afm_enable = True self.using_afm_enable = False self.voice_sustain = False self.mouse_move = False self.show_empty_window", ": [ \"game_menu\", ], \"pad_y_press\" : [ \"hide_windows\", ], \"pad_rightshoulder_press\" : [ \"rollforward\",", "return 0.0 return self.volumes[mixer] def set_mute(self, mixer, mute): self.mute[mixer] = mute if (not", "if we don't know it yet. self.physical_size = None # The virtual size", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING", "the Software without restriction, # including without limitation the rights to use, copy,", "distribute, sublicense, and/or sell copies of the Software, # and to permit persons", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT", "THE USE import renpy.audio pad_bindings = { \"pad_leftshoulder_press\" : [ \"rollback\", ], \"pad_lefttrigger_pos\"", "\"pad_rightx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_dpright_press\" : [ \"focus_right\", \"bar_right\" ], \"pad_leftx_pos\"", "if version < 18: self.virtual_size = None self.video_image_fallback = False def __init__(self): self.fullscreen", "version < 17: self.init_rollback_side() if version < 18: self.virtual_size = None self.video_image_fallback =", "= False self.mouse_move = False self.show_empty_window = True # Should we wait for", "Should video sprites always default to provided displayables if possible? self.video_image_fallback = False", "self.skip_after_choices = False # Mixer channel info. 
# A map from channel name", "dict() # The size of the window, or None if we don't know", "for i in renpy.audio.music.get_all_mixers(): self.volumes.setdefault(i, 1.0) self.mute.setdefault(i, False) def get_all_mixers(self): return renpy.audio.music.get_all_mixers() def", "AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "\"pad_righttrigger_press\" : [ \"dismiss\", \"button_select\" ], \"pad_a_press\" : [ \"dismiss\", \"button_select\" ], \"pad_b_press\"", "False # Should we emphasize audio? self.emphasize_audio = False # Is the gamepad", "in all copies or substantial portions of the Software. # # THE SOFTWARE", "OR THE USE import renpy.audio pad_bindings = { \"pad_leftshoulder_press\" : [ \"rollback\", ],", "init_rollback_side(self): self.mobile_rollback_side = \"disable\" self.desktop_rollback_side = \"disable\" def set_volume(self, mixer, volume): if volume", "OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO", "\"\"\" Stores preferences that will one day be persisted. \"\"\" __version__ = 18", "the gamepad enabled? self.pad_enabled = True self.init_rollback_side() def init_rollback_side(self): self.mobile_rollback_side = \"disable\" self.desktop_rollback_side", "persisted. \"\"\" __version__ = 18 def after_upgrade(self, version): if version < 1: self.mute_volumes", "{ } # Joystick mappings. self.joymap = dict() # The size of the", "return self.volumes[mixer] def set_mute(self, mixer, mute): self.mute[mixer] = mute if (not mute) and", "= None self.video_image_fallback = False def __init__(self): self.fullscreen = False self.skip_unseen = False", "shall be # included in all copies or substantial portions of the Software.", "\"bar_right\" ], \"pad_leftx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_rightx_pos\" : [ \"focus_right\", \"bar_right\"", "], \"pad_y_press\" : [ \"hide_windows\", ], \"pad_rightshoulder_press\" : [ \"rollforward\", ], \"pad_righttrigger_press\" :", "{ \"pad_leftshoulder_press\" : [ \"rollback\", ], \"pad_lefttrigger_pos\" : [ \"rollback\", ], \"pad_back_press\" :", "} # True if the channel should not play music. False # otherwise.", "if mixer not in self.volumes: return 0.0 if self.mute.get(mixer, False): return 0.0 return", "= None # The virtual size at the time self.physical_size was set. self.virtual_size", "use for translations. self.language = None # Should we self-voice? self.self_voicing = False", "False def __init__(self): self.fullscreen = False self.skip_unseen = False self.text_cps = 0 self.afm_time", "AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "The virtual size at the time self.physical_size was set. self.virtual_size = None #", "ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE", "the Software, # and to permit persons to whom the Software is furnished", "THE SOFTWARE OR THE USE import renpy.audio pad_bindings = { \"pad_leftshoulder_press\" : [", "Only non-default transitions. # 0 - No transitions. self.transitions = 2 # Should", "gamepad enabled? self.pad_enabled = True self.init_rollback_side() def init_rollback_side(self): self.mobile_rollback_side = \"disable\" self.desktop_rollback_side =", "self.self_voicing = False # Should we emphasize audio? self.emphasize_audio = False # Is", "test on startup? 
self.performance_test = True # The language we use for translations.", "the following conditions: # # The above copyright notice and this permission notice", "version < 13: self.self_voicing = False if version < 14: self.emphasize_audio = False", "None # The graphics renderer we use. self.renderer = \"auto\" # Should we", "# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "- All transitions. # 1 - Only non-default transitions. # 0 - No", "False self.skip_unseen = False self.text_cps = 0 self.afm_time = 0 self.afm_enable = True", "[ \"focus_left\", \"bar_left\" ], \"pad_dpright_press\" : [ \"focus_right\", \"bar_right\" ], \"pad_leftx_pos\" : [", "(between 0 and 1). self.volumes = { } # True if the channel", "if version < 4: self.renderer = \"auto\" self.performance_test = True if version <", "and to permit persons to whom the Software is furnished to do so,", "THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT.", "], \"pad_righty_pos\" : [ \"focus_down\", \"bar_down\" ], } class Preferences(renpy.object.Object): \"\"\" Stores preferences", "1: self.mute_volumes = 0 if version < 2: self.using_afm_enable = False if version", "self.voice_sustain = False if version < 8: self.mouse_move = False if version <", "9: self.afm_after_click = False if version < 11: self.show_empty_window = True if version", "self.volumes: return 0.0 if self.mute.get(mixer, False): return 0.0 return self.volumes[mixer] def set_mute(self, mixer,", "window, or None if we don't know it yet. self.physical_size = None #", "volume (between 0 and 1). self.volumes = { } # True if the", "restriction, # including without limitation the rights to use, copy, modify, merge, #", "self.voice_sustain = False self.mouse_move = False self.show_empty_window = True # Should we wait", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH", "\"hide_windows\", ], \"pad_rightshoulder_press\" : [ \"rollforward\", ], \"pad_righttrigger_press\" : [ \"dismiss\", \"button_select\" ],", "notice shall be # included in all copies or substantial portions of the", "= False self.voice_sustain = False self.mouse_move = False self.show_empty_window = True # Should", "map from channel name to the current volume (between 0 and 1). self.volumes", "if version < 14: self.emphasize_audio = False if version < 15: self.pad_enabled =", "mixer not in self.volumes: return False return self.mute[mixer] def init_mixers(self): for i in", "\"focus_left\", \"bar_left\" ], \"pad_leftx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_rightx_neg\" : [ \"focus_left\",", "to permit persons to whom the Software is furnished to do so, #", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR", "get_volume(self, mixer): if mixer not in self.volumes: return 0.0 if self.mute.get(mixer, False): return", "\"pad_dpup_press\" : [ \"focus_up\", \"bar_up\" ], \"pad_lefty_neg\" : [ \"focus_up\", \"bar_up\" ], \"pad_righty_neg\"", "\"pad_guide_press\" : [ \"game_menu\", ], \"pad_start_press\" : [ \"game_menu\", ], \"pad_y_press\" : [", "permission notice shall be # included in all copies or substantial portions of", "} class Preferences(renpy.object.Object): \"\"\" Stores preferences that will one day be persisted. 
\"\"\"", "= False def __init__(self): self.fullscreen = False self.skip_unseen = False self.text_cps = 0", "and this permission notice shall be # included in all copies or substantial", "WITH THE SOFTWARE OR THE USE import renpy.audio pad_bindings = { \"pad_leftshoulder_press\" :", "1.0) == 0.0): self.volumes[mixer] = 1.0 def get_mute(self, mixer): if mixer not in", "publish, distribute, sublicense, and/or sell copies of the Software, # and to permit", "channel should not play music. False # otherwise. (Not used anymore.) self.mute =", "get_all_mixers(self): return renpy.audio.music.get_all_mixers() def __eq__(self, other): return vars(self) == vars(other) renpy.game.Preferences = Preferences", "The size of the window, or None if we don't know it yet.", "self.volumes: return False return self.mute[mixer] def init_mixers(self): for i in renpy.audio.music.get_all_mixers(): self.volumes.setdefault(i, 1.0)", ": [ \"focus_down\", \"bar_down\" ], } class Preferences(renpy.object.Object): \"\"\" Stores preferences that will", "[ \"rollforward\", ], \"pad_righttrigger_press\" : [ \"dismiss\", \"button_select\" ], \"pad_a_press\" : [ \"dismiss\",", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL", "], \"pad_a_press\" : [ \"dismiss\", \"button_select\" ], \"pad_b_press\" : [ \"button_alternate\" ], \"pad_dleft_press\"", ": [ \"focus_left\", \"bar_left\" ], \"pad_rightx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_dpright_press\" :", "\"bar_right\" ], \"pad_rightx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_dpup_press\" : [ \"focus_up\", \"bar_up\"", "[ \"focus_right\", \"bar_right\" ], \"pad_leftx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_rightx_pos\" : [", "version < 4: self.renderer = \"auto\" self.performance_test = True if version < 5:", "18: self.virtual_size = None self.video_image_fallback = False def __init__(self): self.fullscreen = False self.skip_unseen", "True if the channel should not play music. False # otherwise. (Not used", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT", "version): if version < 1: self.mute_volumes = 0 if version < 2: self.using_afm_enable", "transitions. self.transitions = 2 # Should video sprites always default to provided displayables", "volume != 0: self.mute[mixer] = False self.volumes[mixer] = volume def get_volume(self, mixer): if", "the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell", "day be persisted. \"\"\" __version__ = 18 def after_upgrade(self, version): if version <", "0 self.afm_time = 0 self.afm_enable = True self.using_afm_enable = False self.voice_sustain = False", "[ \"focus_up\", \"bar_up\" ], \"pad_righty_neg\" : [ \"focus_up\", \"bar_up\" ], \"pad_dpdown_press\" : [", "= False # Mixer channel info. # A map from channel name to", "wait for the voice to stop? self.wait_voice = True # Should we disengage", "self.physical_size was set. self.virtual_size = None # The graphics renderer we use. self.renderer", "class Preferences(renpy.object.Object): \"\"\" Stores preferences that will one day be persisted. \"\"\" __version__", "], \"pad_rightx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_dpright_press\" : [ \"focus_right\", \"bar_right\" ],", "# Copyright 2004-2017 <NAME> <<EMAIL>> # # Permission is hereby granted, free of", "< 5: self.language = None if version < 6: self.wait_voice = True if", "time self.physical_size was set. 
self.virtual_size = None # The graphics renderer we use.", "version < 2: self.using_afm_enable = False if version < 3: self.physical_size = None", ": [ \"rollback\", ], \"pad_lefttrigger_pos\" : [ \"rollback\", ], \"pad_back_press\" : [ \"rollback\",", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "None # Should we self-voice? self.self_voicing = False # Should we emphasize audio?", "0.0 return self.volumes[mixer] def set_mute(self, mixer, mute): self.mute[mixer] = mute if (not mute)", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE", "voice to stop? self.wait_voice = True # Should we disengage auto-forward mode after", "BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "non-default transitions. # 0 - No transitions. self.transitions = 2 # Should video", "== 0.0): self.volumes[mixer] = 1.0 def get_mute(self, mixer): if mixer not in self.volumes:", "\"focus_down\", \"bar_down\" ], } class Preferences(renpy.object.Object): \"\"\" Stores preferences that will one day", "self.show_empty_window = True # Should we wait for the voice to stop? self.wait_voice", "a performance test on startup? self.performance_test = True # The language we use", "# A map from channel name to the current volume (between 0 and", "A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "should not play music. False # otherwise. (Not used anymore.) self.mute = {", "if version < 15: self.pad_enabled = True if version < 17: self.init_rollback_side() if", "OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE,", "if version < 2: self.using_afm_enable = False if version < 3: self.physical_size =", "False # 2 - All transitions. # 1 - Only non-default transitions. #", "if version < 1: self.mute_volumes = 0 if version < 2: self.using_afm_enable =", ": [ \"focus_down\", \"bar_down\" ], \"pad_lefty_pos\" : [ \"focus_down\", \"bar_down\" ], \"pad_righty_pos\" :", "return 0.0 if self.mute.get(mixer, False): return 0.0 return self.volumes[mixer] def set_mute(self, mixer, mute):", "the time self.physical_size was set. self.virtual_size = None # The graphics renderer we", "self.mute = { } # Joystick mappings. self.joymap = dict() # The size", "[ \"hide_windows\", ], \"pad_rightshoulder_press\" : [ \"rollforward\", ], \"pad_righttrigger_press\" : [ \"dismiss\", \"button_select\"", ": [ \"focus_down\", \"bar_down\" ], \"pad_righty_pos\" : [ \"focus_down\", \"bar_down\" ], } class", "LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "\"pad_leftx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_rightx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_dpright_press\"", "we do a performance test on startup? 
self.performance_test = True # The language", "7: self.voice_sustain = False if version < 8: self.mouse_move = False if version", "the Software is furnished to do so, # subject to the following conditions:", "[ \"rollback\", ], \"pad_guide_press\" : [ \"game_menu\", ], \"pad_start_press\" : [ \"game_menu\", ],", "if (not mute) and (self.volumes.get(mixer, 1.0) == 0.0): self.volumes[mixer] = 1.0 def get_mute(self,", ": [ \"focus_up\", \"bar_up\" ], \"pad_righty_neg\" : [ \"focus_up\", \"bar_up\" ], \"pad_dpdown_press\" :", "11: self.show_empty_window = True if version < 13: self.self_voicing = False if version", "< 18: self.virtual_size = None self.video_image_fallback = False def __init__(self): self.fullscreen = False", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE", "True if version < 7: self.voice_sustain = False if version < 8: self.mouse_move", "False self.skip_after_choices = False # Mixer channel info. # A map from channel", "# Should we wait for the voice to stop? self.wait_voice = True #", "= True # Should we wait for the voice to stop? self.wait_voice =", "self.virtual_size = None self.video_image_fallback = False def __init__(self): self.fullscreen = False self.skip_unseen =", "rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies", "False if version < 8: self.mouse_move = False if version < 9: self.afm_after_click", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY", "0.0 if self.mute.get(mixer, False): return 0.0 return self.volumes[mixer] def set_mute(self, mixer, mute): self.mute[mixer]", "stop? self.wait_voice = True # Should we disengage auto-forward mode after a click?", "= True if version < 5: self.language = None if version < 6:", "self.physical_size = None if version < 4: self.renderer = \"auto\" self.performance_test = True", "def set_volume(self, mixer, volume): if volume != 0: self.mute[mixer] = False self.volumes[mixer] =", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A", "self.renderer = \"auto\" self.performance_test = True if version < 5: self.language = None", "FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "version < 9: self.afm_after_click = False if version < 11: self.show_empty_window = True", "mute if (not mute) and (self.volumes.get(mixer, 1.0) == 0.0): self.volumes[mixer] = 1.0 def", "if version < 13: self.self_voicing = False if version < 14: self.emphasize_audio =", "Is the gamepad enabled? self.pad_enabled = True self.init_rollback_side() def init_rollback_side(self): self.mobile_rollback_side = \"disable\"", "after a click? self.afm_after_click = False # 2 - All transitions. # 1", "self.self_voicing = False if version < 14: self.emphasize_audio = False if version <", "0 if version < 2: self.using_afm_enable = False if version < 3: self.physical_size", "graphics renderer we use. 
self.renderer = \"auto\" # Should we do a performance", "is furnished to do so, # subject to the following conditions: # #", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR", "ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "1.0) self.mute.setdefault(i, False) def get_all_mixers(self): return renpy.audio.music.get_all_mixers() def __eq__(self, other): return vars(self) ==", "], \"pad_leftx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_rightx_pos\" : [ \"focus_right\", \"bar_right\" ],", "mixer not in self.volumes: return 0.0 if self.mute.get(mixer, False): return 0.0 return self.volumes[mixer]", "associated documentation files # (the \"Software\"), to deal in the Software without restriction,", "we wait for the voice to stop? self.wait_voice = True # Should we", "= \"disable\" def set_volume(self, mixer, volume): if volume != 0: self.mute[mixer] = False", "self.wait_voice = True if version < 7: self.voice_sustain = False if version <", "self.wait_voice = True # Should we disengage auto-forward mode after a click? self.afm_after_click", "< 17: self.init_rollback_side() if version < 18: self.virtual_size = None self.video_image_fallback = False", "renpy.audio pad_bindings = { \"pad_leftshoulder_press\" : [ \"rollback\", ], \"pad_lefttrigger_pos\" : [ \"rollback\",", "False return self.mute[mixer] def init_mixers(self): for i in renpy.audio.music.get_all_mixers(): self.volumes.setdefault(i, 1.0) self.mute.setdefault(i, False)", "[ \"focus_up\", \"bar_up\" ], \"pad_dpdown_press\" : [ \"focus_down\", \"bar_down\" ], \"pad_lefty_pos\" : [", "= volume def get_volume(self, mixer): if mixer not in self.volumes: return 0.0 if", "obtaining a copy of this software and associated documentation files # (the \"Software\"),", "and/or sell copies of the Software, # and to permit persons to whom", "AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the", "!= 0: self.mute[mixer] = False self.volumes[mixer] = volume def get_volume(self, mixer): if mixer", "= \"disable\" self.desktop_rollback_side = \"disable\" def set_volume(self, mixer, volume): if volume != 0:", "2 # Should video sprites always default to provided displayables if possible? self.video_image_fallback", "Should we disengage auto-forward mode after a click? self.afm_after_click = False # 2", "volume def get_volume(self, mixer): if mixer not in self.volumes: return 0.0 if self.mute.get(mixer,", "self.show_empty_window = True if version < 13: self.self_voicing = False if version <", "[ \"rollback\", ], \"pad_back_press\" : [ \"rollback\", ], \"pad_guide_press\" : [ \"game_menu\", ],", "click? self.afm_after_click = False # 2 - All transitions. # 1 - Only", "preferences that will one day be persisted. \"\"\" __version__ = 18 def after_upgrade(self,", "False if version < 9: self.afm_after_click = False if version < 11: self.show_empty_window", "video sprites always default to provided displayables if possible? self.video_image_fallback = False self.skip_after_choices", "def set_mute(self, mixer, mute): self.mute[mixer] = mute if (not mute) and (self.volumes.get(mixer, 1.0)", "# Should we emphasize audio? 
self.emphasize_audio = False # Is the gamepad enabled?", "ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "= { \"pad_leftshoulder_press\" : [ \"rollback\", ], \"pad_lefttrigger_pos\" : [ \"rollback\", ], \"pad_back_press\"", "\"focus_up\", \"bar_up\" ], \"pad_dpdown_press\" : [ \"focus_down\", \"bar_down\" ], \"pad_lefty_pos\" : [ \"focus_down\",", "\"button_alternate\" ], \"pad_dleft_press\" : [ \"focus_left\", \"bar_left\" ], \"pad_leftx_neg\" : [ \"focus_left\", \"bar_left\"", "self.using_afm_enable = False if version < 3: self.physical_size = None if version <", "17: self.init_rollback_side() if version < 18: self.virtual_size = None self.video_image_fallback = False def", "True self.init_rollback_side() def init_rollback_side(self): self.mobile_rollback_side = \"disable\" self.desktop_rollback_side = \"disable\" def set_volume(self, mixer,", "2: self.using_afm_enable = False if version < 3: self.physical_size = None if version", "18 def after_upgrade(self, version): if version < 1: self.mute_volumes = 0 if version", "if mixer not in self.volumes: return False return self.mute[mixer] def init_mixers(self): for i", "\"bar_down\" ], } class Preferences(renpy.object.Object): \"\"\" Stores preferences that will one day be", "Joystick mappings. self.joymap = dict() # The size of the window, or None", "# subject to the following conditions: # # The above copyright notice and", "\"\"\" __version__ = 18 def after_upgrade(self, version): if version < 1: self.mute_volumes =", "know it yet. self.physical_size = None # The virtual size at the time", "2 - All transitions. # 1 - Only non-default transitions. # 0 -", "name to the current volume (between 0 and 1). self.volumes = { }", "\"game_menu\", ], \"pad_start_press\" : [ \"game_menu\", ], \"pad_y_press\" : [ \"hide_windows\", ], \"pad_rightshoulder_press\"", "= None if version < 6: self.wait_voice = True if version < 7:", "[ \"focus_left\", \"bar_left\" ], \"pad_leftx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_rightx_neg\" : [", "= False # 2 - All transitions. # 1 - Only non-default transitions.", "use. self.renderer = \"auto\" # Should we do a performance test on startup?", "= None # The graphics renderer we use. self.renderer = \"auto\" # Should", "\"focus_down\", \"bar_down\" ], \"pad_righty_pos\" : [ \"focus_down\", \"bar_down\" ], } class Preferences(renpy.object.Object): \"\"\"", "= True self.using_afm_enable = False self.voice_sustain = False self.mouse_move = False self.show_empty_window =", "WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "version < 3: self.physical_size = None if version < 4: self.renderer = \"auto\"", "\"focus_left\", \"bar_left\" ], \"pad_rightx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_dpright_press\" : [ \"focus_right\",", "self.physical_size = None # The virtual size at the time self.physical_size was set.", "\"pad_lefty_neg\" : [ \"focus_up\", \"bar_up\" ], \"pad_righty_neg\" : [ \"focus_up\", \"bar_up\" ], \"pad_dpdown_press\"", "self.language = None # Should we self-voice? self.self_voicing = False # Should we", "\"pad_dleft_press\" : [ \"focus_left\", \"bar_left\" ], \"pad_leftx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_rightx_neg\"", "IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "disengage auto-forward mode after a click? 
self.afm_after_click = False # 2 - All", "in renpy.audio.music.get_all_mixers(): self.volumes.setdefault(i, 1.0) self.mute.setdefault(i, False) def get_all_mixers(self): return renpy.audio.music.get_all_mixers() def __eq__(self, other):", "BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR", "OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #", "def init_mixers(self): for i in renpy.audio.music.get_all_mixers(): self.volumes.setdefault(i, 1.0) self.mute.setdefault(i, False) def get_all_mixers(self): return", "set_mute(self, mixer, mute): self.mute[mixer] = mute if (not mute) and (self.volumes.get(mixer, 1.0) ==", "1). self.volumes = { } # True if the channel should not play", "hereby granted, free of charge, to any person # obtaining a copy of", "< 2: self.using_afm_enable = False if version < 3: self.physical_size = None if", "to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of", "after_upgrade(self, version): if version < 1: self.mute_volumes = 0 if version < 2:", "= 0 self.afm_time = 0 self.afm_enable = True self.using_afm_enable = False self.voice_sustain =", "\"focus_right\", \"bar_right\" ], \"pad_rightx_pos\" : [ \"focus_right\", \"bar_right\" ], \"pad_dpup_press\" : [ \"focus_up\",", "used anymore.) self.mute = { } # Joystick mappings. self.joymap = dict() #", "# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF #", "persons to whom the Software is furnished to do so, # subject to", "or None if we don't know it yet. self.physical_size = None # The", "# Joystick mappings. self.joymap = dict() # The size of the window, or", "The language we use for translations. self.language = None # Should we self-voice?", "not play music. False # otherwise. (Not used anymore.) self.mute = { }", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE", "True # The language we use for translations. self.language = None # Should", "= 2 # Should video sprites always default to provided displayables if possible?", "The graphics renderer we use. self.renderer = \"auto\" # Should we do a", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, #", "= False # Is the gamepad enabled? self.pad_enabled = True self.init_rollback_side() def init_rollback_side(self):", "we don't know it yet. self.physical_size = None # The virtual size at", "self.joymap = dict() # The size of the window, or None if we", "WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "self.emphasize_audio = False # Is the gamepad enabled? self.pad_enabled = True self.init_rollback_side() def", "PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS", "\"rollback\", ], \"pad_back_press\" : [ \"rollback\", ], \"pad_guide_press\" : [ \"game_menu\", ], \"pad_start_press\"", "\"bar_up\" ], \"pad_righty_neg\" : [ \"focus_up\", \"bar_up\" ], \"pad_dpdown_press\" : [ \"focus_down\", \"bar_down\"", "self.mute_volumes = 0 if version < 2: self.using_afm_enable = False if version <", "at the time self.physical_size was set. self.virtual_size = None # The graphics renderer", "2004-2017 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to", "self.skip_unseen = False self.text_cps = 0 self.afm_time = 0 self.afm_enable = True self.using_afm_enable", "CONNECTION # WITH THE SOFTWARE OR THE USE import renpy.audio pad_bindings = {", "renderer we use. 
self.renderer = \"auto\" # Should we do a performance test", "\"focus_left\", \"bar_left\" ], \"pad_dpright_press\" : [ \"focus_right\", \"bar_right\" ], \"pad_leftx_pos\" : [ \"focus_right\",", "= True if version < 7: self.voice_sustain = False if version < 8:", "], \"pad_rightshoulder_press\" : [ \"rollforward\", ], \"pad_righttrigger_press\" : [ \"dismiss\", \"button_select\" ], \"pad_a_press\"", "get_mute(self, mixer): if mixer not in self.volumes: return False return self.mute[mixer] def init_mixers(self):", "False # Mixer channel info. # A map from channel name to the", "\"Software\"), to deal in the Software without restriction, # including without limitation the", "\"bar_up\" ], \"pad_dpdown_press\" : [ \"focus_down\", \"bar_down\" ], \"pad_lefty_pos\" : [ \"focus_down\", \"bar_down\"", "charge, to any person # obtaining a copy of this software and associated", "documentation files # (the \"Software\"), to deal in the Software without restriction, #", "mute): self.mute[mixer] = mute if (not mute) and (self.volumes.get(mixer, 1.0) == 0.0): self.volumes[mixer]", "= 18 def after_upgrade(self, version): if version < 1: self.mute_volumes = 0 if", ": [ \"focus_left\", \"bar_left\" ], \"pad_leftx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_rightx_neg\" :", "= mute if (not mute) and (self.volumes.get(mixer, 1.0) == 0.0): self.volumes[mixer] = 1.0", ": [ \"rollback\", ], \"pad_back_press\" : [ \"rollback\", ], \"pad_guide_press\" : [ \"game_menu\",", "None self.video_image_fallback = False def __init__(self): self.fullscreen = False self.skip_unseen = False self.text_cps", "# The language we use for translations. self.language = None # Should we", "self.text_cps = 0 self.afm_time = 0 self.afm_enable = True self.using_afm_enable = False self.voice_sustain", "current volume (between 0 and 1). self.volumes = { } # True if", ": [ \"button_alternate\" ], \"pad_dleft_press\" : [ \"focus_left\", \"bar_left\" ], \"pad_leftx_neg\" : [", "a click? self.afm_after_click = False # 2 - All transitions. # 1 -", "\"focus_right\", \"bar_right\" ], \"pad_dpup_press\" : [ \"focus_up\", \"bar_up\" ], \"pad_lefty_neg\" : [ \"focus_up\",", "False self.text_cps = 0 self.afm_time = 0 self.afm_enable = True self.using_afm_enable = False", "< 6: self.wait_voice = True if version < 7: self.voice_sustain = False if", "# True if the channel should not play music. False # otherwise. (Not", "self.mouse_move = False if version < 9: self.afm_after_click = False if version <", "auto-forward mode after a click? self.afm_after_click = False # 2 - All transitions.", "self.volumes = { } # True if the channel should not play music.", "# otherwise. (Not used anymore.) self.mute = { } # Joystick mappings. self.joymap", "enabled? self.pad_enabled = True self.init_rollback_side() def init_rollback_side(self): self.mobile_rollback_side = \"disable\" self.desktop_rollback_side = \"disable\"", "True if version < 13: self.self_voicing = False if version < 14: self.emphasize_audio", "this software and associated documentation files # (the \"Software\"), to deal in the", "4: self.renderer = \"auto\" self.performance_test = True if version < 5: self.language =", "music. False # otherwise. (Not used anymore.) 
self.mute = { } # Joystick", "granted, free of charge, to any person # obtaining a copy of this", "13: self.self_voicing = False if version < 14: self.emphasize_audio = False if version", "mixer, mute): self.mute[mixer] = mute if (not mute) and (self.volumes.get(mixer, 1.0) == 0.0):", "return False return self.mute[mixer] def init_mixers(self): for i in renpy.audio.music.get_all_mixers(): self.volumes.setdefault(i, 1.0) self.mute.setdefault(i,", "\"bar_down\" ], \"pad_righty_pos\" : [ \"focus_down\", \"bar_down\" ], } class Preferences(renpy.object.Object): \"\"\" Stores", "to the current volume (between 0 and 1). self.volumes = { } #", "1 - Only non-default transitions. # 0 - No transitions. self.transitions = 2", "], \"pad_lefty_neg\" : [ \"focus_up\", \"bar_up\" ], \"pad_righty_neg\" : [ \"focus_up\", \"bar_up\" ],", "of the window, or None if we don't know it yet. self.physical_size =", "False self.show_empty_window = True # Should we wait for the voice to stop?", "if possible? self.video_image_fallback = False self.skip_after_choices = False # Mixer channel info. #", "< 11: self.show_empty_window = True if version < 13: self.self_voicing = False if", "False self.voice_sustain = False self.mouse_move = False self.show_empty_window = True # Should we", "3: self.physical_size = None if version < 4: self.renderer = \"auto\" self.performance_test =", "], \"pad_leftx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_rightx_neg\" : [ \"focus_left\", \"bar_left\" ],", "# including without limitation the rights to use, copy, modify, merge, # publish,", "[ \"focus_up\", \"bar_up\" ], \"pad_lefty_neg\" : [ \"focus_up\", \"bar_up\" ], \"pad_righty_neg\" : [", "version < 11: self.show_empty_window = True if version < 13: self.self_voicing = False", "], \"pad_dpdown_press\" : [ \"focus_down\", \"bar_down\" ], \"pad_lefty_pos\" : [ \"focus_down\", \"bar_down\" ],", ": [ \"dismiss\", \"button_select\" ], \"pad_b_press\" : [ \"button_alternate\" ], \"pad_dleft_press\" : [", "], \"pad_lefttrigger_pos\" : [ \"rollback\", ], \"pad_back_press\" : [ \"rollback\", ], \"pad_guide_press\" :", "self.pad_enabled = True if version < 17: self.init_rollback_side() if version < 18: self.virtual_size", "self.volumes[mixer] = volume def get_volume(self, mixer): if mixer not in self.volumes: return 0.0", "import renpy.audio pad_bindings = { \"pad_leftshoulder_press\" : [ \"rollback\", ], \"pad_lefttrigger_pos\" : [", ": [ \"focus_right\", \"bar_right\" ], \"pad_dpup_press\" : [ \"focus_up\", \"bar_up\" ], \"pad_lefty_neg\" :", "= False if version < 3: self.physical_size = None if version < 4:", "self.using_afm_enable = False self.voice_sustain = False self.mouse_move = False self.show_empty_window = True #", "self.transitions = 2 # Should video sprites always default to provided displayables if", "True if version < 17: self.init_rollback_side() if version < 18: self.virtual_size = None", "= False self.skip_after_choices = False # Mixer channel info. # A map from", "None # The virtual size at the time self.physical_size was set. 
self.virtual_size =", "and (self.volumes.get(mixer, 1.0) == 0.0): self.volumes[mixer] = 1.0 def get_mute(self, mixer): if mixer", "\"disable\" def set_volume(self, mixer, volume): if volume != 0: self.mute[mixer] = False self.volumes[mixer]", "THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR", "if version < 8: self.mouse_move = False if version < 9: self.afm_after_click =", "= 0 self.afm_enable = True self.using_afm_enable = False self.voice_sustain = False self.mouse_move =", "self.mouse_move = False self.show_empty_window = True # Should we wait for the voice", "to stop? self.wait_voice = True # Should we disengage auto-forward mode after a", "notice and this permission notice shall be # included in all copies or", "if the channel should not play music. False # otherwise. (Not used anymore.)", "renpy.audio.music.get_all_mixers() def __eq__(self, other): return vars(self) == vars(other) renpy.game.Preferences = Preferences renpy.game.preferences =", "version < 8: self.mouse_move = False if version < 9: self.afm_after_click = False", "version < 1: self.mute_volumes = 0 if version < 2: self.using_afm_enable = False", "# (the \"Software\"), to deal in the Software without restriction, # including without", "# # The above copyright notice and this permission notice shall be #", "\"pad_y_press\" : [ \"hide_windows\", ], \"pad_rightshoulder_press\" : [ \"rollforward\", ], \"pad_righttrigger_press\" : [", "= 0 if version < 2: self.using_afm_enable = False if version < 3:", "modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, #", "renpy.audio.music.get_all_mixers(): self.volumes.setdefault(i, 1.0) self.mute.setdefault(i, False) def get_all_mixers(self): return renpy.audio.music.get_all_mixers() def __eq__(self, other): return", "def init_rollback_side(self): self.mobile_rollback_side = \"disable\" self.desktop_rollback_side = \"disable\" def set_volume(self, mixer, volume): if", "\"auto\" self.performance_test = True if version < 5: self.language = None if version", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES", "pad_bindings = { \"pad_leftshoulder_press\" : [ \"rollback\", ], \"pad_lefttrigger_pos\" : [ \"rollback\", ],", "True # Should we wait for the voice to stop? self.wait_voice = True", "self-voice? self.self_voicing = False # Should we emphasize audio? self.emphasize_audio = False #", "self.language = None if version < 6: self.wait_voice = True if version <", "self.afm_time = 0 self.afm_enable = True self.using_afm_enable = False self.voice_sustain = False self.mouse_move", "the channel should not play music. False # otherwise. (Not used anymore.) self.mute", "\"disable\" self.desktop_rollback_side = \"disable\" def set_volume(self, mixer, volume): if volume != 0: self.mute[mixer]", "<<EMAIL>> # # Permission is hereby granted, free of charge, to any person", "and 1). self.volumes = { } # True if the channel should not", "and associated documentation files # (the \"Software\"), to deal in the Software without", "# 0 - No transitions. self.transitions = 2 # Should video sprites always", "# included in all copies or substantial portions of the Software. # #", "copyright notice and this permission notice shall be # included in all copies", "Mixer channel info. 
# A map from channel name to the current volume", "to do so, # subject to the following conditions: # # The above", "], } class Preferences(renpy.object.Object): \"\"\" Stores preferences that will one day be persisted.", "Should we do a performance test on startup? self.performance_test = True # The", "to whom the Software is furnished to do so, # subject to the", "Should we emphasize audio? self.emphasize_audio = False # Is the gamepad enabled? self.pad_enabled", ": [ \"focus_left\", \"bar_left\" ], \"pad_dpright_press\" : [ \"focus_right\", \"bar_right\" ], \"pad_leftx_pos\" :", "channel name to the current volume (between 0 and 1). self.volumes = {", "OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE import", "= False self.text_cps = 0 self.afm_time = 0 self.afm_enable = True self.using_afm_enable =", "True # Should we disengage auto-forward mode after a click? self.afm_after_click = False", "], \"pad_dpup_press\" : [ \"focus_up\", \"bar_up\" ], \"pad_lefty_neg\" : [ \"focus_up\", \"bar_up\" ],", "<NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any", "mute) and (self.volumes.get(mixer, 1.0) == 0.0): self.volumes[mixer] = 1.0 def get_mute(self, mixer): if", "\"rollforward\", ], \"pad_righttrigger_press\" : [ \"dismiss\", \"button_select\" ], \"pad_a_press\" : [ \"dismiss\", \"button_select\"", "self.init_rollback_side() def init_rollback_side(self): self.mobile_rollback_side = \"disable\" self.desktop_rollback_side = \"disable\" def set_volume(self, mixer, volume):", "__init__(self): self.fullscreen = False self.skip_unseen = False self.text_cps = 0 self.afm_time = 0", "= \"auto\" self.performance_test = True if version < 5: self.language = None if", "performance test on startup? self.performance_test = True # The language we use for", "if version < 17: self.init_rollback_side() if version < 18: self.virtual_size = None self.video_image_fallback", "volume): if volume != 0: self.mute[mixer] = False self.volumes[mixer] = volume def get_volume(self,", "mode after a click? self.afm_after_click = False # 2 - All transitions. #", "LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT", "if version < 3: self.physical_size = None if version < 4: self.renderer =", "False # Is the gamepad enabled? self.pad_enabled = True self.init_rollback_side() def init_rollback_side(self): self.mobile_rollback_side", "\"pad_start_press\" : [ \"game_menu\", ], \"pad_y_press\" : [ \"hide_windows\", ], \"pad_rightshoulder_press\" : [", "furnished to do so, # subject to the following conditions: # # The", "# The virtual size at the time self.physical_size was set. self.virtual_size = None", "files # (the \"Software\"), to deal in the Software without restriction, # including", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "# Is the gamepad enabled? self.pad_enabled = True self.init_rollback_side() def init_rollback_side(self): self.mobile_rollback_side =", "None if we don't know it yet. self.physical_size = None # The virtual", "was set. self.virtual_size = None # The graphics renderer we use. 
self.renderer =", "to deal in the Software without restriction, # including without limitation the rights", "[ \"focus_down\", \"bar_down\" ], } class Preferences(renpy.object.Object): \"\"\" Stores preferences that will one", "is hereby granted, free of charge, to any person # obtaining a copy", "[ \"focus_left\", \"bar_left\" ], \"pad_rightx_neg\" : [ \"focus_left\", \"bar_left\" ], \"pad_dpright_press\" : [", "< 14: self.emphasize_audio = False if version < 15: self.pad_enabled = True if", "version < 15: self.pad_enabled = True if version < 17: self.init_rollback_side() if version", "], \"pad_back_press\" : [ \"rollback\", ], \"pad_guide_press\" : [ \"game_menu\", ], \"pad_start_press\" :", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "\"pad_righty_pos\" : [ \"focus_down\", \"bar_down\" ], } class Preferences(renpy.object.Object): \"\"\" Stores preferences that", "__version__ = 18 def after_upgrade(self, version): if version < 1: self.mute_volumes = 0", "= None # Should we self-voice? self.self_voicing = False # Should we emphasize", "def get_mute(self, mixer): if mixer not in self.volumes: return False return self.mute[mixer] def", "# The above copyright notice and this permission notice shall be # included", "WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN", "14: self.emphasize_audio = False if version < 15: self.pad_enabled = True if version", "SOFTWARE OR THE USE import renpy.audio pad_bindings = { \"pad_leftshoulder_press\" : [ \"rollback\",", "], \"pad_dleft_press\" : [ \"focus_left\", \"bar_left\" ], \"pad_leftx_neg\" : [ \"focus_left\", \"bar_left\" ],", "language we use for translations. self.language = None # Should we self-voice? self.self_voicing", "mappings. self.joymap = dict() # The size of the window, or None if", "False): return 0.0 return self.volumes[mixer] def set_mute(self, mixer, mute): self.mute[mixer] = mute if", "included in all copies or substantial portions of the Software. # # THE", "permit persons to whom the Software is furnished to do so, # subject", "whom the Software is furnished to do so, # subject to the following", "Software without restriction, # including without limitation the rights to use, copy, modify,", "self.mobile_rollback_side = \"disable\" self.desktop_rollback_side = \"disable\" def set_volume(self, mixer, volume): if volume !=", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR", "\"pad_b_press\" : [ \"button_alternate\" ], \"pad_dleft_press\" : [ \"focus_left\", \"bar_left\" ], \"pad_leftx_neg\" :", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF", "[ \"rollback\", ], \"pad_lefttrigger_pos\" : [ \"rollback\", ], \"pad_back_press\" : [ \"rollback\", ],", "= { } # True if the channel should not play music. False", "if version < 7: self.voice_sustain = False if version < 8: self.mouse_move =", "be # included in all copies or substantial portions of the Software. #", "A map from channel name to the current volume (between 0 and 1).", "so, # subject to the following conditions: # # The above copyright notice", "= False # Should we emphasize audio? 
<filename>renpy/preferences.py
# Copyright 2004-2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import renpy.audio

pad_bindings = {
    "pad_lefttrigger_pos" : [ "rollback", ],
    "pad_back_press" : [ "rollback", ],

    "pad_guide_press" : [ "game_menu", ],
    "pad_start_press" : [ "game_menu", ],

    "pad_y_press" : [ "hide_windows", ],

    "pad_rightshoulder_press" : [ "rollforward", ],

    "pad_righttrigger_press" : [ "dismiss", "button_select" ],
    "pad_a_press" : [ "dismiss", "button_select" ],
    "pad_b_press" : [ "button_alternate" ],

    "pad_dleft_press" : [ "focus_left", "bar_left" ],
    "pad_leftx_neg" : [ "focus_left", "bar_left" ],
    "pad_rightx_neg" : [ "focus_left", "bar_left" ],

    "pad_dpright_press" : [ "focus_right", "bar_right" ],
    "pad_leftx_pos" : [ "focus_right", "bar_right" ],
    "pad_rightx_pos" : [ "focus_right", "bar_right" ],

    "pad_dpup_press" : [ "focus_up", "bar_up" ],
    "pad_lefty_neg" : [ "focus_up", "bar_up" ],
    "pad_righty_neg" : [ "focus_up", "bar_up" ],

    "pad_dpdown_press" : [ "focus_down", "bar_down" ],
    "pad_lefty_pos" : [ "focus_down", "bar_down" ],
    "pad_righty_pos" : [ "focus_down", "bar_down" ],
}


class Preferences(renpy.object.Object):
    """
    Stores preferences that will one day be persisted.
    """

    __version__ = 18

    def after_upgrade(self, version):

        if version < 1:
            self.mute_volumes = 0

        if version < 2:
            self.using_afm_enable = False

        if version < 3:
            self.physical_size = None

        if version < 4:
            self.renderer = "auto"
            self.performance_test = True

        if version < 5:
            self.language = None

        if version < 6:
            self.wait_voice = True

        if version < 7:
            self.voice_sustain = False

        if version < 8:
            self.mouse_move = False

        if version < 9:
            self.afm_after_click = False

        if version < 11:
            self.show_empty_window = True

        if version < 13:
            self.self_voicing = False

        if version < 14:
            self.emphasize_audio = False

        if version < 15:
            self.pad_enabled = True

        if version < 17:
            self.init_rollback_side()

        if version < 18:
            self.virtual_size = None
            self.video_image_fallback = False
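
    # Preferences objects are meant to be pickled and persisted (see the
    # docstring above), so the class carries a schema version. When an older
    # pickle is loaded, after_upgrade() backfills the attributes added since
    # it was written; for example, an object saved at version 14 picks up
    # pad_enabled, the rollback-side fields, virtual_size and
    # video_image_fallback here.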
    def __init__(self):
        self.fullscreen = False
        self.skip_unseen = False
        self.text_cps = 0
        self.afm_time = 0
        self.afm_enable = True
        self.using_afm_enable = False
        self.voice_sustain = False
        self.mouse_move = False
        self.show_empty_window = True

        # Should we wait for the voice to stop?
        self.wait_voice = True

        # Should we disengage auto-forward mode after a click?
        self.afm_after_click = False

        # 2 - All transitions.
        # 1 - Only non-default transitions.
        # 0 - No transitions.
        self.transitions = 2

        # Should video sprites always default to provided displayables if possible?
        self.video_image_fallback = False

        self.skip_after_choices = False

        # Mixer channel info.
        # A map from channel name to the current volume (between 0 and 1).
        self.volumes = { }

        # True if the channel is muted, False
        # otherwise. (Not used anymore.)
        self.mute = { }

        # Joystick mappings.
        self.joymap = dict()

        # The size of the window, or None if we don't know it yet.
        self.physical_size = None

        # The virtual size at the time self.physical_size was set.
        self.virtual_size = None

        # The graphics renderer we use.
        self.renderer = "auto"

        # Should we do a performance test on startup?
        self.performance_test = True

        # The language we use for translations.
        self.language = None

        # Should we self-voice?
        self.self_voicing = False

        # Should we emphasize audio?
        self.emphasize_audio = False

        # Is the gamepad enabled?
        self.pad_enabled = True

        self.init_rollback_side()

    def init_rollback_side(self):
        self.mobile_rollback_side = "disable"
        self.desktop_rollback_side = "disable"

    def set_volume(self, mixer, volume):
        if volume != 0:
            self.mute[mixer] = False

        self.volumes[mixer] = volume

    def get_volume(self, mixer):
        if mixer not in self.volumes:
            return 0.0

        if self.mute.get(mixer, False):
            return 0.0

        return self.volumes[mixer]

    def set_mute(self, mixer, mute):
        self.mute[mixer] = mute

        if (not mute) and (self.volumes.get(mixer, 1.0) == 0.0):
            self.volumes[mixer] = 1.0

    def get_mute(self, mixer):
        if mixer not in self.volumes:
            return False

        return self.mute[mixer]

    def init_mixers(self):
        for i in renpy.audio.music.get_all_mixers():
            self.volumes.setdefault(i, 1.0)
            self.mute.setdefault(i, False)

    def get_all_mixers(self):
        return renpy.audio.music.get_all_mixers()

    def __eq__(self, other):
        return vars(self) == vars(other)


renpy.game.Preferences = Preferences
renpy.game.preferences = Preferences()
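
# Illustrative-only sanity check of the mixer bookkeeping above; it assumes a
# Ren'Py runtime is importable (renpy.object, renpy.game), like the rest of
# this module. The interesting edge case is in set_mute(): unmuting a channel
# whose stored volume is 0.0 resets the volume to 1.0, so the unmute is audible.
if __name__ == "__main__":
    prefs = Preferences()

    prefs.set_volume("music", 0.5)
    prefs.set_mute("music", True)
    assert prefs.get_volume("music") == 0.0   # muted channels read as silent

    prefs.set_mute("music", False)
    assert prefs.get_volume("music") == 0.5   # the stored volume survives the mute

    prefs.set_volume("sound", 0.0)
    prefs.set_mute("sound", False)
    assert prefs.get_volume("sound") == 1.0   # unmuting a zeroed channel resets it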
# Generated by Django 2.1.4 on 2019-04-26 07:24

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0014_auto_20190417_1639'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='documentpart',
            unique_together={('order', 'document')},
        ),
    ]
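
# For illustration, the model-side declaration that a migration like the one
# above is generated from (the field types here are assumptions; only the
# ('order', 'document') pair comes from the migration itself). At the database
# level it becomes a composite UNIQUE constraint, so two parts of the same
# document can never share an order value:
#
#     class DocumentPart(models.Model):
#         order = models.PositiveIntegerField()
#         document = models.ForeignKey('core.Document', on_delete=models.CASCADE)
#
#         class Meta:
#             unique_together = (('order', 'document'),)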
def write_tiledata(outfile, bank, tiles_data, name):
    def write_tiles():
        out = ""
        for tile in tiles_data:
            out += "    // " + tile["name"] + "\n"
            out += "    " + ",".join(tile["hexdata"]) + ",\n"
        return out

    def write_palettes():
        return ", ".join([hex(tile["palette_idx"]) for tile in tiles_data])

    # write tiledata
    with open(outfile + "_tiles.b" + bank + ".c", "w") as file:
        file.write("\
#pragma bank " + bank + "\n\
\n\
void empty(void) __nonbanked {}\n\
__addressmod empty const CODE;\n\
\n\
const unsigned char " + name + "_tiles_data[] = {\n\
" + write_tiles() + "};\n\
\n\
const unsigned char " + name + "_tile_palettes[] = {\n\
" + write_palettes() + "\n\
};\n\
\n\
#include \"TilesInfo.h\"\n\
const struct TilesInfoInternal " + name + "_tiles_internal = {\n\
8, //width\n\
8, //height\n\
" + str(len(tiles_data)) + ", //num_tiles\n\
" + name + "_tiles_data, //tiles\n\
" + name + "_tile_palettes, //CGB palette\n\
};\n\
CODE struct TilesInfo " + name + "_tiles = {\n\
" + bank + ", //bank\n\
&" + name + "_tiles_internal, //data\n\
};\n\
")

    # write tiledata header
    with open(outfile + "_tiles.h", "w") as file:
        file.write("\
#ifndef TILES_" + name + "_tiles_H\n\
#define TILES_" + name + "_tiles_H\n\
#include \"TilesInfo.h\"\n\
extern struct TilesInfo " + name + "_tiles;\n\
#endif\n\
")
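
# The shape write_tiledata() expects of tiles_data, with illustrative values
# (the key names come straight from the lookups above): one entry per 8x8
# tile, carrying 16 bytes of 2bpp Game Boy tile data as hex strings plus a
# CGB palette index.
example_tiles_data = [
    {
        "name": "grass",           # emitted as a // comment above the bytes
        "hexdata": ["0x00"] * 16,  # 8x8 pixels at 2 bits per pixel = 16 bytes
        "palette_idx": 0,          # which of the 8 CGB palettes this tile uses
    },
]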
def write_mapdata(outfile, bank, map_data, map_width, map_height, map_name,
                  collision_tiles, collision_down_tiles):
    def write_collision_tiles(collision_tiles):
        if len(collision_tiles) == 0:
            return "0"
        return "{" + ", ".join(str(v) for v in collision_tiles) + ", 0}"

    # write map
    with open(outfile + ".b" + bank + ".c", "w") as file:
        file.write("\
#pragma bank " + bank + "\n\
\n\
void empty(void) __nonbanked {}\n\
__addressmod empty const CODE;\n\
\n\
const unsigned char " + map_name + "_map[] = { \n\
" + map_data + " \n\
};\n\
#include \"" + map_name + "_tiles.h\"\n\
#include \"MapInfo.h\"\n\
#include \"types.h\"\n\
const struct MapInfoInternal " + map_name + "_internal = {\n\
" + map_name + "_map, //map\n\
" + map_width + ", //width\n\
" + map_height + ", //height\n\
0, //attributes\n\
&" + map_name + "_tiles, //tiles info\n\
};\n\
CODE struct MapInfo " + map_name + " = {\n\
3, //bank\n\
&" + map_name + "_internal, //data\n\
};\n\
CODE UINT8 " + map_name + "_collision_tiles[] = " + write_collision_tiles(collision_tiles) + ";\n\
CODE UINT8 " + map_name + "_collision_down_tiles[] = " + write_collision_tiles(collision_down_tiles) + ";\n\
\n\
")

    # write map header
    with open(outfile + ".h", "w") as file:
        file.write("\
#ifndef MAP_" + map_name + "_H\n\
#define MAP_" + map_name + "_H\n\
#define mapWidth " + map_width + "\n\
#define mapHeight " + map_height + "\n\
#include \"MapInfo.h\"\n\
#include \"types.h\"\n\
extern unsigned char bank_" + map_name + ";\n\
extern struct MapInfo " + map_name + ";\n\
extern UINT8 " + map_name + "_collision_tiles;\n\
extern UINT8 " + map_name + "_collision_down_tiles;\n\
#endif\n\
")
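
# For concreteness, what the collision helper emits (illustrative inputs): an
# empty list collapses to the scalar 0, and anything else becomes a C array
# initializer with a trailing 0, presumably so the C side can scan for the
# sentinel instead of tracking a separate length:
#
#     write_collision_tiles([])        -> "0"
#     write_collision_tiles([1, 2, 7]) -> "{1, 2, 7, 0}"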
def write_tiledata(outfile, bank, tiles_data, name):  # function name reconstructed; the fragment preserves only the parameters (outfile, bank, tiles_data, name)
    def write_tiles():
        out = ""
        for tile in tiles_data:
            out += ",".join(tile["hexdata"]) + ",\n"
        return out

    def write_palettes():
        return ", ".join([hex(tile["palette_idx"]) for tile in tiles_data])

    # write tiledata
    with open(outfile + "_tiles.b" + bank + ".c", "w") as file:
        file.write("\
#pragma bank " + bank + "\n\
\n\
void empty(void) __nonbanked {}\n\
__addressmod empty const CODE;\n\
\n\
const unsigned char " + name + "_tiles_data[] = {\n\
" + write_tiles() + "};\n\
\n\
const unsigned char " + name + "_tile_palettes[] = {\n\
" + write_palettes() + "\n\
};\n\
\n\
#include \"TilesInfo.h\"\n\
const struct TilesInfoInternal " + name + "_tiles_internal = {\n\
8, //width\n\
8, //height\n\
" + str(len(tiles_data)) + ", //num_tiles\n\
" + name + "_tiles_data, //tiles\n\
" + name + "_tile_palettes, //CGB palette\n\
};\n\
CODE struct TilesInfo " + name + "_tiles = {\n\
" + bank + ", //bank\n\
&" + name + "_tiles_internal, //data\n\
};\n\
")

    # write tiledata header
    with open(outfile + "_tiles.h", "w") as file:
        file.write("\
#ifndef TILES_" + name + "_tiles_H\n\
#define TILES_" + name + "_tiles_H\n\
#include \"TilesInfo.h\"\n\
extern struct TilesInfo " + name + "_tiles;\n\
#endif\n\
")


def write_mapdata(outfile, bank, map_data, map_width, map_height, map_name,
                  collision_tiles, collision_down_tiles):
    def write_collision_tiles(collision_tiles):
        if len(collision_tiles) == 0:
            return "0"
        return "{" + ", ".join(str(v) for v in collision_tiles) + ", 0}"

    # write map; map_data is assumed to arrive as a preformatted string of comma-separated bytes
    with open(outfile + ".b" + bank + ".c", "w") as file:
        file.write("\
#pragma bank " + bank + "\n\
\n\
void empty(void) __nonbanked {}\n\
__addressmod empty const CODE;\n\
\n\
const unsigned char " + map_name + "_map[] = { \n\
" + map_data + " \n\
};\n\
#include \"" + map_name + "_tiles.h\"\n\
#include \"MapInfo.h\"\n\
#include \"types.h\"\n\
const struct MapInfoInternal " + map_name + "_internal = {\n\
" + map_name + "_map, //map\n\
" + map_width + ", //width\n\
" + map_height + ", //height\n\
0, //attributes\n\
&" + map_name + "_tiles, //tiles info\n\
};\n\
CODE struct MapInfo " + map_name + " = {\n\
" + bank + ", //bank\n\
&" + map_name + "_internal, //data\n\
};\n\
CODE UINT8 " + map_name + "_collision_tiles[] = " + write_collision_tiles(collision_tiles) + ";\n\
CODE UINT8 " + map_name + "_collision_down_tiles[] = " + write_collision_tiles(collision_down_tiles) + ";\n\
\n\
")

    # write map header
    with open(outfile + ".h", "w") as file:
        file.write("\
#ifndef MAP_" + map_name + "_H\n\
#define MAP_" + map_name + "_H\n\
#define mapWidth " + map_width + "\n\
#define mapHeight " + map_height + "\n\
#include \"MapInfo.h\"\n\
#include \"types.h\"\n\
extern unsigned char bank_" + map_name + ";\n\
extern struct MapInfo " + map_name + ";\n\
extern UINT8 " + map_name + "_collision_tiles;\n\
extern UINT8 " + map_name + "_collision_down_tiles;\n\
#endif\n\
")


def write_palette_data(outfile, bank, name, palettes):
    def palette_or_zero(idx):
        try:
            return ", ".join([hex(v).rjust(6, " ") for v in palettes[idx]])
        except IndexError:  # fewer than 8 palettes supplied
            return "0x0000, 0x0000, 0x0000, 0x0000"

    if len(palettes) > 8:
        raise ValueError("More than 8 palettes given!")

    with open(outfile + "_palette.b" + bank + ".c", "w") as file:
        file.write("\
#pragma bank " + bank + "\n\
\n\
void empty(void) __nonbanked {}\n\
__addressmod empty const CODE;\n\
\n\
#include \"types.h\"\n\
\n\
CODE UINT16 " + name + "_palette[] = {\n\
" + palette_or_zero(0) + ",\n\
" + palette_or_zero(1) + ",\n\
" + palette_or_zero(2) + ",\n\
" + palette_or_zero(3) + ",\n\
" + palette_or_zero(4) + ",\n\
" + palette_or_zero(5) + ",\n\
" + palette_or_zero(6) + ",\n\
" + palette_or_zero(7) + "\n\
};")

    # write palette header
    with open(outfile + "_palette.h", "w") as file:
        file.write("\
#ifndef PALETTE_" + name + "_palette_H\n\
#define PALETTE_" + name + "_palette_H\n\
#include \"types.h\"\n\
extern UINT16 " + name + "_palette[];\n\
#endif\n\
")
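Taken together, the three writers emit a banked GBDK-style .c/.h pair per asset. A hedged driver sketch: every literal below (paths, bank number, tile bytes, palette colors, map bytes) is illustrative, and note that bank, map_width and map_height are passed as strings because the writers splice them straight into the generated C source.

tiles = [
    # hypothetical tile entry: "hexdata" holds byte strings, "palette_idx" a CGB palette slot
    {"hexdata": ["0x7e", "0x81", "0x81", "0x7e"], "palette_idx": 0},
]
write_tiledata("build/level1", "2", tiles, "level1")
write_palette_data("build/level1", "2", "level1",
                   [[0x7fff, 0x001f, 0x03e0, 0x7c00]])  # one 4-color BGR555 palette
write_mapdata("build/level1", "2", "0x00, 0x00, 0x00, 0x00", "2", "2",
              "level1", [1], [])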
# Generated by Django 2.2.13 on 2021-07-22 14:49

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('des', '0032_auto_20210713_2127'),
    ]

    operations = [
        migrations.AlterField(
            model_name='astrometryjob',
            name='status',
            field=models.IntegerField(
                choices=[(1, 'Idle'), (2, 'Running'), (3, 'Completed'),
                         (4, 'Failed'), (5, 'Aborted'), (6, 'Warning'),
                         (7, 'Launched')],
                default=1, verbose_name='Status'),
        ),
    ]
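For reference, a self-contained sketch of how Django resolves these integer choices to labels; the mapping below simply mirrors the field definition above.

STATUS_CHOICES = [
    (1, 'Idle'), (2, 'Running'), (3, 'Completed'), (4, 'Failed'),
    (5, 'Aborted'), (6, 'Warning'), (7, 'Launched'),
]
# model_instance.get_status_display() performs this lookup internally
assert dict(STATUS_CHOICES)[7] == 'Launched'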
import pytest
from pyformlang.finite_automaton import NondeterministicFiniteAutomaton

from project import BooleanMatrices


@pytest.fixture
def nfa():
    nfa = NondeterministicFiniteAutomaton()
    nfa.add_transitions(
        [
            (0, "X", 1),
            (0, "X", 2),
            (1, "Y", 2),
            (1, "Z", 1),
            (2, "S", 3),
            (3, "W", 4),
            (4, "W", 0),
        ]
    )
    return nfa


@pytest.mark.parametrize(
    "label,expected_nnz", [("X", 2), ("Y", 1), ("Z", 1), ("S", 1), ("W", 2)]
)
def test_nonzero(nfa, label, expected_nnz):
    bm = BooleanMatrices(nfa)
    actual_nnz = bm.bool_matrices[label].nnz
    assert actual_nnz == expected_nnz


def test_symbols(nfa):
    bm = BooleanMatrices(nfa)
    actual_symbols = bm.bool_matrices.keys()
    expected_symbols = nfa.symbols
    assert actual_symbols == expected_symbols


@pytest.mark.parametrize(
    "label,edges",
    [
        ("X", [(0, 1), (0, 2)]),
        ("Y", [(1, 2)]),
        ("Z", [(1, 1)]),
        ("S", [(2, 3)]),
        ("W", [(3, 4), (4, 0)]),
    ],
)
def test_adjacency(nfa, label, edges):
    bm = BooleanMatrices(nfa)
    assert all(bm.bool_matrices[label][edge] for edge in edges)


def test_transitive_closure(nfa):
    bm = BooleanMatrices(nfa)
    tc = bm.make_transitive_closure()
    assert tc.sum() == tc.size
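The tests pin down the surface of project.BooleanMatrices: a bool_matrices dict keyed by symbol, one boolean adjacency matrix per label, and a make_transitive_closure() helper. A minimal sketch that would satisfy them, assuming a scipy.sparse backing; the real class likely also tracks start/final states for reachability queries.

from scipy import sparse


class BooleanMatrices:
    """One boolean adjacency matrix per transition label of an NFA."""

    def __init__(self, nfa):
        # Sorting by string form keeps the toy fixture's integer-named states
        # at their own row indices; a real class keeps an explicit mapping.
        states = sorted(nfa.states, key=str)
        self.state_index = {state: idx for idx, state in enumerate(states)}
        n = len(states)
        self.bool_matrices = {}
        # nfa.to_dict(): state -> {symbol -> target state or set of states}
        for source, transitions in nfa.to_dict().items():
            for symbol, targets in transitions.items():
                if not isinstance(targets, set):
                    targets = {targets}
                matrix = self.bool_matrices.setdefault(
                    symbol, sparse.dok_matrix((n, n), dtype=bool))
                for target in targets:
                    matrix[self.state_index[source],
                           self.state_index[target]] = True

    def make_transitive_closure(self):
        # Union of all label matrices, then square to a fixpoint: entry (i, j)
        # ends up True iff some word leads from state i to state j.
        tc = sum(self.bool_matrices.values()).tocsr()
        prev_nnz = None
        while prev_nnz != tc.nnz:
            prev_nnz = tc.nnz
            tc += tc @ tc
        return tc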
#!/usr/local/bin/python3
# -*- coding: UTF-8 -*-


class Solution(object):
    def test76(self):
        '''
        Problem: write a function so that when the input n is even it computes
        1/2 + 1/4 + ... + 1/n, and when n is odd it computes 1/1 + 1/3 + ... + 1/n.
        '''
        return ""


if __name__ == "__main__":
    solution = Solution()
    solution.test76()
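The exercise above is left returning an empty string; a hedged completion of exactly the series its docstring describes (the function name is mine, not the author's):

def reciprocal_series(n):
    # even n: 1/2 + 1/4 + ... + 1/n; odd n: 1/1 + 1/3 + ... + 1/n
    start = 2 if n % 2 == 0 else 1
    return sum(1 / i for i in range(start, n + 1, 2))


assert abs(reciprocal_series(4) - (1 / 2 + 1 / 4)) < 1e-12
assert abs(reciprocal_series(5) - (1 / 1 + 1 / 3 + 1 / 5)) < 1e-12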
<filename>src/electric/main.py
"""ElectricDB command line interface utility.

Run `electric --help` for usage.
"""
import click

from .resources.auth import auth
# from .resources.account import account
# from .resources.database import database
from . import browser
from . import config


@click.group(cls=click.Group)
@click.option('--endpoint', metavar='URL', envvar='ELECTRIC_DATA_ENDPOINT',
              default=config.default_endpoint(), show_default=True,
              help='Web service API endpoint.')
@click.pass_context
def cli(ctx, endpoint):
    """ElectricDB - Low latency database hosting."""
    ctx.obj = NotImplemented


@cli.command()
@click.pass_obj
def docs(obj):
    """Open the online documentation in a web browser."""
    browser.open(config.documentation_url())


cli.add_command(auth)
# cli.add_command(account)
# cli.add_command(database)
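A sketch of exercising the group in-process with click's built-in test runner; it assumes config.default_endpoint() resolves at import time, and the console-script entry point that installs `electric` itself is defined elsewhere.

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli, ['--help'])
assert result.exit_code == 0
assert 'Low latency database hosting' in result.output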
<gh_stars>0
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.urls import reverse_lazy


class LoggedInMixinDefaults(LoginRequiredMixin):
    """Sets basic login_url for mixin defaults."""
    login_url = reverse_lazy("login")


class EditMixin(LoggedInMixinDefaults, UserPassesTestMixin):
    """Checks whether a user is either a superuser or in the edit group."""

    def test_func(self):
        return any([
            self.request.user.is_superuser,
            self.request.user.groups.filter(name='edit').exists()
        ])


class DeleteMixin(LoggedInMixinDefaults, UserPassesTestMixin):
    """Checks whether a user is either a superuser or in the delete group."""

    def test_func(self):
        return any([
            self.request.user.is_superuser,
            self.request.user.groups.filter(name='delete').exists()
        ])
<filename>Modifier/interface/other.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from PySide6.QtWidgets import QGridLayout, QTextEdit

from widget import BackgroundFrame, ValueSpin, NameLabel
from parameter import DataSetting


# noinspection PyTypeChecker
class Other(BackgroundFrame):
    def __init__(self, parent):
        BackgroundFrame.__init__(self, parent)
        # keys must match DataSetting: '所持金' = gold on hand, '奖励EX' = bonus EX
        self['所持金'] = ValueSpin(None, value=DataSetting()['所持金'])
        self['奖励EX'] = ValueSpin(None, value=DataSetting()['奖励EX'])
        self['所持金'].set_parent(None)
        self['奖励EX'].set_parent(None)
        text_edit = QTextEdit()
        # help text translated from the original Chinese
        text_edit.setHtml('''
        <b>Edits can cause serious errors; saving often is a good habit</b><br>
        1. Supports the original Japanese release and the localized release, on any Dolphin development build from 5.0 to date<br>
        2. When changing a character's class to an advanced class, remember to tick "advanced class" among the hidden traits in the skill editor<br>
        3. The model settings can in principle change whether a character holds a weapon and so on, but odd 3D texture glitches may appear; use with care<br>
        4. Turning an enemy into an ally may let them join after the stage, but it easily breaks later stages' rosters and forced sorties; use with care<br>
        5. Weapon proficiency only takes effect when the class can use that weapon type; classes such as Paladin can tick hidden traits to equip every weapon type<br>
        6. A character's biorhythm tracks battle count: every 10 battles the curve advances 1 step, and 30 steps make one cycle<br>
        7. Some hidden traits in the skill editor have no effect; edit untranslated hidden traits cautiously unless you know what they stand for<br>
        <br>
        <i>created by Hamano0813</i>
        ''')
        text_edit.setReadOnly(True)
        main_layout = QGridLayout()
        main_layout.addWidget(NameLabel('所持金'), 0, 0, 1, 1)
        main_layout.addWidget(self['所持金'], 0, 1, 1, 1)
        main_layout.addWidget(NameLabel('奖励EX'), 1, 0, 1, 1)
        main_layout.addWidget(self['奖励EX'], 1, 1, 1, 1)
        main_layout.addWidget(text_edit, 2, 0, 1, 3)
        main_layout.setSpacing(3)
        self.setLayout(main_layout)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .event import *
from .backtest_event_engine import *
from .live_event_engine import *
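The three wildcard imports above re-export every public name from the submodules at the package root. When that surface should stay explicit, the usual companion is an __all__ declaration inside each submodule; a minimal sketch follows (the Event class is an assumed, illustrative name, since the submodules themselves are not shown here):

# event.py (sketch): __all__ pins down exactly what `from .event import *`
# re-exports at the package root. Event is an assumed name for illustration.
__all__ = ['Event']


class Event(object):
    """Minimal event record: a type tag plus an optional payload dict."""

    def __init__(self, event_type, data=None):
        self.event_type = event_type
        self.data = data if data is not None else {}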
# autoio/mess_io/tests/test__read_pes.py
""" tests pes reader
"""

import os
import numpy
import mess_io

PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(PATH, 'data')
DATA_NAME = 'rates.inp'
with open(os.path.join(DATA_PATH, DATA_NAME), 'r') as datfile:
    INP_STR = datfile.read()


def test__pes():
    """ tests mess_io.reader.pes
    """

    # Test reading with removing any fake wells
    energy_dct1, conn_lst1 = mess_io.reader.pes(
        input_string=INP_STR,
        read_fake=False)
    ref_energy_dct1 = {
        'P1': 0.0,
        'P2': 3.22,
        'B1': 13.23
    }
    ref_conn_lst1 = (
        ('P1', 'B1'),
        ('B1', 'P2')
    )
    assert set(energy_dct1.keys()) == set(ref_energy_dct1.keys())
    assert all(numpy.isclose(energy_dct1[key], ref_energy_dct1[key])
               for key in energy_dct1)
    assert conn_lst1 == ref_conn_lst1

    # Test reading the entire PES with fake wells
    energy_dct2, conn_lst2 = mess_io.reader.pes(
        input_string=INP_STR,
        read_fake=True)
    ref_energy_dct2 = {
        'F1': -1.0,
        'F2': 2.22,
        'P1': 0.0,
        'P2': 3.22,
        'FRB1': 0.0,
        'FPB1': 3.22,
        'B1': 13.23
    }
    ref_conn_lst2 = (
        ('P1', 'FRB1'),
        ('FRB1', 'F1'),
        ('P2', 'FPB1'),
        ('FPB1', 'F2'),
        ('F1', 'B1'),
        ('B1', 'F2')
    )
    assert set(energy_dct2.keys()) == set(ref_energy_dct2.keys())
    assert all(numpy.isclose(energy_dct2[key], ref_energy_dct2[key])
               for key in energy_dct2)
    assert conn_lst2 == ref_conn_lst2


if __name__ == '__main__':
    test__pes()
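The (energy_dct, conn_lst) pair that mess_io.reader.pes returns describes a graph: node energies plus edges between stationary points. As a self-contained illustration, the reference values asserted above fold into an adjacency map like so:

# Sketch: combine the energy dict and connection list into an adjacency map.
# Runs standalone because it reuses only the reference data from the test.
def pes_adjacency(energy_dct, conn_lst):
    adj = {name: [] for name in energy_dct}
    for point_a, point_b in conn_lst:
        adj[point_a].append(point_b)
        adj[point_b].append(point_a)
    return adj


print(pes_adjacency({'P1': 0.0, 'P2': 3.22, 'B1': 13.23},
                    (('P1', 'B1'), ('B1', 'P2'))))
# -> {'P1': ['B1'], 'P2': ['B1'], 'B1': ['P1', 'P2']}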
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# source repo: antopen/alipay-sdk-python-all
import json

from alipay.aop.api.constant.ParamConstants import *


class MultiStagePayLineInfo(object):

    def __init__(self):
        self._payment_amount = None
        self._payment_idx = None

    @property
    def payment_amount(self):
        return self._payment_amount

    @payment_amount.setter
    def payment_amount(self, value):
        self._payment_amount = value

    @property
    def payment_idx(self):
        return self._payment_idx

    @payment_idx.setter
    def payment_idx(self, value):
        self._payment_idx = value

    def to_alipay_dict(self):
        params = dict()
        if self.payment_amount:
            if hasattr(self.payment_amount, 'to_alipay_dict'):
                params['payment_amount'] = self.payment_amount.to_alipay_dict()
            else:
                params['payment_amount'] = self.payment_amount
        if self.payment_idx:
            if hasattr(self.payment_idx, 'to_alipay_dict'):
                params['payment_idx'] = self.payment_idx.to_alipay_dict()
            else:
                params['payment_idx'] = self.payment_idx
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = MultiStagePayLineInfo()
        if 'payment_amount' in d:
            o.payment_amount = d['payment_amount']
        if 'payment_idx' in d:
            o.payment_idx = d['payment_idx']
        return o
# -*- coding: utf-8 -*-
from scripts import tabledef
from flask import session
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager
import bcrypt
import sys, subprocess, ipaddress, time, datetime, json, os, csv, copy
from watson_developer_cloud import DiscoveryV1

EnvID = "5aec3469-82f9-49cb-9718-e3d0526a85f7"
ColID = "ccc5a579-296d-445f-a4cf-9fd81c536e8d"
ConfID = "e813ec51-af96-422f-943c-65d776818292"


@contextmanager
def session_scope():
    """Provide a transactional scope around a series of operations."""
    s = get_session()
    s.expire_on_commit = False
    try:
        yield s
        s.commit()
    except:
        s.rollback()
        raise
    finally:
        s.close()


def get_session():
    return sessionmaker(bind=tabledef.engine)()


def get_natural_language_query(query):
    #with session_scope() as s:
    print("query is" + query)
    discovery = DiscoveryV1(version='2018-03-05',
                            username="9e523dc4-1206-4898-a30f-faf75cd8526b",
                            password="<PASSWORD>")
    my_query = discovery.query(environment_id=EnvID, collection_id=ColID,
                               query=query, passages='true',
                               passages_count='1', count=1, highlight='true')
    p_passage = my_query['passages'][0]["passage_text"]
    p_score = my_query['passages'][0]["passage_score"]
    p_id = my_query['passages'][0]["document_id"]
    querylist = [p_passage, p_score, p_id]
    return querylist
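Typical use of the session_scope helper above commits on success and rolls back on any exception; a minimal sketch, assuming tabledef defines a User model (its contents are not shown here):

# Usage sketch for session_scope(); tabledef.User is a hypothetical model.
with session_scope() as s:
    s.add(tabledef.User(username='demo'))
# Leaving the block commits; an exception inside it would roll back instead.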
# -*- coding: utf-8 -*-
# encoding: utf-8
'''
Created on 2013-10-22

@author: kane
'''
from StringIO import StringIO
import datetime
import json

from dateutil.relativedelta import relativedelta
from django.core import serializers
from django.core.exceptions import PermissionDenied
from django.db.models.aggregates import Max
from django.db.models.query_utils import Q
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template.context import RequestContext
from django.utils import simplejson
import pytz

from billiards import settings
from billiards.commons import tojson
from billiards.models import Match, MatchEnroll, \
    match_fields
from billiards.settings import TEMPLATE_ROOT, TIME_ZONE


def updateMatchJsonStrEnrollInfo(matchjsonstr, user, matchArray):
    # Mark serialized matches the user has already enrolled in.
    enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchArray) & Q(user__exact=user))
    if len(enrolledMatch) > 0:
        matches = simplejson.loads(matchjsonstr)
        for enrollinfo in enrolledMatch:
            for match in matches:
                if enrollinfo.match.id == match['pk']:
                    match['fields']['enrolled'] = True
                    break
        matchjsonstr = simplejson.dumps(matches)
    return matchjsonstr


def updateMatchQuerySetEnrollInfo(matchQuerySet, user):
    # Same marking as above, but on model instances instead of JSON.
    enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchQuerySet) & Q(user__exact=user))
    if len(enrolledMatch) > 0:
        for enrollinfo in enrolledMatch:
            for match in matchQuerySet:
                if enrollinfo.match.id == match.id:
                    setattr(match, 'enrolled', True)
                    break
    return matchQuerySet

datefmt = "%Y-%m-%d"


def set_to_midnight(dt):
    midnight = datetime.time(0)
    localtz = pytz.timezone(settings.TIME_ZONE)
    return localtz.localize(datetime.datetime.combine(dt.date(), midnight))


def getQueryCriteria(starttime, endtime):
    # (any clause involving `endtime` is truncated in the source)
    return Q(starttime__gte=set_to_midnight(starttime)) & Q(status='approved')


def getMatchByRequest(request, starttime=None, endtime=None, deltadays=1):
    if starttime == None:
        starttimenative = datetime.datetime.today()
        localtz = pytz.timezone(settings.TIME_ZONE)
        starttime = localtz.localize(starttimenative)
    try:
        if request.GET.get('starttime') is not None:
            starttime = set_to_midnight(datetime.datetime.utcfromtimestamp(float(request.GET.get('starttime'))))
    except Exception:
        pass
    if endtime == None:
        endtime = starttime + relativedelta(days=deltadays)
    try:
        if request.GET.get('endtime') is not None:
            endtime = datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime')))
    except Exception:
        pass
    query = getQueryCriteria(starttime, endtime)
    matches = Match.objects.filter(query).order_by('starttime')
    return matches, starttime, endtime


def index(request, view=None):
    starttime = None
    intervals = 7
    starttime2 = datetime.datetime.today()
    endtime2 = starttime2 + relativedelta(days=intervals)
    if 's' in request.GET:
        try:
            starttimeS = datetime.datetime.utcfromtimestamp(float(request.GET.get('s')))
            if set_to_midnight(starttime2) <= set_to_midnight(starttimeS) <= set_to_midnight(endtime2):
                starttime = starttimeS
        except Exception:
            pass
    matches, starttime, endtime = getMatchByRequest(request, starttime)
    if request.GET.get('f') == 'json':
        jsonstr = tojson(matches, match_fields)
        if request.user.is_authenticated():
            jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, matches)
        return HttpResponse(jsonstr)
    # Per-day match counts and the top bonus over the coming week.
    query2 = getQueryCriteria(starttime2, endtime2)
    matchCountSummary = dict()
    rt = Match.objects.filter(query2)
    for match in rt:
        if match.starttime.strftime(datefmt) in matchCountSummary:
            matchCountSummary[match.starttime.strftime(datefmt)] += 1
        else:
            matchCountSummary[match.starttime.strftime(datefmt)] = 1
    topOneBonusSummary = Match.objects.values('starttime', 'bonus').filter(query2).filter(
        bonus=Match.objects.filter(query2).aggregate(Max('bonus'))['bonus__max'])

    def ValuesQuerySetToDict(vqs):
        return [{'bonus': item['bonus'], 'starttime': item['starttime'].strftime(datefmt)}
                for item in vqs]

    # How `view` selects the template is truncated in the source; the
    # assignment below is an assumed placeholder.
    page = 'match_index.html'
    return render_to_response(TEMPLATE_ROOT + page, {
        'matches': matches,
        'startdate': starttime2,
        'enddate': endtime2,
        'intervals': intervals,
        'summary': matchCountSummary,  # context key assumed; truncated in the source
        'bonussummary': simplejson.dumps(ValuesQuerySetToDict(topOneBonusSummary)),
    }, context_instance=RequestContext(request))


def getMatch(matchid):
    return get_object_or_404(Match, pk=matchid, status='approved')


def detail(request, matchid):
    match = getMatch(matchid)
    if match.type == 2:
        return redirect('activity_detail', matchid=matchid)
    if request.GET.get('f') == 'json':
        json_serializer = serializers.get_serializer("json")()
        stream = StringIO()
        json_serializer.serialize([match],
                                  fields=('id', 'poolroom', 'title', 'bonus', 'starttime', 'description'),
                                  ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True)
        jsonstr = stream.getvalue()
        if request.user.is_authenticated():
            jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, [match])
        return HttpResponse(jsonstr)
    if request.user.is_authenticated():
        match = updateMatchQuerySetEnrollInfo([match], request.user)[0]
    return render_to_response(TEMPLATE_ROOT + 'match_detail.html', {'match': match},
                              context_instance=RequestContext(request))


def activity(request, matchid):
    match = getMatch(matchid)
    if match.type == 1:
        return redirect('match_detail', matchid=matchid)
    return render_to_response(TEMPLATE_ROOT + 'activity_detail.html', {'match': match},
                              context_instance=RequestContext(request))


def enroll(request, matchid):
    if not request.user.is_authenticated():
        raise PermissionDenied
    match = getMatch(matchid)
    # (a guard returning {'rt': ..., 'msg': 'match is invalid'} is truncated
    #  in the source)
    if match.type == 2:
        return HttpResponse(json.dumps({'rt': 5, 'msg': 'can not enroll activity'}),
                            content_type="application/json")
    obj, created = MatchEnroll.objects.get_or_create(
        match=match, user=request.user,
        defaults={'enrolltime': datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))})
    # get_or_create always returns an instance, so branch on `created` to
    # distinguish a fresh enrollment from an existing one.
    if created:
        msg = {'rt': 1, 'msg': 'enrolled'}
    else:
        msg = {'rt': 2, 'msg': 'already enrolled'}
    return HttpResponse(json.dumps(msg), content_type="application/json")


def redbull_2014_05(request):
    if 'f' in request.GET and request.GET.get('f') == 'json':
        redbull_matches = Match.objects.filter(Q(flags=Match.flags.redbull)).order_by('starttime')
        json_serializer = serializers.get_serializer("json")()
        stream = StringIO()
        json_serializer.serialize(redbull_matches, fields=match_fields, ensure_ascii=False,
                                  stream=stream, indent=2, use_natural_keys=True)
        jsonstr = stream.getvalue()
        return HttpResponse(jsonstr)
    # (the non-JSON branch -- return render_to_response(TEMPLATE_ROOT + ... --
    #  is truncated in the source)
'enddate': endtime2, 'intervals':", "from django.core import serializers from django.core.exceptions import PermissionDenied from django.db.models.aggregates import Max from", "datetime.time(0) localtz = pytz.timezone(settings.TIME_ZONE) return localtz.localize(datetime.datetime.combine(dt.date(), midnight)) def getQueryCriteria(starttime, endtime): return Q(starttime__gte=set_to_midnight(starttime)) &", "return render_to_response(TEMPLATE_ROOT + 'match_detail.html', {'match': match}, context_instance=RequestContext(request)) def activity(request, matchid): match = getMatch(matchid)", "= datetime.datetime.today() localtz = pytz.timezone(settings.TIME_ZONE) starttime = localtz.localize(starttimenative) try: if request.GET.get('starttime') is not", "RequestContext from django.utils import simplejson import pytz from billiards import settings from billiards.commons", "billiards.commons import tojson from billiards.models import Match, MatchEnroll, \\ match_fields from billiards.settings import", "= None, deltadays = 1): if starttime == None: starttimenative = datetime.datetime.today() localtz", "1 topOneBonusSummary = Match.objects.values('starttime','bonus').filter(query2).filter(bonus=Match.objects.filter(query2).aggregate(Max('bonus'))['bonus__max']) def ValuesQuerySetToDict(vqs): return [{'bonus': item['bonus'], 'starttime': item['starttime'].strftime(datefmt)} for item", "import tojson from billiards.models import Match, MatchEnroll, \\ match_fields from billiards.settings import TEMPLATE_ROOT,", "from billiards.commons import tojson from billiards.models import Match, MatchEnroll, \\ match_fields from billiards.settings", "+ relativedelta(days=deltadays) try: if request.GET.get('endtime') is not None: endtime = datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime'))) except Exception:", "= Match.objects.filter(query).order_by('starttime') return matches, starttime, endtime def index(request, view = None): starttime =", "matches, 'startdate': starttime2, 'enddate': endtime2, 'intervals': intervals, 'matchsummary': matchCountSummary, 'bonussummary': simplejson.dumps(ValuesQuerySetToDict(topOneBonusSummary)), }, context_instance=RequestContext(request))", "pytz from billiards import settings from billiards.commons import tojson from billiards.models import Match,", "== None: starttimenative = datetime.datetime.today() localtz = pytz.timezone(settings.TIME_ZONE) starttime = localtz.localize(starttimenative) try: if", "match}, context_instance=RequestContext(request)) def activity(request, matchid): match = getMatch(matchid) if match.type == 1: return", "{'rt': 2, 'msg': 'already enrolled'} elif created != False: msg = {'rt': 1,", "None: endtime = datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime'))) except Exception: pass query = getQueryCriteria(starttime, endtime) matches =", "on 2013年10月22日 @author: kane ''' from StringIO import StringIO import datetime import json", "{'rt': 1, 'msg': 'enrolled'} return HttpResponse(json.dumps(msg), content_type=\"application/json\") def redbull_2014_05(request): if 'f' in request.GET", "StringIO() json_serializer.serialize(redbull_matches, fields=match_fields, ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True) jsonstr = stream.getvalue() return HttpResponse(jsonstr) return", "else: matchCountSummary[match.starttime.strftime(datefmt)] = 1 topOneBonusSummary = Match.objects.values('starttime','bonus').filter(query2).filter(bonus=Match.objects.filter(query2).aggregate(Max('bonus'))['bonus__max']) def 
ValuesQuerySetToDict(vqs): return [{'bonus': item['bonus'], 'starttime':", "stream = StringIO() json_serializer.serialize([match], fields=('id', 'poolroom', 'title', 'bonus', 'starttime', 'description'), ensure_ascii=False, stream=stream, indent=2,", "= simplejson.dumps(matches) return matchjsonstr def updateMatchQuerySetEnrollInfo(matchQuerySet, user): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchQuerySet) & Q(user__exact=user)) if", "fields=('id', 'poolroom', 'title', 'bonus', 'starttime', 'description'), ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True) jsonstr = stream.getvalue()", "Exception: pass query = getQueryCriteria(starttime, endtime) matches = Match.objects.filter(query).order_by('starttime') return matches, starttime, endtime", "def detail(request, matchid): match = getMatch(matchid) if match.type == 2: return redirect('activity_detail', matchid=matchid)", "invalid'}), content_type=\"application/json\") elif match.type == 2: return HttpResponse(json.dumps({'rt': 5, 'msg': 'can not enroll", "enrolled'} elif created != False: msg = {'rt': 1, 'msg': 'enrolled'} return HttpResponse(json.dumps(msg),", "query2 = getQueryCriteria(starttime2, endtime2) matchCountSummary = dict() rt = Match.objects.filter(query2) for match in", "= {'rt': 1, 'msg': 'enrolled'} return HttpResponse(json.dumps(msg), content_type=\"application/json\") def redbull_2014_05(request): if 'f' in", "None): starttime = None intervals = 7 starttime2 = datetime.datetime.today() endtime2 = starttime2", "not None: starttime = set_to_midnight(datetime.datetime.utcfromtimestamp(float(request.GET.get('starttime')))) except Exception: pass if endtime == None: endtime", "& Q(status='approved') & Q(starttime__lt=set_to_midnight(endtime)) def getMatchByRequest(request, starttime = None, endtime = None, deltadays", "stream=stream, indent=2, use_natural_keys=True) jsonstr = stream.getvalue() if request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, [match])", "intervals = 7 starttime2 = datetime.datetime.today() endtime2 = starttime2 + relativedelta(days=intervals) if 's'", "datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))}) if obj != False: msg = {'rt': 2, 'msg': 'already enrolled'} elif", "Q(starttime__lt=set_to_midnight(endtime)) def getMatchByRequest(request, starttime = None, endtime = None, deltadays = 1): if", "request.GET.get('f') == 'json': jsonstr = tojson(matches, match_fields) if request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user,", "[match]) return HttpResponse(jsonstr) if request.user.is_authenticated(): match = updateMatchQuerySetEnrollInfo([match], request.user)[0] return render_to_response(TEMPLATE_ROOT + 'match_detail.html',", "activity'}), content_type=\"application/json\") obj, created = MatchEnroll.objects.get_or_create(match=match, user=request.user, defaults={'enrolltime': datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))}) if obj != False:", "pk=matchid, status='approved') def detail(request, matchid): match = getMatch(matchid) if match.type == 2: return", "user=request.user, defaults={'enrolltime': datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))}) if obj != False: msg = {'rt': 2, 'msg': 'already", "if set_to_midnight(starttime2) <= set_to_midnight(starttimeS) <= set_to_midnight(endtime2): starttime = starttimeS except Exception: pass matches,", "Match, MatchEnroll, \\ match_fields from billiards.settings import TEMPLATE_ROOT, 
TIME_ZONE def updateMatchJsonStrEnrollInfo(matchjsonstr, user, matchArray):", "json_serializer.serialize([match], fields=('id', 'poolroom', 'title', 'bonus', 'starttime', 'description'), ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True) jsonstr =", "if 's' in request.GET: try: starttimeS = datetime.datetime.utcfromtimestamp(float(request.GET.get('s'))) if set_to_midnight(starttime2) <= set_to_midnight(starttimeS) <=", "return HttpResponse(jsonstr) page = 'match.html' query2 = getQueryCriteria(starttime2, endtime2) matchCountSummary = dict() rt", "= Match.objects.values('starttime','bonus').filter(query2).filter(bonus=Match.objects.filter(query2).aggregate(Max('bonus'))['bonus__max']) def ValuesQuerySetToDict(vqs): return [{'bonus': item['bonus'], 'starttime': item['starttime'].strftime(datefmt)} for item in vqs]", "request.GET.get('starttime') is not None: starttime = set_to_midnight(datetime.datetime.utcfromtimestamp(float(request.GET.get('starttime')))) except Exception: pass if endtime ==", "updateMatchJsonStrEnrollInfo(jsonstr, request.user, matches) return HttpResponse(jsonstr) page = 'match.html' query2 = getQueryCriteria(starttime2, endtime2) matchCountSummary", "if match.is_expired: return HttpResponse(json.dumps({'rt': 3, 'msg': 'match is expired'}), content_type=\"application/json\") elif match.status !=", "relativedelta(days=intervals) if 's' in request.GET: try: starttimeS = datetime.datetime.utcfromtimestamp(float(request.GET.get('s'))) if set_to_midnight(starttime2) <= set_to_midnight(starttimeS)", "content_type=\"application/json\") def redbull_2014_05(request): if 'f' in request.GET and request.GET.get('f') == 'json': redbull_matches =", "endtime = starttime + relativedelta(days=deltadays) try: if request.GET.get('endtime') is not None: endtime =", "enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchArray) & Q(user__exact=user)) if len(enrolledMatch) > 0: matches = simplejson.loads(matchjsonstr) for", "import Q from django.http import HttpResponse from django.shortcuts import render_to_response, get_object_or_404, redirect from", "expired'}), content_type=\"application/json\") elif match.status != 'approved': return HttpResponse(json.dumps({'rt': 4, 'msg': 'match is invalid'}),", "simplejson.loads(matchjsonstr) for enrollinfo in enrolledMatch: for match in matches: if enrollinfo.match.id == match['pk']:", "render_to_response(TEMPLATE_ROOT + page, {'matches': matches, 'startdate': starttime2, 'enddate': endtime2, 'intervals': intervals, 'matchsummary': matchCountSummary,", "request.GET.get('endtime') is not None: endtime = datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime'))) except Exception: pass query = getQueryCriteria(starttime,", "matchid): match = getMatch(matchid) if match.type == 2: return redirect('activity_detail', matchid=matchid) if request.GET.get('f')", "deltadays = 1): if starttime == None: starttimenative = datetime.datetime.today() localtz = pytz.timezone(settings.TIME_ZONE)", "pass matches, starttime, endtime = getMatchByRequest(request, starttime) if request.GET.get('f') == 'json': jsonstr =", "HttpResponse(jsonstr) return render_to_response(TEMPLATE_ROOT + 'redbull/match_poolroom.html', {}, context_instance=RequestContext(request)) def winners(request): return render_to_response(TEMPLATE_ROOT + 'redbull/match_winners.html',", "= Match.objects.filter(query2) for match in rt: if match.starttime.strftime(datefmt) in matchCountSummary: matchCountSummary[match.starttime.strftime(datefmt)] += 1", "matchjsonstr def 
updateMatchQuerySetEnrollInfo(matchQuerySet, user): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchQuerySet) & Q(user__exact=user)) if len(enrolledMatch) > 0:", "if request.user.is_authenticated(): match = updateMatchQuerySetEnrollInfo([match], request.user)[0] return render_to_response(TEMPLATE_ROOT + 'match_detail.html', {'match': match}, context_instance=RequestContext(request))", "match = getMatch(matchid) if match.type == 2: return redirect('activity_detail', matchid=matchid) if request.GET.get('f') ==", "False: msg = {'rt': 1, 'msg': 'enrolled'} return HttpResponse(json.dumps(msg), content_type=\"application/json\") def redbull_2014_05(request): if", "'f' in request.GET and request.GET.get('f') == 'json': redbull_matches = Match.objects.filter(Q(flags=Match.flags.redbull)).order_by('starttime') json_serializer = serializers.get_serializer(\"json\")()", "= stream.getvalue() return HttpResponse(jsonstr) return render_to_response(TEMPLATE_ROOT + 'redbull/match_poolroom.html', {}, context_instance=RequestContext(request)) def winners(request): return", "from billiards.models import Match, MatchEnroll, \\ match_fields from billiards.settings import TEMPLATE_ROOT, TIME_ZONE def", "pytz.timezone(settings.TIME_ZONE) return localtz.localize(datetime.datetime.combine(dt.date(), midnight)) def getQueryCriteria(starttime, endtime): return Q(starttime__gte=set_to_midnight(starttime)) & Q(status='approved') & Q(starttime__lt=set_to_midnight(endtime))", "updateMatchQuerySetEnrollInfo([match], request.user)[0] return render_to_response(TEMPLATE_ROOT + 'match_detail.html', {'match': match}, context_instance=RequestContext(request)) def activity(request, matchid): match", "serializers from django.core.exceptions import PermissionDenied from django.db.models.aggregates import Max from django.db.models.query_utils import Q", "match in matchQuerySet: if enrollinfo.match.id == match.id: setattr(match, 'enrolled', True) break return matchQuerySet", "match_fields) if request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, matches) return HttpResponse(jsonstr) page = 'match.html'", "= starttimeS except Exception: pass matches, starttime, endtime = getMatchByRequest(request, starttime) if request.GET.get('f')", "+ page, {'matches': matches, 'startdate': starttime2, 'enddate': endtime2, 'intervals': intervals, 'matchsummary': matchCountSummary, 'bonussummary':", "'msg': 'match is expired'}), content_type=\"application/json\") elif match.status != 'approved': return HttpResponse(json.dumps({'rt': 4, 'msg':", "'msg': 'can not enroll activity'}), content_type=\"application/json\") obj, created = MatchEnroll.objects.get_or_create(match=match, user=request.user, defaults={'enrolltime': datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))})", "+ 'match_detail.html', {'match': match}, context_instance=RequestContext(request)) def activity(request, matchid): match = getMatch(matchid) if match.type", "'json': jsonstr = tojson(matches, match_fields) if request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, matches) return", "import HttpResponse from django.shortcuts import render_to_response, get_object_or_404, redirect from django.template.context import RequestContext from", "is not None: starttime = set_to_midnight(datetime.datetime.utcfromtimestamp(float(request.GET.get('starttime')))) except Exception: pass if endtime == None:", "matches: if enrollinfo.match.id == match['pk']: match['fields']['enrolled'] = 
True break matchjsonstr = simplejson.dumps(matches) return", "not request.user.is_authenticated(): raise PermissionDenied match = getMatch(matchid) if match.is_expired: return HttpResponse(json.dumps({'rt': 3, 'msg':", "django.utils import simplejson import pytz from billiards import settings from billiards.commons import tojson", "endtime2 = starttime2 + relativedelta(days=intervals) if 's' in request.GET: try: starttimeS = datetime.datetime.utcfromtimestamp(float(request.GET.get('s')))", "match.type == 2: return redirect('activity_detail', matchid=matchid) if request.GET.get('f') == 'json': json_serializer = serializers.get_serializer(\"json\")()", "'matchsummary': matchCountSummary, 'bonussummary': simplejson.dumps(ValuesQuerySetToDict(topOneBonusSummary)), }, context_instance=RequestContext(request)) def getMatch(matchid): return get_object_or_404(Match, pk=matchid, status='approved') def", "= datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime'))) except Exception: pass query = getQueryCriteria(starttime, endtime) matches = Match.objects.filter(query).order_by('starttime') return", "indent=2, use_natural_keys=True) jsonstr = stream.getvalue() if request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, [match]) return", "return HttpResponse(jsonstr) return render_to_response(TEMPLATE_ROOT + 'redbull/match_poolroom.html', {}, context_instance=RequestContext(request)) def winners(request): return render_to_response(TEMPLATE_ROOT +", "= pytz.timezone(settings.TIME_ZONE) return localtz.localize(datetime.datetime.combine(dt.date(), midnight)) def getQueryCriteria(starttime, endtime): return Q(starttime__gte=set_to_midnight(starttime)) & Q(status='approved') &", "match}, context_instance=RequestContext(request)) def enroll(request, matchid): if not request.user.is_authenticated(): raise PermissionDenied match = getMatch(matchid)", "django.core import serializers from django.core.exceptions import PermissionDenied from django.db.models.aggregates import Max from django.db.models.query_utils", "if match.starttime.strftime(datefmt) in matchCountSummary: matchCountSummary[match.starttime.strftime(datefmt)] += 1 else: matchCountSummary[match.starttime.strftime(datefmt)] = 1 topOneBonusSummary =", "<= set_to_midnight(endtime2): starttime = starttimeS except Exception: pass matches, starttime, endtime = getMatchByRequest(request,", "for match in matches: if enrollinfo.match.id == match['pk']: match['fields']['enrolled'] = True break matchjsonstr", "request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, [match]) return HttpResponse(jsonstr) if request.user.is_authenticated(): match = updateMatchQuerySetEnrollInfo([match],", "localtz.localize(datetime.datetime.combine(dt.date(), midnight)) def getQueryCriteria(starttime, endtime): return Q(starttime__gte=set_to_midnight(starttime)) & Q(status='approved') & Q(starttime__lt=set_to_midnight(endtime)) def getMatchByRequest(request,", "stream=stream, indent=2, use_natural_keys=True) jsonstr = stream.getvalue() return HttpResponse(jsonstr) return render_to_response(TEMPLATE_ROOT + 'redbull/match_poolroom.html', {},", "None: endtime = starttime + relativedelta(days=deltadays) try: if request.GET.get('endtime') is not None: endtime", "= simplejson.loads(matchjsonstr) for enrollinfo in enrolledMatch: for match in matches: if enrollinfo.match.id ==", "True break matchjsonstr = simplejson.dumps(matches) return matchjsonstr def updateMatchQuerySetEnrollInfo(matchQuerySet, 
user): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchQuerySet)", "try: if request.GET.get('starttime') is not None: starttime = set_to_midnight(datetime.datetime.utcfromtimestamp(float(request.GET.get('starttime')))) except Exception: pass if", "getMatch(matchid) if match.type == 1: return redirect('match_detail', matchid=matchid) return render_to_response(TEMPLATE_ROOT + 'activity_detail.html', {'match':", "def updateMatchJsonStrEnrollInfo(matchjsonstr, user, matchArray): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchArray) & Q(user__exact=user)) if len(enrolledMatch) > 0:", "break matchjsonstr = simplejson.dumps(matches) return matchjsonstr def updateMatchQuerySetEnrollInfo(matchQuerySet, user): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchQuerySet) &", "not None: endtime = datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime'))) except Exception: pass query = getQueryCriteria(starttime, endtime) matches", "starttime2, 'enddate': endtime2, 'intervals': intervals, 'matchsummary': matchCountSummary, 'bonussummary': simplejson.dumps(ValuesQuerySetToDict(topOneBonusSummary)), }, context_instance=RequestContext(request)) def getMatch(matchid):", "import serializers from django.core.exceptions import PermissionDenied from django.db.models.aggregates import Max from django.db.models.query_utils import", "starttime = localtz.localize(starttimenative) try: if request.GET.get('starttime') is not None: starttime = set_to_midnight(datetime.datetime.utcfromtimestamp(float(request.GET.get('starttime')))) except", "<= set_to_midnight(starttimeS) <= set_to_midnight(endtime2): starttime = starttimeS except Exception: pass matches, starttime, endtime", "django.shortcuts import render_to_response, get_object_or_404, redirect from django.template.context import RequestContext from django.utils import simplejson", "3, 'msg': 'match is expired'}), content_type=\"application/json\") elif match.status != 'approved': return HttpResponse(json.dumps({'rt': 4,", "= set_to_midnight(datetime.datetime.utcfromtimestamp(float(request.GET.get('starttime')))) except Exception: pass if endtime == None: endtime = starttime +", "return redirect('activity_detail', matchid=matchid) if request.GET.get('f') == 'json': json_serializer = serializers.get_serializer(\"json\")() stream = StringIO()", "page, {'matches': matches, 'startdate': starttime2, 'enddate': endtime2, 'intervals': intervals, 'matchsummary': matchCountSummary, 'bonussummary': simplejson.dumps(ValuesQuerySetToDict(topOneBonusSummary)),", "def redbull_2014_05(request): if 'f' in request.GET and request.GET.get('f') == 'json': redbull_matches = Match.objects.filter(Q(flags=Match.flags.redbull)).order_by('starttime')", "request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, matches) return HttpResponse(jsonstr) page = 'match.html' query2 =", "request.user.is_authenticated(): raise PermissionDenied match = getMatch(matchid) if match.is_expired: return HttpResponse(json.dumps({'rt': 3, 'msg': 'match", "relativedelta from django.core import serializers from django.core.exceptions import PermissionDenied from django.db.models.aggregates import Max", "endtime = getMatchByRequest(request, starttime) if request.GET.get('f') == 'json': jsonstr = tojson(matches, match_fields) if", "return get_object_or_404(Match, pk=matchid, status='approved') def detail(request, matchid): match = getMatch(matchid) if match.type ==", "+ 'activity_detail.html', {'match': match}, 
context_instance=RequestContext(request)) def enroll(request, matchid): if not request.user.is_authenticated(): raise PermissionDenied", "!= 'approved': return HttpResponse(json.dumps({'rt': 4, 'msg': 'match is invalid'}), content_type=\"application/json\") elif match.type ==", "stream.getvalue() if request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, [match]) return HttpResponse(jsonstr) if request.user.is_authenticated(): match", "Created on 2013年10月22日 @author: kane ''' from StringIO import StringIO import datetime import", "MatchEnroll, \\ match_fields from billiards.settings import TEMPLATE_ROOT, TIME_ZONE def updateMatchJsonStrEnrollInfo(matchjsonstr, user, matchArray): enrolledMatch", "redirect from django.template.context import RequestContext from django.utils import simplejson import pytz from billiards", "endtime = datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime'))) except Exception: pass query = getQueryCriteria(starttime, endtime) matches = Match.objects.filter(query).order_by('starttime')", "return HttpResponse(json.dumps({'rt': 3, 'msg': 'match is expired'}), content_type=\"application/json\") elif match.status != 'approved': return", "context_instance=RequestContext(request)) def activity(request, matchid): match = getMatch(matchid) if match.type == 1: return redirect('match_detail',", "Max from django.db.models.query_utils import Q from django.http import HttpResponse from django.shortcuts import render_to_response,", "in rt: if match.starttime.strftime(datefmt) in matchCountSummary: matchCountSummary[match.starttime.strftime(datefmt)] += 1 else: matchCountSummary[match.starttime.strftime(datefmt)] = 1", "import RequestContext from django.utils import simplejson import pytz from billiards import settings from", "simplejson.dumps(matches) return matchjsonstr def updateMatchQuerySetEnrollInfo(matchQuerySet, user): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchQuerySet) & Q(user__exact=user)) if len(enrolledMatch)", "request.user.is_authenticated(): match = updateMatchQuerySetEnrollInfo([match], request.user)[0] return render_to_response(TEMPLATE_ROOT + 'match_detail.html', {'match': match}, context_instance=RequestContext(request)) def", "updateMatchJsonStrEnrollInfo(jsonstr, request.user, [match]) return HttpResponse(jsonstr) if request.user.is_authenticated(): match = updateMatchQuerySetEnrollInfo([match], request.user)[0] return render_to_response(TEMPLATE_ROOT", "msg = {'rt': 1, 'msg': 'enrolled'} return HttpResponse(json.dumps(msg), content_type=\"application/json\") def redbull_2014_05(request): if 'f'", "datetime import json from dateutil.relativedelta import relativedelta from django.core import serializers from django.core.exceptions", "jsonstr = stream.getvalue() return HttpResponse(jsonstr) return render_to_response(TEMPLATE_ROOT + 'redbull/match_poolroom.html', {}, context_instance=RequestContext(request)) def winners(request):", "import json from dateutil.relativedelta import relativedelta from django.core import serializers from django.core.exceptions import", "context_instance=RequestContext(request)) def getMatch(matchid): return get_object_or_404(Match, pk=matchid, status='approved') def detail(request, matchid): match = getMatch(matchid)", "import StringIO import datetime import json from dateutil.relativedelta import relativedelta from django.core import", "json_serializer = serializers.get_serializer(\"json\")() stream = StringIO() json_serializer.serialize([match], fields=('id', 
'poolroom', 'title', 'bonus', 'starttime', 'description'),", "StringIO import StringIO import datetime import json from dateutil.relativedelta import relativedelta from django.core", "dict() rt = Match.objects.filter(query2) for match in rt: if match.starttime.strftime(datefmt) in matchCountSummary: matchCountSummary[match.starttime.strftime(datefmt)]", "match = updateMatchQuerySetEnrollInfo([match], request.user)[0] return render_to_response(TEMPLATE_ROOT + 'match_detail.html', {'match': match}, context_instance=RequestContext(request)) def activity(request,", "from dateutil.relativedelta import relativedelta from django.core import serializers from django.core.exceptions import PermissionDenied from", "redirect('match_detail', matchid=matchid) return render_to_response(TEMPLATE_ROOT + 'activity_detail.html', {'match': match}, context_instance=RequestContext(request)) def enroll(request, matchid): if", "for match in rt: if match.starttime.strftime(datefmt) in matchCountSummary: matchCountSummary[match.starttime.strftime(datefmt)] += 1 else: matchCountSummary[match.starttime.strftime(datefmt)]", "2, 'msg': 'already enrolled'} elif created != False: msg = {'rt': 1, 'msg':", "matchCountSummary[match.starttime.strftime(datefmt)] = 1 topOneBonusSummary = Match.objects.values('starttime','bonus').filter(query2).filter(bonus=Match.objects.filter(query2).aggregate(Max('bonus'))['bonus__max']) def ValuesQuerySetToDict(vqs): return [{'bonus': item['bonus'], 'starttime': item['starttime'].strftime(datefmt)}", "HttpResponse(jsonstr) page = 'match.html' query2 = getQueryCriteria(starttime2, endtime2) matchCountSummary = dict() rt =", "enrollinfo in enrolledMatch: for match in matchQuerySet: if enrollinfo.match.id == match.id: setattr(match, 'enrolled',", "in matchQuerySet: if enrollinfo.match.id == match.id: setattr(match, 'enrolled', True) break return matchQuerySet datefmt", "enroll(request, matchid): if not request.user.is_authenticated(): raise PermissionDenied match = getMatch(matchid) if match.is_expired: return", "item['starttime'].strftime(datefmt)} for item in vqs] return render_to_response(TEMPLATE_ROOT + page, {'matches': matches, 'startdate': starttime2,", "matchid): if not request.user.is_authenticated(): raise PermissionDenied match = getMatch(matchid) if match.is_expired: return HttpResponse(json.dumps({'rt':", "{'match': match}, context_instance=RequestContext(request)) def activity(request, matchid): match = getMatch(matchid) if match.type == 1:", "endtime2) matchCountSummary = dict() rt = Match.objects.filter(query2) for match in rt: if match.starttime.strftime(datefmt)", "# encoding: utf-8 ''' Created on 2013年10月22日 @author: kane ''' from StringIO import", "= tojson(matches, match_fields) if request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, matches) return HttpResponse(jsonstr) page", "not enroll activity'}), content_type=\"application/json\") obj, created = MatchEnroll.objects.get_or_create(match=match, user=request.user, defaults={'enrolltime': datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))}) if obj", "getMatchByRequest(request, starttime) if request.GET.get('f') == 'json': jsonstr = tojson(matches, match_fields) if request.user.is_authenticated(): jsonstr", "django.db.models.aggregates import Max from django.db.models.query_utils import Q from django.http import HttpResponse from django.shortcuts", "HttpResponse from django.shortcuts import render_to_response, get_object_or_404, redirect from 
django.template.context import RequestContext from django.utils", "jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, matches) return HttpResponse(jsonstr) page = 'match.html' query2 = getQueryCriteria(starttime2,", "StringIO import datetime import json from dateutil.relativedelta import relativedelta from django.core import serializers", "return HttpResponse(json.dumps({'rt': 4, 'msg': 'match is invalid'}), content_type=\"application/json\") elif match.type == 2: return", "indent=2, use_natural_keys=True) jsonstr = stream.getvalue() return HttpResponse(jsonstr) return render_to_response(TEMPLATE_ROOT + 'redbull/match_poolroom.html', {}, context_instance=RequestContext(request))", "PermissionDenied match = getMatch(matchid) if match.is_expired: return HttpResponse(json.dumps({'rt': 3, 'msg': 'match is expired'}),", "getMatch(matchid) if match.is_expired: return HttpResponse(json.dumps({'rt': 3, 'msg': 'match is expired'}), content_type=\"application/json\") elif match.status", "StringIO() json_serializer.serialize([match], fields=('id', 'poolroom', 'title', 'bonus', 'starttime', 'description'), ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True) jsonstr", "import TEMPLATE_ROOT, TIME_ZONE def updateMatchJsonStrEnrollInfo(matchjsonstr, user, matchArray): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchArray) & Q(user__exact=user)) if", "= {'rt': 2, 'msg': 'already enrolled'} elif created != False: msg = {'rt':", "page = 'match.html' query2 = getQueryCriteria(starttime2, endtime2) matchCountSummary = dict() rt = Match.objects.filter(query2)", "'poolroom', 'title', 'bonus', 'starttime', 'description'), ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True) jsonstr = stream.getvalue() if", "-*- coding: utf-8 -*- # encoding: utf-8 ''' Created on 2013年10月22日 @author: kane", "starttime2 + relativedelta(days=intervals) if 's' in request.GET: try: starttimeS = datetime.datetime.utcfromtimestamp(float(request.GET.get('s'))) if set_to_midnight(starttime2)", "}, context_instance=RequestContext(request)) def getMatch(matchid): return get_object_or_404(Match, pk=matchid, status='approved') def detail(request, matchid): match =", "Q(user__exact=user)) if len(enrolledMatch) > 0: for enrollinfo in enrolledMatch: for match in matchQuerySet:", "from django.shortcuts import render_to_response, get_object_or_404, redirect from django.template.context import RequestContext from django.utils import", "Q(user__exact=user)) if len(enrolledMatch) > 0: matches = simplejson.loads(matchjsonstr) for enrollinfo in enrolledMatch: for", "raise PermissionDenied match = getMatch(matchid) if match.is_expired: return HttpResponse(json.dumps({'rt': 3, 'msg': 'match is", "> 0: matches = simplejson.loads(matchjsonstr) for enrollinfo in enrolledMatch: for match in matches:", "and request.GET.get('f') == 'json': redbull_matches = Match.objects.filter(Q(flags=Match.flags.redbull)).order_by('starttime') json_serializer = serializers.get_serializer(\"json\")() stream = StringIO()", "matchArray): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchArray) & Q(user__exact=user)) if len(enrolledMatch) > 0: matches = simplejson.loads(matchjsonstr)", "starttime = None, endtime = None, deltadays = 1): if starttime == None:", "= None): starttime = None intervals = 7 starttime2 = datetime.datetime.today() endtime2 =", "return Q(starttime__gte=set_to_midnight(starttime)) & Q(status='approved') & Q(starttime__lt=set_to_midnight(endtime)) def getMatchByRequest(request, starttime = None, 
endtime =", "return redirect('match_detail', matchid=matchid) return render_to_response(TEMPLATE_ROOT + 'activity_detail.html', {'match': match}, context_instance=RequestContext(request)) def enroll(request, matchid):", "coding: utf-8 -*- # encoding: utf-8 ''' Created on 2013年10月22日 @author: kane '''", "None: starttime = set_to_midnight(datetime.datetime.utcfromtimestamp(float(request.GET.get('starttime')))) except Exception: pass if endtime == None: endtime =", "matchCountSummary[match.starttime.strftime(datefmt)] += 1 else: matchCountSummary[match.starttime.strftime(datefmt)] = 1 topOneBonusSummary = Match.objects.values('starttime','bonus').filter(query2).filter(bonus=Match.objects.filter(query2).aggregate(Max('bonus'))['bonus__max']) def ValuesQuerySetToDict(vqs): return", "request.user, matches) return HttpResponse(jsonstr) page = 'match.html' query2 = getQueryCriteria(starttime2, endtime2) matchCountSummary =", "match.type == 2: return HttpResponse(json.dumps({'rt': 5, 'msg': 'can not enroll activity'}), content_type=\"application/json\") obj,", "'starttime', 'description'), ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True) jsonstr = stream.getvalue() if request.user.is_authenticated(): jsonstr =", "midnight)) def getQueryCriteria(starttime, endtime): return Q(starttime__gte=set_to_midnight(starttime)) & Q(status='approved') & Q(starttime__lt=set_to_midnight(endtime)) def getMatchByRequest(request, starttime", "user): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchQuerySet) & Q(user__exact=user)) if len(enrolledMatch) > 0: for enrollinfo in", "in enrolledMatch: for match in matches: if enrollinfo.match.id == match['pk']: match['fields']['enrolled'] = True", "ValuesQuerySetToDict(vqs): return [{'bonus': item['bonus'], 'starttime': item['starttime'].strftime(datefmt)} for item in vqs] return render_to_response(TEMPLATE_ROOT +", "get_object_or_404, redirect from django.template.context import RequestContext from django.utils import simplejson import pytz from", "if not request.user.is_authenticated(): raise PermissionDenied match = getMatch(matchid) if match.is_expired: return HttpResponse(json.dumps({'rt': 3,", "item in vqs] return render_to_response(TEMPLATE_ROOT + page, {'matches': matches, 'startdate': starttime2, 'enddate': endtime2,", "django.template.context import RequestContext from django.utils import simplejson import pytz from billiards import settings", "None, endtime = None, deltadays = 1): if starttime == None: starttimenative =", "created != False: msg = {'rt': 1, 'msg': 'enrolled'} return HttpResponse(json.dumps(msg), content_type=\"application/json\") def", "for enrollinfo in enrolledMatch: for match in matches: if enrollinfo.match.id == match['pk']: match['fields']['enrolled']", "match in rt: if match.starttime.strftime(datefmt) in matchCountSummary: matchCountSummary[match.starttime.strftime(datefmt)] += 1 else: matchCountSummary[match.starttime.strftime(datefmt)] =", "jsonstr = stream.getvalue() if request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, [match]) return HttpResponse(jsonstr) if", "TEMPLATE_ROOT, TIME_ZONE def updateMatchJsonStrEnrollInfo(matchjsonstr, user, matchArray): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchArray) & Q(user__exact=user)) if len(enrolledMatch)", "in enrolledMatch: for match in matchQuerySet: if enrollinfo.match.id == match.id: setattr(match, 'enrolled', True)", "item['bonus'], 'starttime': item['starttime'].strftime(datefmt)} for item in vqs] 
return render_to_response(TEMPLATE_ROOT + page, {'matches': matches,", "starttime, endtime = getMatchByRequest(request, starttime) if request.GET.get('f') == 'json': jsonstr = tojson(matches, match_fields)", "starttime, endtime def index(request, view = None): starttime = None intervals = 7", "redbull_2014_05(request): if 'f' in request.GET and request.GET.get('f') == 'json': redbull_matches = Match.objects.filter(Q(flags=Match.flags.redbull)).order_by('starttime') json_serializer", "starttimenative = datetime.datetime.today() localtz = pytz.timezone(settings.TIME_ZONE) starttime = localtz.localize(starttimenative) try: if request.GET.get('starttime') is", "from django.http import HttpResponse from django.shortcuts import render_to_response, get_object_or_404, redirect from django.template.context import", "datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime'))) except Exception: pass query = getQueryCriteria(starttime, endtime) matches = Match.objects.filter(query).order_by('starttime') return matches,", "def activity(request, matchid): match = getMatch(matchid) if match.type == 1: return redirect('match_detail', matchid=matchid)", "Q(starttime__gte=set_to_midnight(starttime)) & Q(status='approved') & Q(starttime__lt=set_to_midnight(endtime)) def getMatchByRequest(request, starttime = None, endtime = None,", "django.db.models.query_utils import Q from django.http import HttpResponse from django.shortcuts import render_to_response, get_object_or_404, redirect", "endtime == None: endtime = starttime + relativedelta(days=deltadays) try: if request.GET.get('endtime') is not", "request.user)[0] return render_to_response(TEMPLATE_ROOT + 'match_detail.html', {'match': match}, context_instance=RequestContext(request)) def activity(request, matchid): match =", "is expired'}), content_type=\"application/json\") elif match.status != 'approved': return HttpResponse(json.dumps({'rt': 4, 'msg': 'match is", "if enrollinfo.match.id == match['pk']: match['fields']['enrolled'] = True break matchjsonstr = simplejson.dumps(matches) return matchjsonstr", "pass query = getQueryCriteria(starttime, endtime) matches = Match.objects.filter(query).order_by('starttime') return matches, starttime, endtime def", "use_natural_keys=True) jsonstr = stream.getvalue() if request.user.is_authenticated(): jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, [match]) return HttpResponse(jsonstr)", "defaults={'enrolltime': datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))}) if obj != False: msg = {'rt': 2, 'msg': 'already enrolled'}", "json_serializer = serializers.get_serializer(\"json\")() stream = StringIO() json_serializer.serialize(redbull_matches, fields=match_fields, ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True) jsonstr", "= None intervals = 7 starttime2 = datetime.datetime.today() endtime2 = starttime2 + relativedelta(days=intervals)", "except Exception: pass if endtime == None: endtime = starttime + relativedelta(days=deltadays) try:", "encoding: utf-8 ''' Created on 2013年10月22日 @author: kane ''' from StringIO import StringIO", "4, 'msg': 'match is invalid'}), content_type=\"application/json\") elif match.type == 2: return HttpResponse(json.dumps({'rt': 5,", "{'matches': matches, 'startdate': starttime2, 'enddate': endtime2, 'intervals': intervals, 'matchsummary': matchCountSummary, 'bonussummary': simplejson.dumps(ValuesQuerySetToDict(topOneBonusSummary)), },", "match.is_expired: return HttpResponse(json.dumps({'rt': 3, 'msg': 'match is expired'}), 
content_type=\"application/json\") elif match.status != 'approved':", "from billiards.settings import TEMPLATE_ROOT, TIME_ZONE def updateMatchJsonStrEnrollInfo(matchjsonstr, user, matchArray): enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchArray) &", "= Match.objects.filter(Q(flags=Match.flags.redbull)).order_by('starttime') json_serializer = serializers.get_serializer(\"json\")() stream = StringIO() json_serializer.serialize(redbull_matches, fields=match_fields, ensure_ascii=False, stream=stream, indent=2,", "= serializers.get_serializer(\"json\")() stream = StringIO() json_serializer.serialize([match], fields=('id', 'poolroom', 'title', 'bonus', 'starttime', 'description'), ensure_ascii=False,", "return localtz.localize(datetime.datetime.combine(dt.date(), midnight)) def getQueryCriteria(starttime, endtime): return Q(starttime__gte=set_to_midnight(starttime)) & Q(status='approved') & Q(starttime__lt=set_to_midnight(endtime)) def", "getQueryCriteria(starttime, endtime) matches = Match.objects.filter(query).order_by('starttime') return matches, starttime, endtime def index(request, view =", "from django.template.context import RequestContext from django.utils import simplejson import pytz from billiards import", "= MatchEnroll.objects.filter(Q(match__in=matchQuerySet) & Q(user__exact=user)) if len(enrolledMatch) > 0: for enrollinfo in enrolledMatch: for", "'startdate': starttime2, 'enddate': endtime2, 'intervals': intervals, 'matchsummary': matchCountSummary, 'bonussummary': simplejson.dumps(ValuesQuerySetToDict(topOneBonusSummary)), }, context_instance=RequestContext(request)) def", "match['pk']: match['fields']['enrolled'] = True break matchjsonstr = simplejson.dumps(matches) return matchjsonstr def updateMatchQuerySetEnrollInfo(matchQuerySet, user):", "endtime def index(request, view = None): starttime = None intervals = 7 starttime2", "MatchEnroll.objects.get_or_create(match=match, user=request.user, defaults={'enrolltime': datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))}) if obj != False: msg = {'rt': 2, 'msg':", "def enroll(request, matchid): if not request.user.is_authenticated(): raise PermissionDenied match = getMatch(matchid) if match.is_expired:", "= updateMatchJsonStrEnrollInfo(jsonstr, request.user, matches) return HttpResponse(jsonstr) page = 'match.html' query2 = getQueryCriteria(starttime2, endtime2)", "render_to_response(TEMPLATE_ROOT + 'activity_detail.html', {'match': match}, context_instance=RequestContext(request)) def enroll(request, matchid): if not request.user.is_authenticated(): raise", "stream = StringIO() json_serializer.serialize(redbull_matches, fields=match_fields, ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True) jsonstr = stream.getvalue() return", "def getMatchByRequest(request, starttime = None, endtime = None, deltadays = 1): if starttime", "<reponame>zxkane/billiards<filename>billiards/billiards/views/match.py # -*- coding: utf-8 -*- # encoding: utf-8 ''' Created on 2013年10月22日", "starttime) if request.GET.get('f') == 'json': jsonstr = tojson(matches, match_fields) if request.user.is_authenticated(): jsonstr =", "content_type=\"application/json\") obj, created = MatchEnroll.objects.get_or_create(match=match, user=request.user, defaults={'enrolltime': datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))}) if obj != False: msg", "import datetime import json from dateutil.relativedelta import relativedelta from django.core import serializers from", "redbull_matches = 
Match.objects.filter(Q(flags=Match.flags.redbull)).order_by('starttime') json_serializer = serializers.get_serializer(\"json\")() stream = StringIO() json_serializer.serialize(redbull_matches, fields=match_fields, ensure_ascii=False, stream=stream,", "HttpResponse(json.dumps({'rt': 5, 'msg': 'can not enroll activity'}), content_type=\"application/json\") obj, created = MatchEnroll.objects.get_or_create(match=match, user=request.user,", "return render_to_response(TEMPLATE_ROOT + 'redbull/match_poolroom.html', {}, context_instance=RequestContext(request)) def winners(request): return render_to_response(TEMPLATE_ROOT + 'redbull/match_winners.html', context_instance=RequestContext(request))", "serializers.get_serializer(\"json\")() stream = StringIO() json_serializer.serialize(redbull_matches, fields=match_fields, ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True) jsonstr = stream.getvalue()", "content_type=\"application/json\") elif match.status != 'approved': return HttpResponse(json.dumps({'rt': 4, 'msg': 'match is invalid'}), content_type=\"application/json\")", "status='approved') def detail(request, matchid): match = getMatch(matchid) if match.type == 2: return redirect('activity_detail',", "import relativedelta from django.core import serializers from django.core.exceptions import PermissionDenied from django.db.models.aggregates import", "is not None: endtime = datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime'))) except Exception: pass query = getQueryCriteria(starttime, endtime)", "matches, starttime, endtime def index(request, view = None): starttime = None intervals =", "HttpResponse(json.dumps({'rt': 4, 'msg': 'match is invalid'}), content_type=\"application/json\") elif match.type == 2: return HttpResponse(json.dumps({'rt':", "= 1): if starttime == None: starttimenative = datetime.datetime.today() localtz = pytz.timezone(settings.TIME_ZONE) starttime", "ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True) jsonstr = stream.getvalue() return HttpResponse(jsonstr) return render_to_response(TEMPLATE_ROOT + 'redbull/match_poolroom.html',", "starttime = starttimeS except Exception: pass matches, starttime, endtime = getMatchByRequest(request, starttime) if", "'msg': 'already enrolled'} elif created != False: msg = {'rt': 1, 'msg': 'enrolled'}", "django.http import HttpResponse from django.shortcuts import render_to_response, get_object_or_404, redirect from django.template.context import RequestContext", "starttime + relativedelta(days=deltadays) try: if request.GET.get('endtime') is not None: endtime = datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime'))) except", "Q(status='approved') & Q(starttime__lt=set_to_midnight(endtime)) def getMatchByRequest(request, starttime = None, endtime = None, deltadays =", "= dict() rt = Match.objects.filter(query2) for match in rt: if match.starttime.strftime(datefmt) in matchCountSummary:", "enrollinfo in enrolledMatch: for match in matches: if enrollinfo.match.id == match['pk']: match['fields']['enrolled'] =", "rt: if match.starttime.strftime(datefmt) in matchCountSummary: matchCountSummary[match.starttime.strftime(datefmt)] += 1 else: matchCountSummary[match.starttime.strftime(datefmt)] = 1 topOneBonusSummary", "Q from django.http import HttpResponse from django.shortcuts import render_to_response, get_object_or_404, redirect from django.template.context", "= starttime2 + relativedelta(days=intervals) if 's' in request.GET: try: starttimeS = 
datetime.datetime.utcfromtimestamp(float(request.GET.get('s'))) if" ]

# -*- coding: utf-8 -*-
# encoding: utf-8
'''
Created on 2013-10-22

@author: kane
'''
from StringIO import StringIO
import datetime
import json

from dateutil.relativedelta import relativedelta
from django.core import serializers
from django.core.exceptions import PermissionDenied
from django.db.models.aggregates import Max
from django.db.models.query_utils import Q
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template.context import RequestContext
from django.utils import simplejson
import pytz

from billiards import settings
from billiards.commons import tojson
from billiards.models import Match, MatchEnroll, \
    match_fields
from billiards.settings import TEMPLATE_ROOT, TIME_ZONE


def updateMatchJsonStrEnrollInfo(matchjsonstr, user, matchArray):
    # flag serialized matches the user has already enrolled in
    enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchArray) & Q(user__exact=user))
    if len(enrolledMatch) > 0:
        matches = simplejson.loads(matchjsonstr)
        for enrollinfo in enrolledMatch:
            for match in matches:
                if enrollinfo.match.id == match['pk']:
                    match['fields']['enrolled'] = True
                    break
        matchjsonstr = simplejson.dumps(matches)
    return matchjsonstr


def updateMatchQuerySetEnrollInfo(matchQuerySet, user):
    enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchQuerySet) & Q(user__exact=user))
    if len(enrolledMatch) > 0:
        for enrollinfo in enrolledMatch:
            for match in matchQuerySet:
                if enrollinfo.match.id == match.id:
                    setattr(match, 'enrolled', True)
                    break
    return matchQuerySet


datefmt = "%Y-%m-%d"


def set_to_midnight(dt):
    midnight = datetime.time(0)
    localtz = pytz.timezone(settings.TIME_ZONE)
    return localtz.localize(datetime.datetime.combine(dt.date(), midnight))


def getQueryCriteria(starttime, endtime):
    return Q(starttime__gte=set_to_midnight(starttime)) & Q(status='approved') & Q(starttime__lt=set_to_midnight(endtime))


def getMatchByRequest(request, starttime=None, endtime=None, deltadays=1):
    if starttime is None:
        starttimenative = datetime.datetime.today()
        localtz = pytz.timezone(settings.TIME_ZONE)
        starttime = localtz.localize(starttimenative)
    try:
        if request.GET.get('starttime') is not None:
            starttime = set_to_midnight(datetime.datetime.utcfromtimestamp(float(request.GET.get('starttime'))))
    except Exception:
        pass
    if endtime is None:
        endtime = starttime + relativedelta(days=deltadays)
    try:
        if request.GET.get('endtime') is not None:
            endtime = datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime')))
    except Exception:
        pass
    query = getQueryCriteria(starttime, endtime)
    matches = Match.objects.filter(query).order_by('starttime')
    return matches, starttime, endtime


def index(request, view=None):
    starttime = None
    intervals = 7
    starttime2 = datetime.datetime.today()
    endtime2 = starttime2 + relativedelta(days=intervals)
    if 's' in request.GET:
        try:
            starttimeS = datetime.datetime.utcfromtimestamp(float(request.GET.get('s')))
            if set_to_midnight(starttime2) <= set_to_midnight(starttimeS) <= set_to_midnight(endtime2):
                starttime = starttimeS
        except Exception:
            pass
    matches, starttime, endtime = getMatchByRequest(request, starttime)
    if request.GET.get('f') == 'json':
        jsonstr = tojson(matches, match_fields)
        if request.user.is_authenticated():
            jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, matches)
        return HttpResponse(jsonstr)
    page = 'match.html'
    query2 = getQueryCriteria(starttime2, endtime2)
    matchCountSummary = dict()
    rt = Match.objects.filter(query2)
    for match in rt:
        if match.starttime.strftime(datefmt) in matchCountSummary:
            matchCountSummary[match.starttime.strftime(datefmt)] += 1
        else:
            matchCountSummary[match.starttime.strftime(datefmt)] = 1
    topOneBonusSummary = Match.objects.values('starttime', 'bonus').filter(query2).filter(
        bonus=Match.objects.filter(query2).aggregate(Max('bonus'))['bonus__max'])

    def ValuesQuerySetToDict(vqs):
        return [{'bonus': item['bonus'], 'starttime': item['starttime'].strftime(datefmt)} for item in vqs]

    return render_to_response(TEMPLATE_ROOT + page,
                              {'matches': matches, 'startdate': starttime2, 'enddate': endtime2,
                               'intervals': intervals, 'matchsummary': matchCountSummary,
                               'bonussummary': simplejson.dumps(ValuesQuerySetToDict(topOneBonusSummary))},
                              context_instance=RequestContext(request))


def getMatch(matchid):
    return get_object_or_404(Match, pk=matchid, status='approved')


def detail(request, matchid):
    match = getMatch(matchid)
    if match.type == 2:
        return redirect('activity_detail', matchid=matchid)
    if request.GET.get('f') == 'json':
        json_serializer = serializers.get_serializer("json")()
        stream = StringIO()
        json_serializer.serialize([match], fields=('id', 'poolroom', 'title', 'bonus', 'starttime', 'description'),
                                  ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True)
        jsonstr = stream.getvalue()
        if request.user.is_authenticated():
            jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, [match])
        return HttpResponse(jsonstr)
    if request.user.is_authenticated():
        match = updateMatchQuerySetEnrollInfo([match], request.user)[0]
    return render_to_response(TEMPLATE_ROOT + 'match_detail.html', {'match': match},
                              context_instance=RequestContext(request))


def activity(request, matchid):
    match = getMatch(matchid)
    if match.type == 1:
        return redirect('match_detail', matchid=matchid)
    return render_to_response(TEMPLATE_ROOT + 'activity_detail.html', {'match': match},
                              context_instance=RequestContext(request))


def enroll(request, matchid):
    if not request.user.is_authenticated():
        raise PermissionDenied
    match = getMatch(matchid)
    if match.is_expired:
        return HttpResponse(json.dumps({'rt': 3, 'msg': 'match is expired'}), content_type="application/json")
    elif match.status != 'approved':
        return HttpResponse(json.dumps({'rt': 4, 'msg': 'match is invalid'}), content_type="application/json")
    elif match.type == 2:
        return HttpResponse(json.dumps({'rt': 5, 'msg': 'can not enroll activity'}), content_type="application/json")
    obj, created = MatchEnroll.objects.get_or_create(
        match=match, user=request.user,
        defaults={'enrolltime': datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))})
    # get_or_create() always returns a truthy object, so branch on `created`
    if created:
        msg = {'rt': 1, 'msg': 'enrolled'}
    else:
        msg = {'rt': 2, 'msg': 'already enrolled'}
    return HttpResponse(json.dumps(msg), content_type="application/json")


def redbull_2014_05(request):
    if 'f' in request.GET and request.GET.get('f') == 'json':
        redbull_matches = Match.objects.filter(Q(flags=Match.flags.redbull)).order_by('starttime')
        json_serializer = serializers.get_serializer("json")()
        stream = StringIO()
        json_serializer.serialize(redbull_matches, fields=match_fields, ensure_ascii=False,
                                  stream=stream, indent=2, use_natural_keys=True)
        jsonstr = stream.getvalue()
        return HttpResponse(jsonstr)
    return render_to_response(TEMPLATE_ROOT + 'redbull/match_poolroom.html', {},
                              context_instance=RequestContext(request))


def winners(request):
    return render_to_response(TEMPLATE_ROOT
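The redirects in detail() and activity() rely on named URL patterns ``match_detail`` and ``activity_detail``. A minimal, illustrative wiring in the Django 1.x style this code targets; the module path and URL regexes are assumptions, not taken from the dump:

# Hypothetical urls.py for the views above; the names mirror
# redirect('match_detail', ...) and redirect('activity_detail', ...).
from django.conf.urls import patterns, url

urlpatterns = patterns(
    'billiards.match.views',  # assumed module path
    url(r'^match/$', 'index'),
    url(r'^match/(?P<matchid>\d+)/$', 'detail', name='match_detail'),
    url(r'^activity/(?P<matchid>\d+)/$', 'activity', name='activity_detail'),
    url(r'^match/(?P<matchid>\d+)/enroll/$', 'enroll'),
)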
[ "if f.endswith(\".plt\")] fileset = sorted(fileset) print(\"==> Found %d files.\" % len(fileset)) of =", "len(fileset)) of = open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): #", "\"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): # first skip the header", "pd.read_csv(fd, header=None) m = df.values[:,range(3)] m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) # drwxr-xr-x 3 wern_m3", "Francisco Directory\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) fileset =", "= open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): # first skip", "in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values m =", "def sanfrancisco(indir, outfile): print(\"Importing from a San Francisco Directory\") print(\" In-Dir: %s\" %", "Found %d files.\" % len(fileset)) of = open(outfile, \"w\") of.write(\"x y id\\n\"); for", "Sep 16 2016 roma # drwxr-xr-x 4 wern_m3 1001 4096 Jan 10 2017", "len(fileset)) of = open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): df", "of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True)", "m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def geolife(indir, outfile): print(\"Importing from GeoLife\") print(\" In-Dir: %s\"", "the dataset reports a smoothed derivative. integrate to get a spatial object m[:,-1]", "as plt; def character(indir, outfile): print(\"Importing Character dataset\") print(\" In-Dir: %s\" % indir)", "in os.listdir(indir) if x.endswith(\".plt\")]) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile,", "4096 Jan 10 2017 sf_large # drwxr-xr-x 11 wern_m3 1001 4096 Nov 5", "np; import pandas as pd; import os; import sys; from tqdm import tqdm;", "a spatial object m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def sanfrancisco(indir, outfile): print(\"Importing from a", "spatial object m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def sanfrancisco(indir, outfile): print(\"Importing from a San", "San Francisco Directory\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) fileset", "skipinitialspace=True) m = df.values m = np.cumsum(m, axis=0) # the dataset reports a", "get a spatial object m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def sanfrancisco(indir, outfile): print(\"Importing from", "%s\" % outfile) # fileset = [os.path.join(root,f) for f in files for root,", "header = [fd.readline() for _ in range(6)] df = pd.read_csv(fd, header=None) m =", "print (msg) sys.exit(-1) if __name__==\"__main__\": if (len(sys.argv) != 4): usage(\"Run with 3 parameters:", "__name__==\"__main__\": if (len(sys.argv) != 4): usage(\"Run with 3 parameters: type, dir, outfilename\") cases", "dir, outfilename\") cases = dict({ \"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife }) if sys.argv[1] not in", "of = open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): df =", "last column m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def geolife(indir, outfile): print(\"Importing from GeoLife\") print(\"", "dataset reports a smoothed derivative. 
integrate to get a spatial object m[:,-1] =", "}) if sys.argv[1] not in cases: usage(\"Importer for %s not found.\" % sys.argv[1])", "fileset = sorted(fileset) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile, \"w\")", "%s\" % outfile) fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.startswith(\"file-\")]) print(\"==>", "fileset = [os.path.join(root,f) for f in files for root, _, files in os.walk(indir)]", "first skip the header fd = open(f, \"r\"); header = [fd.readline() for _", "11 wern_m3 1001 4096 Nov 5 2016 tdrive def usage(msg): print (msg) sys.exit(-1)", "sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.startswith(\"file-\")]) print(\"==> Found %d files.\" % len(fileset))", "axis=0) # the dataset reports a smoothed derivative. integrate to get a spatial", "outfile): print(\"Importing Character dataset\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile)", "idx,f in enumerate(tqdm(fileset)): # first skip the header fd = open(f, \"r\"); header", "pd; import os; import sys; from tqdm import tqdm; from matplotlib import pyplot", "roma # drwxr-xr-x 4 wern_m3 1001 4096 Jan 10 2017 sf_large # drwxr-xr-x", "Importer for T-Drive Dataset takes t-drive directory and creates SSV \"\"\" import numpy", "indir) print(\"Out-File: %s\" % outfile) fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if", "[fd.readline() for _ in range(6)] df = pd.read_csv(fd, header=None) m = df.values[:,range(3)] m[:,-1]", "header fd = open(f, \"r\"); header = [fd.readline() for _ in range(6)] df", "# the dataset reports a smoothed derivative. integrate to get a spatial object", "In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) fileset = sorted([os.path.join(indir,x) for x", "os.listdir(indir) if x.endswith(\".plt\")]) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile, \"w\")", "(len(sys.argv) != 4): usage(\"Run with 3 parameters: type, dir, outfilename\") cases = dict({", "import os; import sys; from tqdm import tqdm; from matplotlib import pyplot as", "remove last column m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def geolife(indir, outfile): print(\"Importing from GeoLife\")", "T-Drive Dataset takes t-drive directory and creates SSV \"\"\" import numpy as np;", "= open(f, \"r\"); header = [fd.readline() for _ in range(6)] df = pd.read_csv(fd,", "4): usage(\"Run with 3 parameters: type, dir, outfilename\") cases = dict({ \"character\":character, \"sanfrancisco\":sanfrancisco,", "[os.path.join(root,f) for f in files for root, _, files in os.walk(indir)] fileset=[] for", "import numpy as np; import pandas as pd; import os; import sys; from", "dataset\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) fileset = sorted([os.path.join(indir,x)", "outfile) # fileset = [os.path.join(root,f) for f in files for root, _, files", "\",header=None, skipinitialspace=True) m = df.values[:,range(3)] # remove last column m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m)", "f.endswith(\".plt\")] fileset = sorted(fileset) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile,", "enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values m = np.cumsum(m,", "with 3 parameters: type, dir, outfilename\") cases = dict({ \"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife })", "np.ones(m.shape[0])*idx np.savetxt(of,m) def geolife(indir, outfile): print(\"Importing from GeoLife\") print(\" In-Dir: %s\" % indir)", "y id\\n\"); for 
idx,f in enumerate(tqdm(fileset)): # first skip the header fd =", "= dict({ \"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife }) if sys.argv[1] not in cases: usage(\"Importer for", "for root,_,files in os.walk(indir): fileset = fileset + [os.path.join(root, f) for f in", "= df.values[:,range(3)] m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) # drwxr-xr-x 3 wern_m3 1001 4096 Sep", "\"sanfrancisco\":sanfrancisco, \"geolife\":geolife }) if sys.argv[1] not in cases: usage(\"Importer for %s not found.\"", "% len(fileset)) of = open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)):", "in os.walk(indir): fileset = fileset + [os.path.join(root, f) for f in files if", "type, dir, outfilename\") cases = dict({ \"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife }) if sys.argv[1] not", "character(indir, outfile): print(\"Importing Character dataset\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" %", "= np.cumsum(m, axis=0) # the dataset reports a smoothed derivative. integrate to get", "# drwxr-xr-x 3 wern_m3 1001 4096 Sep 16 2016 roma # drwxr-xr-x 4", "as pd; import os; import sys; from tqdm import tqdm; from matplotlib import", "idx,f in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values m", "wern_m3 1001 4096 Nov 5 2016 tdrive def usage(msg): print (msg) sys.exit(-1) if", "os.walk(indir)] fileset=[] for root,_,files in os.walk(indir): fileset = fileset + [os.path.join(root, f) for", "from matplotlib import pyplot as plt; def character(indir, outfile): print(\"Importing Character dataset\") print(\"", "files if f.endswith(\".plt\")] fileset = sorted(fileset) print(\"==> Found %d files.\" % len(fileset)) of", "4096 Sep 16 2016 roma # drwxr-xr-x 4 wern_m3 1001 4096 Jan 10", "2016 tdrive def usage(msg): print (msg) sys.exit(-1) if __name__==\"__main__\": if (len(sys.argv) != 4):", "for idx,f in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values[:,range(3)]", "from tqdm import tqdm; from matplotlib import pyplot as plt; def character(indir, outfile):", "for x in os.listdir(indir) if x.startswith(\"file-\")]) print(\"==> Found %d files.\" % len(fileset)) of", "idx,f in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values[:,range(3)] #", "wern_m3 1001 4096 Jan 10 2017 sf_large # drwxr-xr-x 11 wern_m3 1001 4096", "5 2016 tdrive def usage(msg): print (msg) sys.exit(-1) if __name__==\"__main__\": if (len(sys.argv) !=", "from a San Francisco Directory\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" %", "3 parameters: type, dir, outfilename\") cases = dict({ \"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife }) if", "# fileset = [os.path.join(root,f) for f in files for root, _, files in", "open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\"", "cases = dict({ \"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife }) if sys.argv[1] not in cases: usage(\"Importer", "2016 roma # drwxr-xr-x 4 wern_m3 1001 4096 Jan 10 2017 sf_large #", "16 2016 roma # drwxr-xr-x 4 wern_m3 1001 4096 Jan 10 2017 sf_large", "def character(indir, outfile): print(\"Importing Character dataset\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\"", "sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.endswith(\".plt\")]) print(\"==> Found %d files.\" % 
len(fileset))", "\"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife }) if sys.argv[1] not in cases: usage(\"Importer for %s not", "import tqdm; from matplotlib import pyplot as plt; def character(indir, outfile): print(\"Importing Character", "open(f, \"r\"); header = [fd.readline() for _ in range(6)] df = pd.read_csv(fd, header=None)", "2017 sf_large # drwxr-xr-x 11 wern_m3 1001 4096 Nov 5 2016 tdrive def", "drwxr-xr-x 11 wern_m3 1001 4096 Nov 5 2016 tdrive def usage(msg): print (msg)", "sys.exit(-1) if __name__==\"__main__\": if (len(sys.argv) != 4): usage(\"Run with 3 parameters: type, dir,", "df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values[:,range(3)] # remove last column", "\"\"\" import numpy as np; import pandas as pd; import os; import sys;", "pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values m = np.cumsum(m, axis=0) # the", "enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values[:,range(3)] # remove last", "%s\" % indir) print(\"Out-File: %s\" % outfile) # fileset = [os.path.join(root,f) for f", "= open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): df = pd.read_table(f,", "m = df.values[:,range(3)] m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) # drwxr-xr-x 3 wern_m3 1001 4096", "for f in files if f.endswith(\".plt\")] fileset = sorted(fileset) print(\"==> Found %d files.\"", "files for root, _, files in os.walk(indir)] fileset=[] for root,_,files in os.walk(indir): fileset", "m = df.values m = np.cumsum(m, axis=0) # the dataset reports a smoothed", "m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) # drwxr-xr-x 3 wern_m3 1001 4096 Sep 16 2016", "= pd.read_csv(fd, header=None) m = df.values[:,range(3)] m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) # drwxr-xr-x 3", "os; import sys; from tqdm import tqdm; from matplotlib import pyplot as plt;", "takes t-drive directory and creates SSV \"\"\" import numpy as np; import pandas", "in os.listdir(indir) if x.startswith(\"file-\")]) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile,", "%s\" % indir) print(\"Out-File: %s\" % outfile) fileset = sorted([os.path.join(indir,x) for x in", "range(6)] df = pd.read_csv(fd, header=None) m = df.values[:,range(3)] m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) #", "np.ones(m.shape[0])*idx np.savetxt(of,m) def sanfrancisco(indir, outfile): print(\"Importing from a San Francisco Directory\") print(\" In-Dir:", "% indir) print(\"Out-File: %s\" % outfile) # fileset = [os.path.join(root,f) for f in", "for T-Drive Dataset takes t-drive directory and creates SSV \"\"\" import numpy as", "column m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def geolife(indir, outfile): print(\"Importing from GeoLife\") print(\" In-Dir:", "= pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values m = np.cumsum(m, axis=0) #", "a San Francisco Directory\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile)", "print(\"Out-File: %s\" % outfile) # fileset = [os.path.join(root,f) for f in files for", "np.cumsum(m, axis=0) # the dataset reports a smoothed derivative. 
integrate to get a", "fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.startswith(\"file-\")]) print(\"==> Found %d files.\"", "= df.values m = np.cumsum(m, axis=0) # the dataset reports a smoothed derivative.", "tqdm import tqdm; from matplotlib import pyplot as plt; def character(indir, outfile): print(\"Importing", "print(\"Importing Character dataset\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) fileset", "outfile) fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.startswith(\"file-\")]) print(\"==> Found %d", "if x.endswith(\".plt\")]) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile, \"w\") of.write(\"x", "% outfile) # fileset = [os.path.join(root,f) for f in files for root, _,", "f in files for root, _, files in os.walk(indir)] fileset=[] for root,_,files in", "in files for root, _, files in os.walk(indir)] fileset=[] for root,_,files in os.walk(indir):", "np.ones(m.shape[0])*idx np.savetxt(of,m) # drwxr-xr-x 3 wern_m3 1001 4096 Sep 16 2016 roma #", "np.savetxt(of,m) # drwxr-xr-x 3 wern_m3 1001 4096 Sep 16 2016 roma # drwxr-xr-x", "\"geolife\":geolife }) if sys.argv[1] not in cases: usage(\"Importer for %s not found.\" %", "def geolife(indir, outfile): print(\"Importing from GeoLife\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\"", "# drwxr-xr-x 4 wern_m3 1001 4096 Jan 10 2017 sf_large # drwxr-xr-x 11", "Character dataset\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) fileset =", "object m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def sanfrancisco(indir, outfile): print(\"Importing from a San Francisco", "= sorted(fileset) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile, \"w\") of.write(\"x", "def usage(msg): print (msg) sys.exit(-1) if __name__==\"__main__\": if (len(sys.argv) != 4): usage(\"Run with", "os.listdir(indir) if x.startswith(\"file-\")]) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile, \"w\")", "sanfrancisco(indir, outfile): print(\"Importing from a San Francisco Directory\") print(\" In-Dir: %s\" % indir)", "\"r\"); header = [fd.readline() for _ in range(6)] df = pd.read_csv(fd, header=None) m", "1001 4096 Nov 5 2016 tdrive def usage(msg): print (msg) sys.exit(-1) if __name__==\"__main__\":", "usage(msg): print (msg) sys.exit(-1) if __name__==\"__main__\": if (len(sys.argv) != 4): usage(\"Run with 3", "= sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.endswith(\".plt\")]) print(\"==> Found %d files.\" %", "the header fd = open(f, \"r\"); header = [fd.readline() for _ in range(6)]", "parameters: type, dir, outfilename\") cases = dict({ \"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife }) if sys.argv[1]", "Dataset takes t-drive directory and creates SSV \"\"\" import numpy as np; import", "print(\"==> Found %d files.\" % len(fileset)) of = open(outfile, \"w\") of.write(\"x y id\\n\");", "outfile): print(\"Importing from a San Francisco Directory\") print(\" In-Dir: %s\" % indir) print(\"Out-File:", "for idx,f in enumerate(tqdm(fileset)): # first skip the header fd = open(f, \"r\");", "outfilename\") cases = dict({ \"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife }) if sys.argv[1] not in cases:", "f) for f in files if f.endswith(\".plt\")] fileset = sorted(fileset) print(\"==> Found %d", "= np.ones(m.shape[0])*idx np.savetxt(of,m) def geolife(indir, outfile): print(\"Importing from GeoLife\") print(\" In-Dir: %s\" %", "df.values m = np.cumsum(m, axis=0) # 
the dataset reports a smoothed derivative. integrate", "reports a smoothed derivative. integrate to get a spatial object m[:,-1] = np.ones(m.shape[0])*idx", "root,_,files in os.walk(indir): fileset = fileset + [os.path.join(root, f) for f in files", "pyplot as plt; def character(indir, outfile): print(\"Importing Character dataset\") print(\" In-Dir: %s\" %", "fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.endswith(\".plt\")]) print(\"==> Found %d files.\"", "_, files in os.walk(indir)] fileset=[] for root,_,files in os.walk(indir): fileset = fileset +", "in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values[:,range(3)] # remove", "header=None) m = df.values[:,range(3)] m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) # drwxr-xr-x 3 wern_m3 1001", "sf_large # drwxr-xr-x 11 wern_m3 1001 4096 Nov 5 2016 tdrive def usage(msg):", "plt; def character(indir, outfile): print(\"Importing Character dataset\") print(\" In-Dir: %s\" % indir) print(\"Out-File:", "geolife(indir, outfile): print(\"Importing from GeoLife\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" %", "fileset + [os.path.join(root, f) for f in files if f.endswith(\".plt\")] fileset = sorted(fileset)", "f in files if f.endswith(\".plt\")] fileset = sorted(fileset) print(\"==> Found %d files.\" %", "for root, _, files in os.walk(indir)] fileset=[] for root,_,files in os.walk(indir): fileset =", "fd = open(f, \"r\"); header = [fd.readline() for _ in range(6)] df =", "4 wern_m3 1001 4096 Jan 10 2017 sf_large # drwxr-xr-x 11 wern_m3 1001", "as np; import pandas as pd; import os; import sys; from tqdm import", "% outfile) fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.endswith(\".plt\")]) print(\"==> Found", "tdrive def usage(msg): print (msg) sys.exit(-1) if __name__==\"__main__\": if (len(sys.argv) != 4): usage(\"Run", "of = open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): # first", "in os.walk(indir)] fileset=[] for root,_,files in os.walk(indir): fileset = fileset + [os.path.join(root, f)", "of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): # first skip the header fd", "np.savetxt(of,m) def sanfrancisco(indir, outfile): print(\"Importing from a San Francisco Directory\") print(\" In-Dir: %s\"", "files in os.walk(indir)] fileset=[] for root,_,files in os.walk(indir): fileset = fileset + [os.path.join(root,", "open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): # first skip the", "print(\"Importing from a San Francisco Directory\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\"", "3 wern_m3 1001 4096 Sep 16 2016 roma # drwxr-xr-x 4 wern_m3 1001", "outfile): print(\"Importing from GeoLife\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile)", "fileset=[] for root,_,files in os.walk(indir): fileset = fileset + [os.path.join(root, f) for f", "from GeoLife\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) # fileset", "(msg) sys.exit(-1) if __name__==\"__main__\": if (len(sys.argv) != 4): usage(\"Run with 3 parameters: type,", "df = pd.read_csv(fd, header=None) m = df.values[:,range(3)] m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) # drwxr-xr-x", "for _ in range(6)] df = pd.read_csv(fd, header=None) m = df.values[:,range(3)] m[:,-1] =", "import sys; from tqdm import tqdm; from matplotlib import pyplot as plt; def", "matplotlib import pyplot as plt; def character(indir, outfile): print(\"Importing 
Character dataset\") print(\" In-Dir:", "if __name__==\"__main__\": if (len(sys.argv) != 4): usage(\"Run with 3 parameters: type, dir, outfilename\")", "SSV \"\"\" import numpy as np; import pandas as pd; import os; import", "= [fd.readline() for _ in range(6)] df = pd.read_csv(fd, header=None) m = df.values[:,range(3)]", "if sys.argv[1] not in cases: usage(\"Importer for %s not found.\" % sys.argv[1]) cases[sys.argv[1]](sys.argv[2],sys.argv[3])", "numpy as np; import pandas as pd; import os; import sys; from tqdm", "pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values[:,range(3)] # remove last column m[:,-1] =", "+ [os.path.join(root, f) for f in files if f.endswith(\".plt\")] fileset = sorted(fileset) print(\"==>", "files.\" % len(fileset)) of = open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f in", "import pandas as pd; import os; import sys; from tqdm import tqdm; from", "to get a spatial object m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def sanfrancisco(indir, outfile): print(\"Importing", "drwxr-xr-x 3 wern_m3 1001 4096 Sep 16 2016 roma # drwxr-xr-x 4 wern_m3", "t-drive directory and creates SSV \"\"\" import numpy as np; import pandas as", "drwxr-xr-x 4 wern_m3 1001 4096 Jan 10 2017 sf_large # drwxr-xr-x 11 wern_m3", "df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values m = np.cumsum(m, axis=0)", "_ in range(6)] df = pd.read_csv(fd, header=None) m = df.values[:,range(3)] m[:,-1] = np.ones(m.shape[0])*idx", "1001 4096 Jan 10 2017 sf_large # drwxr-xr-x 11 wern_m3 1001 4096 Nov", "root, _, files in os.walk(indir)] fileset=[] for root,_,files in os.walk(indir): fileset = fileset", "!= 4): usage(\"Run with 3 parameters: type, dir, outfilename\") cases = dict({ \"character\":character,", "a smoothed derivative. integrate to get a spatial object m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m)", "pandas as pd; import os; import sys; from tqdm import tqdm; from matplotlib", "= np.ones(m.shape[0])*idx np.savetxt(of,m) def sanfrancisco(indir, outfile): print(\"Importing from a San Francisco Directory\") print(\"", "wern_m3 1001 4096 Sep 16 2016 roma # drwxr-xr-x 4 wern_m3 1001 4096", "Directory\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) fileset = sorted([os.path.join(indir,x)", "= df.values[:,range(3)] # remove last column m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def geolife(indir, outfile):", "m = np.cumsum(m, axis=0) # the dataset reports a smoothed derivative. integrate to", "m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def sanfrancisco(indir, outfile): print(\"Importing from a San Francisco Directory\")", "% outfile) fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.startswith(\"file-\")]) print(\"==> Found", "derivative. integrate to get a spatial object m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def sanfrancisco(indir,", "if x.startswith(\"file-\")]) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile, \"w\") of.write(\"x", "skipinitialspace=True) m = df.values[:,range(3)] # remove last column m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def", "smoothed derivative. 
integrate to get a spatial object m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def", "y id\\n\"); for idx,f in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m", "sys; from tqdm import tqdm; from matplotlib import pyplot as plt; def character(indir,", "os.walk(indir): fileset = fileset + [os.path.join(root, f) for f in files if f.endswith(\".plt\")]", "10 2017 sf_large # drwxr-xr-x 11 wern_m3 1001 4096 Nov 5 2016 tdrive", "print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) fileset = sorted([os.path.join(indir,x) for", "dict({ \"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife }) if sys.argv[1] not in cases: usage(\"Importer for %s", "directory and creates SSV \"\"\" import numpy as np; import pandas as pd;", "= sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.startswith(\"file-\")]) print(\"==> Found %d files.\" %", "indir) print(\"Out-File: %s\" % outfile) # fileset = [os.path.join(root,f) for f in files", "1001 4096 Sep 16 2016 roma # drwxr-xr-x 4 wern_m3 1001 4096 Jan", "sep=\" \",header=None, skipinitialspace=True) m = df.values m = np.cumsum(m, axis=0) # the dataset", "fileset = fileset + [os.path.join(root, f) for f in files if f.endswith(\".plt\")] fileset", "import pyplot as plt; def character(indir, outfile): print(\"Importing Character dataset\") print(\" In-Dir: %s\"", "print(\"Out-File: %s\" % outfile) fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.startswith(\"file-\")])", "= np.ones(m.shape[0])*idx np.savetxt(of,m) # drwxr-xr-x 3 wern_m3 1001 4096 Sep 16 2016 roma", "and creates SSV \"\"\" import numpy as np; import pandas as pd; import", "print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) # fileset = [os.path.join(root,f)", "Jan 10 2017 sf_large # drwxr-xr-x 11 wern_m3 1001 4096 Nov 5 2016", "\",header=None, skipinitialspace=True) m = df.values m = np.cumsum(m, axis=0) # the dataset reports", "x.endswith(\".plt\")]) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile, \"w\") of.write(\"x y", "= pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values[:,range(3)] # remove last column m[:,-1]", "id\\n\"); for idx,f in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m =", "x.startswith(\"file-\")]) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile, \"w\") of.write(\"x y", "skip the header fd = open(f, \"r\"); header = [fd.readline() for _ in", "# first skip the header fd = open(f, \"r\"); header = [fd.readline() for", "%s\" % outfile) fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.endswith(\".plt\")]) print(\"==>", "<filename>data/dataset2ssv.py \"\"\" Importer for T-Drive Dataset takes t-drive directory and creates SSV \"\"\"", "tqdm; from matplotlib import pyplot as plt; def character(indir, outfile): print(\"Importing Character dataset\")", "for x in os.listdir(indir) if x.endswith(\".plt\")]) print(\"==> Found %d files.\" % len(fileset)) of", "enumerate(tqdm(fileset)): # first skip the header fd = open(f, \"r\"); header = [fd.readline()", "# drwxr-xr-x 11 wern_m3 1001 4096 Nov 5 2016 tdrive def usage(msg): print", "%d files.\" % len(fileset)) of = open(outfile, \"w\") of.write(\"x y id\\n\"); for idx,f", "\"w\") of.write(\"x y id\\n\"); for idx,f in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None,", "= [os.path.join(root,f) for f in files for root, _, files in os.walk(indir)] fileset=[]", 
"print(\"Importing from GeoLife\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) #", "df.values[:,range(3)] m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) # drwxr-xr-x 3 wern_m3 1001 4096 Sep 16", "x in os.listdir(indir) if x.endswith(\".plt\")]) print(\"==> Found %d files.\" % len(fileset)) of =", "# remove last column m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def geolife(indir, outfile): print(\"Importing from", "creates SSV \"\"\" import numpy as np; import pandas as pd; import os;", "= fileset + [os.path.join(root, f) for f in files if f.endswith(\".plt\")] fileset =", "print(\"Out-File: %s\" % outfile) fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.endswith(\".plt\")])", "sorted(fileset) print(\"==> Found %d files.\" % len(fileset)) of = open(outfile, \"w\") of.write(\"x y", "in files if f.endswith(\".plt\")] fileset = sorted(fileset) print(\"==> Found %d files.\" % len(fileset))", "in range(6)] df = pd.read_csv(fd, header=None) m = df.values[:,range(3)] m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m)", "usage(\"Run with 3 parameters: type, dir, outfilename\") cases = dict({ \"character\":character, \"sanfrancisco\":sanfrancisco, \"geolife\":geolife", "m = df.values[:,range(3)] # remove last column m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def geolife(indir,", "for idx,f in enumerate(tqdm(fileset)): df = pd.read_table(f, sep=\" \",header=None, skipinitialspace=True) m = df.values", "sep=\" \",header=None, skipinitialspace=True) m = df.values[:,range(3)] # remove last column m[:,-1] = np.ones(m.shape[0])*idx", "[os.path.join(root, f) for f in files if f.endswith(\".plt\")] fileset = sorted(fileset) print(\"==> Found", "Nov 5 2016 tdrive def usage(msg): print (msg) sys.exit(-1) if __name__==\"__main__\": if (len(sys.argv)", "in enumerate(tqdm(fileset)): # first skip the header fd = open(f, \"r\"); header =", "\"\"\" Importer for T-Drive Dataset takes t-drive directory and creates SSV \"\"\" import", "In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) # fileset = [os.path.join(root,f) for", "np.savetxt(of,m) def geolife(indir, outfile): print(\"Importing from GeoLife\") print(\" In-Dir: %s\" % indir) print(\"Out-File:", "df.values[:,range(3)] # remove last column m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def geolife(indir, outfile): print(\"Importing", "id\\n\"); for idx,f in enumerate(tqdm(fileset)): # first skip the header fd = open(f,", "for f in files for root, _, files in os.walk(indir)] fileset=[] for root,_,files", "GeoLife\") print(\" In-Dir: %s\" % indir) print(\"Out-File: %s\" % outfile) # fileset =", "outfile) fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.endswith(\".plt\")]) print(\"==> Found %d", "4096 Nov 5 2016 tdrive def usage(msg): print (msg) sys.exit(-1) if __name__==\"__main__\": if", "if (len(sys.argv) != 4): usage(\"Run with 3 parameters: type, dir, outfilename\") cases =", "% indir) print(\"Out-File: %s\" % outfile) fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir)", "integrate to get a spatial object m[:,-1] = np.ones(m.shape[0])*idx np.savetxt(of,m) def sanfrancisco(indir, outfile):", "x in os.listdir(indir) if x.startswith(\"file-\")]) print(\"==> Found %d files.\" % len(fileset)) of =" ]
[ "from recipes.models import Recipe try: recipe = Recipe.objects.get(id=recipe.id) recipe.published=True recipe.save() except Recipe.DoesNotExist: pass", "utf-8 -*- from celery.task import task @task def publish_recipe(recipe): from recipes.models import Recipe", "celery.task import task @task def publish_recipe(recipe): from recipes.models import Recipe try: recipe =", "# -*- coding: utf-8 -*- from celery.task import task @task def publish_recipe(recipe): from", "@task def publish_recipe(recipe): from recipes.models import Recipe try: recipe = Recipe.objects.get(id=recipe.id) recipe.published=True recipe.save()", "def publish_recipe(recipe): from recipes.models import Recipe try: recipe = Recipe.objects.get(id=recipe.id) recipe.published=True recipe.save() except", "import task @task def publish_recipe(recipe): from recipes.models import Recipe try: recipe = Recipe.objects.get(id=recipe.id)", "task @task def publish_recipe(recipe): from recipes.models import Recipe try: recipe = Recipe.objects.get(id=recipe.id) recipe.published=True", "publish_recipe(recipe): from recipes.models import Recipe try: recipe = Recipe.objects.get(id=recipe.id) recipe.published=True recipe.save() except Recipe.DoesNotExist:", "coding: utf-8 -*- from celery.task import task @task def publish_recipe(recipe): from recipes.models import", "-*- coding: utf-8 -*- from celery.task import task @task def publish_recipe(recipe): from recipes.models", "-*- from celery.task import task @task def publish_recipe(recipe): from recipes.models import Recipe try:", "from celery.task import task @task def publish_recipe(recipe): from recipes.models import Recipe try: recipe" ]
[ "a Transmission JSON-RPC arguments dictionary \"\"\" self._update(other) self._commit() def keys(self) -> Generator[str, None,", "import Client class Session: \"\"\" Session is a dict-like class holding the session", "location - rpc version 12 - transmission version 2.20 :return: \"\"\" return self.__getattr__(\"download_dir\")", "is replace by underscore. (eg: ``'download_dir'``) \"\"\" for key, field in self._fields.items(): yield", "Any], \"Session\"]) -> None: \"\"\" Update the session data from a Transmission JSON-RPC", "-> None: if isinstance(other, dict): for key, value in other.items(): self._set(key, value) elif", "are the same as the session arguments in the Transmission RPC specification, but", "<NAME> <<EMAIL>> # Licensed under the MIT license. from typing import TYPE_CHECKING, Any,", "value is not None: dirty[key] = value else: for k, v in self._fields.items():", "supplied data\") def update(self, other: Union[Dict[str, Any], \"Session\"]) -> None: \"\"\" Update the", "_commit(self, key: str = None, value: Any = None) -> None: \"\"\"submit all", "the Transmission RPC specification, but with underscore instead of hyphen. get ``'download-dir'`` with", "want to batch update a session, call ``.update(data)`` .. code-block:: python session =", "- transmission version 2.20 :return: \"\"\" return self.__getattr__(\"download_dir\") @download_dir.setter def download_dir(self, location: str)", "- rpc version 12 - transmission version 2.20 :return: \"\"\" return self.__getattr__(\"download_dir\") @download_dir.setter", "The attributes available are the same as the session arguments in the Transmission", "self._fields.items(): if v.dirty: dirty[k] = v.value self._client.set_session(**dirty) def _update(self, other: Union[Dict[str, Any], \"Session\"])", "def pex_enabled(self, enabled: bool) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(enabled, bool): self._set(\"pex_enabled\",", "available are the same as the session arguments in the Transmission RPC specification,", "class Session: \"\"\" Session is a dict-like class holding the session data for", "version 1.60 \"\"\" return self.__getattr__(\"peer_port\") @peer_port.setter def peer_port(self, port: int) -> None: \"\"\"Set", "None: if value in {\"required\", \"preferred\", \"tolerated\"}: self._set(\"encryption\", value, commit=True) else: raise ValueError(", "= '/path/to/new/download/dir' if you want to batch update a session, call ``.update(data)`` ..", "__getattr__(self, name: str) -> Any: try: return self._fields[name].value except KeyError as e: raise", "from transmission_rpc.lib_types import Field if TYPE_CHECKING: from transmission_rpc.client import Client class Session: \"\"\"", "raise TypeError(f\"{location!r} if not a valid 'download-dir'\") @property def version(self) -> str: \"\"\"", "as e: raise AttributeError(f\"No attribute {name}\") from e def _set(self, key: str, value:", "Trim21 <<EMAIL>> # Copyright (c) 2008-2014 <NAME> <<EMAIL>> # Licensed under the MIT", "= self._fields.get(key) if current_field is None: self._fields[key] = Field(value, True) else: if current_field.value", "self.__getattr__(\"peer_port\") @peer_port.setter def peer_port(self, port: int) -> None: \"\"\"Set the peer port. 
-", "Field if TYPE_CHECKING: from transmission_rpc.client import Client class Session: \"\"\" Session is a", "= Client().get_session() current = session.download_dir there are also setter like ``Session().download_dir = '/path/to/download'``", "value) def __str__(self) -> str: text = \"\" max_length = max(len(x) for x", "for value in self._fields.values(): yield value.value def items(self) -> Generator[Tuple[str, Any], None, None]:", "underscore (eg: ``download_dir``) \"\"\" yield from self._fields.keys() def values(self) -> Generator[Any, None, None]:", "return self.__getattr__(\"peer_port\") @peer_port.setter def peer_port(self, port: int) -> None: \"\"\"Set the peer port.", "value else: for k, v in self._fields.items(): if v.dirty: dirty[k] = v.value self._client.set_session(**dirty)", "self._set(key, value.value) else: raise ValueError(\"Cannot update with supplied data\") def update(self, other: Union[Dict[str,", "bool: \"\"\"Is peer exchange enabled - rpc version 5 - transmission version 1.60\"\"\"", "Any: try: return self._fields[name].value except KeyError as e: raise AttributeError(f\"No attribute {name}\") from", "key,value pair hyphen in key is replace by underscore. (eg: ``'download_dir'``) \"\"\" for", "self._fields[key] = Field(value, True) if commit: self._commit(key, value) def __str__(self) -> str: text", "-> None: if value in {\"required\", \"preferred\", \"tolerated\"}: self._set(\"encryption\", value, commit=True) else: raise", "= {} if fields is not None: self._update(fields) def __getattr__(self, name: str) ->", "1.60 \"\"\" if isinstance(port, int): self._set(\"peer_port\", port, True) else: raise ValueError(\"Not a valid", "if TYPE_CHECKING: from transmission_rpc.client import Client class Session: \"\"\" Session is a dict-like", "for key, value in other.items(): self._set(key, value) elif isinstance(other, Session): for key, value", "from transmission_rpc.client import Client class Session: \"\"\" Session is a dict-like class holding", "def update(self, other: Union[Dict[str, Any], \"Session\"]) -> None: \"\"\" Update the session data", "None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(location, str) and location: self._set(\"download_dir\", location, True) else:", "1.60\"\"\" return self.__getattr__(\"pex_enabled\") @pex_enabled.setter def pex_enabled(self, enabled: bool) -> None: \"\"\"Enable/disable peer exchange.\"\"\"", "3 - transmission version 1.41 \"\"\" return self.__getattr__(\"version\") @property def rpc_version(self) -> int:", "else: raise ValueError(\"Cannot update with supplied data\") def update(self, other: Union[Dict[str, Any], \"Session\"])", "current_field is None: self._fields[key] = Field(value, True) else: if current_field.value != value: self._fields[key]", "x: x[0]): text += f\"{key.ljust(max_length)}: {value.value!r}\\n\" return text def _commit(self, key: str =", "in {\"required\", \"preferred\", \"tolerated\"}: self._set(\"encryption\", value, commit=True) else: raise ValueError( \"Not a valid", "self._update(fields) def __getattr__(self, name: str) -> Any: try: return self._fields[name].value except KeyError as", "str: \"\"\"default download location - rpc version 12 - transmission version 2.20 :return:", "with underscore instead of hyphen. get ``'download-dir'`` with ``session.download_dir``. .. 
code-block:: python session", "key, field.value @property def download_dir(self) -> str: \"\"\"default download location - rpc version", "-> None: \"\"\" Update the session data from a Transmission JSON-RPC arguments dictionary", "code-block:: python session = Client().get_session() session.download_dir = '/path/to/new/download/dir' if you want to batch", "update a session, call ``.update(data)`` .. code-block:: python session = Client().get_session() session.update({'k1': 'v1',", "value in other._fields.items(): self._set(key, value.value) else: raise ValueError(\"Cannot update with supplied data\") def", "v in self._fields.items(): if v.dirty: dirty[k] = v.value self._client.set_session(**dirty) def _update(self, other: Union[Dict[str,", "def version(self) -> str: \"\"\" - rpc version 3 - transmission version 1.41", "the peer port. - rpc version 5 - transmission version 1.60 \"\"\" if", "exchange enabled - rpc version 5 - transmission version 1.60\"\"\" return self.__getattr__(\"pex_enabled\") @pex_enabled.setter", "in self._fields.items(): if v.dirty: dirty[k] = v.value self._client.set_session(**dirty) def _update(self, other: Union[Dict[str, Any],", "None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(enabled, bool): self._set(\"pex_enabled\", enabled, True) else: raise TypeError(\"Not", "False) -> None: key = key.replace(\"-\", \"_\") current_field = self._fields.get(key) if current_field is", "yield from self._fields.keys() def values(self) -> Generator[Any, None, None]: for value in self._fields.values():", "Copyright (c) 2018-2021 Trim21 <<EMAIL>> # Copyright (c) 2008-2014 <NAME> <<EMAIL>> # Licensed", "from a Transmission JSON-RPC arguments dictionary \"\"\" self._update(other) self._commit() def keys(self) -> Generator[str,", "from typing import TYPE_CHECKING, Any, Dict, Tuple, Union, Generator from typing_extensions import Literal", "-> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(enabled, bool): self._set(\"pex_enabled\", enabled, True) else: raise", "in other._fields.items(): self._set(key, value.value) else: raise ValueError(\"Cannot update with supplied data\") def update(self,", "typing_extensions import Literal from transmission_rpc.lib_types import Field if TYPE_CHECKING: from transmission_rpc.client import Client", "def _set(self, key: str, value: Any, commit: bool = False) -> None: key", "Client().get_session() session.download_dir = '/path/to/new/download/dir' if you want to batch update a session, call", "the private ``Session()._fields``, keys are stored with underscore. \"\"\" def __init__(self, client: \"Client\",", "port: int) -> None: \"\"\"Set the peer port. - rpc version 5 -", "-> Any: try: return self._fields[name].value except KeyError as e: raise AttributeError(f\"No attribute {name}\")", "+= f\"{key.ljust(max_length)}: {value.value!r}\\n\" return text def _commit(self, key: str = None, value: Any", "\"\"\"Get the peer port. - rpc version 5 - transmission version 1.60 \"\"\"", "Licensed under the MIT license. from typing import TYPE_CHECKING, Any, Dict, Tuple, Union,", "private ``Session()._fields``, keys are stored with underscore. 
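# Note: ``Field`` (imported above from ``transmission_rpc.lib_types``) is used
# below as a small record pairing a value with a dirty flag. As an assumption
# for illustration only, it behaves roughly like:
#
#     class Field(NamedTuple):
#         value: Any
#         dirty: bool
#
# See lib_types.py for the actual definition.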
\"\"\" def __init__(self, client: \"Client\", fields:", "if key is not None and value is not None: dirty[key] = value", "return text def _commit(self, key: str = None, value: Any = None) ->", "transmission version 1.50 \"\"\" return self.__getattr__(\"rpc_version\") @property def peer_port(self) -> int: \"\"\"Get the", "yield key, field.value @property def download_dir(self) -> str: \"\"\"default download location - rpc", "KeyError as e: raise AttributeError(f\"No attribute {name}\") from e def _set(self, key: str,", "str = None, value: Any = None) -> None: \"\"\"submit all dirty field", "session = Client().get_session() session.download_dir = '/path/to/new/download/dir' if you want to batch update a", "raise ValueError(\"Cannot update with supplied data\") def update(self, other: Union[Dict[str, Any], \"Session\"]) ->", "str, value: Any, commit: bool = False) -> None: key = key.replace(\"-\", \"_\")", "with supplied data\") def update(self, other: Union[Dict[str, Any], \"Session\"]) -> None: \"\"\" Update", "self.__getattr__(\"rpc_version\") @property def peer_port(self) -> int: \"\"\"Get the peer port. - rpc version", "for x in self._fields.keys()) + 1 for key, value in sorted(self._fields.items(), key=lambda x:", ":return: \"\"\" return self.__getattr__(\"download_dir\") @download_dir.setter def download_dir(self, location: str) -> None: \"\"\"Enable/disable peer", "Tuple, Union, Generator from typing_extensions import Literal from transmission_rpc.lib_types import Field if TYPE_CHECKING:", "int: \"\"\" - rpc version 4 - transmission version 1.50 \"\"\" return self.__getattr__(\"rpc_version\")", "\"\"\" self._update(other) self._commit() def keys(self) -> Generator[str, None, None]: \"\"\" session keys with", "specification, but with underscore instead of hyphen. get ``'download-dir'`` with ``session.download_dir``. .. 
code-block::", "Client().get_session() current = session.download_dir there are also setter like ``Session().download_dir = '/path/to/download'`` ..", "session arguments in the Transmission RPC specification, but with underscore instead of hyphen.", "for k, v in self._fields.items(): if v.dirty: dirty[k] = v.value self._client.set_session(**dirty) def _update(self,", "key, value in other._fields.items(): self._set(key, value.value) else: raise ValueError(\"Cannot update with supplied data\")", "2.20 :return: \"\"\" return self.__getattr__(\"download_dir\") @download_dir.setter def download_dir(self, location: str) -> None: \"\"\"Enable/disable", "dirty[k] = v.value self._client.set_session(**dirty) def _update(self, other: Union[Dict[str, Any], \"Session\"]) -> None: if", "(eg: ``download_dir``) \"\"\" yield from self._fields.keys() def values(self) -> Generator[Any, None, None]: for", "return self.__getattr__(\"pex_enabled\") @pex_enabled.setter def pex_enabled(self, enabled: bool) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if", "peer exchange.\"\"\" if isinstance(enabled, bool): self._set(\"pex_enabled\", enabled, True) else: raise TypeError(\"Not a valid", "key, value in other.items(): self._set(key, value) elif isinstance(other, Session): for key, value in", "@property def download_dir(self) -> str: \"\"\"default download location - rpc version 12 -", "version 3 - transmission version 1.41 \"\"\" return self.__getattr__(\"version\") @property def rpc_version(self) ->", "\"\" max_length = max(len(x) for x in self._fields.keys()) + 1 for key, value", "transmission version 1.60 \"\"\" if isinstance(port, int): self._set(\"peer_port\", port, True) else: raise ValueError(\"Not", "int): self._set(\"peer_port\", port, True) else: raise ValueError(\"Not a valid limit\") @property def pex_enabled(self)", "True) else: raise TypeError(f\"{location!r} if not a valid 'download-dir'\") @property def version(self) ->", "-> str: return self.__getattr__(\"encryption\") @encryption.setter def encryption(self, value: Literal[\"required\", \"preferred\", \"tolerated\"]) -> None:", "v.dirty: dirty[k] = v.value self._client.set_session(**dirty) def _update(self, other: Union[Dict[str, Any], \"Session\"]) -> None:", "None: if isinstance(other, dict): for key, value in other.items(): self._set(key, value) elif isinstance(other,", "- transmission version 1.41 \"\"\" return self.__getattr__(\"version\") @property def rpc_version(self) -> int: \"\"\"", "raise TypeError(\"Not a valid type\") @property def encryption(self) -> str: return self.__getattr__(\"encryption\") @encryption.setter", "self._client.set_session(**dirty) def _update(self, other: Union[Dict[str, Any], \"Session\"]) -> None: if isinstance(other, dict): for", "\"\"\" if isinstance(port, int): self._set(\"peer_port\", port, True) else: raise ValueError(\"Not a valid limit\")", "= session.download_dir there are also setter like ``Session().download_dir = '/path/to/download'`` .. code-block:: python", "- rpc version 5 - transmission version 1.60\"\"\" return self.__getattr__(\"pex_enabled\") @pex_enabled.setter def pex_enabled(self,", "and value is not None: dirty[key] = value else: for k, v in", "commit: bool = False) -> None: key = key.replace(\"-\", \"_\") current_field = self._fields.get(key)", "- rpc version 5 - transmission version 1.60 \"\"\" if isinstance(port, int): self._set(\"peer_port\",", "\"\"\" return self.__getattr__(\"rpc_version\") @property def peer_port(self) -> int: \"\"\"Get the peer port. 
-", "current_field.value != value: self._fields[key] = Field(value, True) if commit: self._commit(key, value) def __str__(self)", "self.__getattr__(\"pex_enabled\") @pex_enabled.setter def pex_enabled(self, enabled: bool) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(enabled,", "\"v2\"}) if you have to access to the private ``Session()._fields``, keys are stored", "by underscore. (eg: ``'download_dir'``) \"\"\" for key, field in self._fields.items(): yield key, field.value", "\"\"\" - rpc version 4 - transmission version 1.50 \"\"\" return self.__getattr__(\"rpc_version\") @property", "version 2.20 :return: \"\"\" return self.__getattr__(\"download_dir\") @download_dir.setter def download_dir(self, location: str) -> None:", "-> str: \"\"\" - rpc version 3 - transmission version 1.41 \"\"\" return", "is not None: dirty[key] = value else: for k, v in self._fields.items(): if", "same as the session arguments in the Transmission RPC specification, but with underscore", "done through attributes. The attributes available are the same as the session arguments", "python session = Client().get_session() session.update({'k1': 'v1', \"k2\": \"v2\"}) if you have to access", "def download_dir(self, location: str) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(location, str) and", "= Client().get_session() session.update({'k1': 'v1', \"k2\": \"v2\"}) if you have to access to the", "value.value def items(self) -> Generator[Tuple[str, Any], None, None]: \"\"\" iter key,value pair hyphen", "\"\"\"Is peer exchange enabled - rpc version 5 - transmission version 1.60\"\"\" return", "def __init__(self, client: \"Client\", fields: Dict[str, Any] = None): self._client = client self._fields:", "pex_enabled(self, enabled: bool) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(enabled, bool): self._set(\"pex_enabled\", enabled,", "v.value self._client.set_session(**dirty) def _update(self, other: Union[Dict[str, Any], \"Session\"]) -> None: if isinstance(other, dict):", "field.value @property def download_dir(self) -> str: \"\"\"default download location - rpc version 12", "typing import TYPE_CHECKING, Any, Dict, Tuple, Union, Generator from typing_extensions import Literal from", "str) and location: self._set(\"download_dir\", location, True) else: raise TypeError(f\"{location!r} if not a valid", "if fields is not None: self._update(fields) def __getattr__(self, name: str) -> Any: try:", "5 - transmission version 1.60 \"\"\" return self.__getattr__(\"peer_port\") @peer_port.setter def peer_port(self, port: int)", "if isinstance(port, int): self._set(\"peer_port\", port, True) else: raise ValueError(\"Not a valid limit\") @property", "Union[Dict[str, Any], \"Session\"]) -> None: \"\"\" Update the session data from a Transmission", "if not a valid 'download-dir'\") @property def version(self) -> str: \"\"\" - rpc", "a valid limit\") @property def pex_enabled(self) -> bool: \"\"\"Is peer exchange enabled -", "return self.__getattr__(\"version\") @property def rpc_version(self) -> int: \"\"\" - rpc version 4 -", "are stored with underscore. 
\"\"\" def __init__(self, client: \"Client\", fields: Dict[str, Any] =", "self.__getattr__(\"download_dir\") @download_dir.setter def download_dir(self, location: str) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(location,", "else: raise TypeError(\"Not a valid type\") @property def encryption(self) -> str: return self.__getattr__(\"encryption\")", "``download_dir``) \"\"\" yield from self._fields.keys() def values(self) -> Generator[Any, None, None]: for value", "if v.dirty: dirty[k] = v.value self._client.set_session(**dirty) def _update(self, other: Union[Dict[str, Any], \"Session\"]) ->", "and location: self._set(\"download_dir\", location, True) else: raise TypeError(f\"{location!r} if not a valid 'download-dir'\")", "None: self._update(fields) def __getattr__(self, name: str) -> Any: try: return self._fields[name].value except KeyError", "Literal[\"required\", \"preferred\", \"tolerated\"]) -> None: if value in {\"required\", \"preferred\", \"tolerated\"}: self._set(\"encryption\", value,", "4 - transmission version 1.50 \"\"\" return self.__getattr__(\"rpc_version\") @property def peer_port(self) -> int:", "\"Session\"]) -> None: \"\"\" Update the session data from a Transmission JSON-RPC arguments", "rpc version 12 - transmission version 2.20 :return: \"\"\" return self.__getattr__(\"download_dir\") @download_dir.setter def", "rpc_version(self) -> int: \"\"\" - rpc version 4 - transmission version 1.50 \"\"\"", "\"\"\"submit all dirty field to client\"\"\" dirty = {} if key is not", "= {} if key is not None and value is not None: dirty[key]", "like ``Session().download_dir = '/path/to/download'`` .. code-block:: python session = Client().get_session() session.download_dir = '/path/to/new/download/dir'", "peer exchange enabled - rpc version 5 - transmission version 1.60\"\"\" return self.__getattr__(\"pex_enabled\")", "self.__getattr__(\"encryption\") @encryption.setter def encryption(self, value: Literal[\"required\", \"preferred\", \"tolerated\"]) -> None: if value in", "None: key = key.replace(\"-\", \"_\") current_field = self._fields.get(key) if current_field is None: self._fields[key]", "e: raise AttributeError(f\"No attribute {name}\") from e def _set(self, key: str, value: Any,", "location: str) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(location, str) and location: self._set(\"download_dir\",", "TYPE_CHECKING: from transmission_rpc.client import Client class Session: \"\"\" Session is a dict-like class", "session keys with underscore (eg: ``download_dir``) \"\"\" yield from self._fields.keys() def values(self) ->", "to batch update a session, call ``.update(data)`` .. code-block:: python session = Client().get_session()", "rpc version 3 - transmission version 1.41 \"\"\" return self.__getattr__(\"version\") @property def rpc_version(self)", "client self._fields: Dict[str, Field] = {} if fields is not None: self._update(fields) def", "location, True) else: raise TypeError(f\"{location!r} if not a valid 'download-dir'\") @property def version(self)", "session field can be done through attributes. 
The attributes available are the same", "= \"\" max_length = max(len(x) for x in self._fields.keys()) + 1 for key,", "key = key.replace(\"-\", \"_\") current_field = self._fields.get(key) if current_field is None: self._fields[key] =", "def items(self) -> Generator[Tuple[str, Any], None, None]: \"\"\" iter key,value pair hyphen in", "ValueError(\"Not a valid limit\") @property def pex_enabled(self) -> bool: \"\"\"Is peer exchange enabled", "return self.__getattr__(\"download_dir\") @download_dir.setter def download_dir(self, location: str) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if", "int: \"\"\"Get the peer port. - rpc version 5 - transmission version 1.60", "isinstance(other, dict): for key, value in other.items(): self._set(key, value) elif isinstance(other, Session): for", "port. - rpc version 5 - transmission version 1.60 \"\"\" return self.__getattr__(\"peer_port\") @peer_port.setter", "Literal from transmission_rpc.lib_types import Field if TYPE_CHECKING: from transmission_rpc.client import Client class Session:", "value: Any, commit: bool = False) -> None: key = key.replace(\"-\", \"_\") current_field", "= v.value self._client.set_session(**dirty) def _update(self, other: Union[Dict[str, Any], \"Session\"]) -> None: if isinstance(other,", "'download-dir'\") @property def version(self) -> str: \"\"\" - rpc version 3 - transmission", "sorted(self._fields.items(), key=lambda x: x[0]): text += f\"{key.ljust(max_length)}: {value.value!r}\\n\" return text def _commit(self, key:", "Field] = {} if fields is not None: self._update(fields) def __getattr__(self, name: str)", "value.value) else: raise ValueError(\"Cannot update with supplied data\") def update(self, other: Union[Dict[str, Any],", "str: return self.__getattr__(\"encryption\") @encryption.setter def encryption(self, value: Literal[\"required\", \"preferred\", \"tolerated\"]) -> None: if", "Union, Generator from typing_extensions import Literal from transmission_rpc.lib_types import Field if TYPE_CHECKING: from", "if you have to access to the private ``Session()._fields``, keys are stored with", "data\") def update(self, other: Union[Dict[str, Any], \"Session\"]) -> None: \"\"\" Update the session", "\"tolerated\"}: self._set(\"encryption\", value, commit=True) else: raise ValueError( \"Not a valid encryption, can only", "are also setter like ``Session().download_dir = '/path/to/download'`` .. code-block:: python session = Client().get_session()", "code-block:: python session = Client().get_session() session.update({'k1': 'v1', \"k2\": \"v2\"}) if you have to", "attributes available are the same as the session arguments in the Transmission RPC", "Session): for key, value in other._fields.items(): self._set(key, value.value) else: raise ValueError(\"Cannot update with", "version 4 - transmission version 1.50 \"\"\" return self.__getattr__(\"rpc_version\") @property def peer_port(self) ->", "{value.value!r}\\n\" return text def _commit(self, key: str = None, value: Any = None)", "data from a Transmission JSON-RPC arguments dictionary \"\"\" self._update(other) self._commit() def keys(self) ->", "if current_field.value != value: self._fields[key] = Field(value, True) if commit: self._commit(key, value) def", "key: str = None, value: Any = None) -> None: \"\"\"submit all dirty", "daemon. Access the session field can be done through attributes. 
The attributes available", "``'download_dir'``) \"\"\" for key, field in self._fields.items(): yield key, field.value @property def download_dir(self)", "bool) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(enabled, bool): self._set(\"pex_enabled\", enabled, True) else:", "-> None: \"\"\"Set the peer port. - rpc version 5 - transmission version", "peer_port(self) -> int: \"\"\"Get the peer port. - rpc version 5 - transmission", "{\"required\", \"preferred\", \"tolerated\"}: self._set(\"encryption\", value, commit=True) else: raise ValueError( \"Not a valid encryption,", "for key, value in sorted(self._fields.items(), key=lambda x: x[0]): text += f\"{key.ljust(max_length)}: {value.value!r}\\n\" return", "if current_field is None: self._fields[key] = Field(value, True) else: if current_field.value != value:", "\"preferred\", \"tolerated\"]) -> None: if value in {\"required\", \"preferred\", \"tolerated\"}: self._set(\"encryption\", value, commit=True)", "license. from typing import TYPE_CHECKING, Any, Dict, Tuple, Union, Generator from typing_extensions import", "\"\"\" session keys with underscore (eg: ``download_dir``) \"\"\" yield from self._fields.keys() def values(self)", "raise AttributeError(f\"No attribute {name}\") from e def _set(self, key: str, value: Any, commit:", "= '/path/to/download'`` .. code-block:: python session = Client().get_session() session.download_dir = '/path/to/new/download/dir' if you", "to access to the private ``Session()._fields``, keys are stored with underscore. \"\"\" def", "self._fields.get(key) if current_field is None: self._fields[key] = Field(value, True) else: if current_field.value !=", "Dict[str, Any] = None): self._client = client self._fields: Dict[str, Field] = {} if", "setter like ``Session().download_dir = '/path/to/download'`` .. code-block:: python session = Client().get_session() session.download_dir =", "# Licensed under the MIT license. from typing import TYPE_CHECKING, Any, Dict, Tuple,", "_update(self, other: Union[Dict[str, Any], \"Session\"]) -> None: if isinstance(other, dict): for key, value", "self._update(other) self._commit() def keys(self) -> Generator[str, None, None]: \"\"\" session keys with underscore", "def pex_enabled(self) -> bool: \"\"\"Is peer exchange enabled - rpc version 5 -", "exchange.\"\"\" if isinstance(location, str) and location: self._set(\"download_dir\", location, True) else: raise TypeError(f\"{location!r} if", "= Field(value, True) if commit: self._commit(key, value) def __str__(self) -> str: text =", "in key is replace by underscore. (eg: ``'download_dir'``) \"\"\" for key, field in", "key, field in self._fields.items(): yield key, field.value @property def download_dir(self) -> str: \"\"\"default", "version(self) -> str: \"\"\" - rpc version 3 - transmission version 1.41 \"\"\"", "as the session arguments in the Transmission RPC specification, but with underscore instead", "not None: self._update(fields) def __getattr__(self, name: str) -> Any: try: return self._fields[name].value except", "True) else: raise ValueError(\"Not a valid limit\") @property def pex_enabled(self) -> bool: \"\"\"Is", "hyphen in key is replace by underscore. (eg: ``'download_dir'``) \"\"\" for key, field", "the MIT license. 
from typing import TYPE_CHECKING, Any, Dict, Tuple, Union, Generator from", "field in self._fields.items(): yield key, field.value @property def download_dir(self) -> str: \"\"\"default download", "session = Client().get_session() session.update({'k1': 'v1', \"k2\": \"v2\"}) if you have to access to", "\"\"\" return self.__getattr__(\"version\") @property def rpc_version(self) -> int: \"\"\" - rpc version 4", "rpc version 5 - transmission version 1.60 \"\"\" return self.__getattr__(\"peer_port\") @peer_port.setter def peer_port(self,", "e def _set(self, key: str, value: Any, commit: bool = False) -> None:", "peer port. - rpc version 5 - transmission version 1.60 \"\"\" if isinstance(port,", "\"tolerated\"]) -> None: if value in {\"required\", \"preferred\", \"tolerated\"}: self._set(\"encryption\", value, commit=True) else:", "\"\"\" - rpc version 3 - transmission version 1.41 \"\"\" return self.__getattr__(\"version\") @property", "but with underscore instead of hyphen. get ``'download-dir'`` with ``session.download_dir``. .. code-block:: python", "Any] = None): self._client = client self._fields: Dict[str, Field] = {} if fields", "\"\"\" Session is a dict-like class holding the session data for a Transmission", "\"\"\" return self.__getattr__(\"peer_port\") @peer_port.setter def peer_port(self, port: int) -> None: \"\"\"Set the peer", "Any, Dict, Tuple, Union, Generator from typing_extensions import Literal from transmission_rpc.lib_types import Field", "= None) -> None: \"\"\"submit all dirty field to client\"\"\" dirty = {}", "replace by underscore. (eg: ``'download_dir'``) \"\"\" for key, field in self._fields.items(): yield key,", "1.60 \"\"\" return self.__getattr__(\"peer_port\") @peer_port.setter def peer_port(self, port: int) -> None: \"\"\"Set the", "is a dict-like class holding the session data for a Transmission daemon. Access", "download_dir(self, location: str) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(location, str) and location:", "session.download_dir = '/path/to/new/download/dir' if you want to batch update a session, call ``.update(data)``", "bool = False) -> None: key = key.replace(\"-\", \"_\") current_field = self._fields.get(key) if", "None: \"\"\" Update the session data from a Transmission JSON-RPC arguments dictionary \"\"\"", "\"preferred\", \"tolerated\"}: self._set(\"encryption\", value, commit=True) else: raise ValueError( \"Not a valid encryption, can", ".. code-block:: python session = Client().get_session() session.download_dir = '/path/to/new/download/dir' if you want to", "@download_dir.setter def download_dir(self, location: str) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(location, str)", "if isinstance(other, dict): for key, value in other.items(): self._set(key, value) elif isinstance(other, Session):", "\"Client\", fields: Dict[str, Any] = None): self._client = client self._fields: Dict[str, Field] =", "commit: self._commit(key, value) def __str__(self) -> str: text = \"\" max_length = max(len(x)", "key is replace by underscore. 
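    # ``__str__`` renders one aligned ``key: value`` line per field, e.g.
    # (illustrative output only; the actual fields depend on the daemon):
    #
    #     download_dir : '/srv/downloads'
    #     peer_port    : 51413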
    def _commit(self, key: str = None, value: Any = None) -> None:
        """Submit all dirty fields to the client."""
        dirty = {}

        if key is not None and value is not None:
            dirty[key] = value
        else:
            for k, v in self._fields.items():
                if v.dirty:
                    dirty[k] = v.value

        self._client.set_session(**dirty)

    def _update(self, other: Union[Dict[str, Any], "Session"]) -> None:
        if isinstance(other, dict):
            for key, value in other.items():
                self._set(key, value)
        elif isinstance(other, Session):
            for key, value in other._fields.items():
                self._set(key, value.value)
        else:
            raise ValueError("Cannot update with supplied data")

    def update(self, other: Union[Dict[str, Any], "Session"]) -> None:
        """
        Update the session data from a Transmission JSON-RPC arguments dictionary.
        """
        self._update(other)
        self._commit()

    def keys(self) -> Generator[str, None, None]:
        """
        Session keys, with underscores (eg: ``download_dir``).
        """
        yield from self._fields.keys()

    def values(self) -> Generator[Any, None, None]:
        for value in self._fields.values():
            yield value.value

    def items(self) -> Generator[Tuple[str, Any], None, None]:
        """
        Iterate over key/value pairs.

        Hyphens in keys are replaced by underscores (eg: ``'download_dir'``).
        """
        for key, field in self._fields.items():
            yield key, field.value
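    # How staging and committing fit together (descriptive note): ``_set``
    # stores a changed value as ``Field(value, True)``, i.e. marked dirty;
    # ``update()`` first stages every supplied key via ``_update`` and then
    # calls ``_commit()``, which gathers all dirty fields and pushes them in
    # a single ``Client.set_session(**dirty)`` call. So, for example,
    # ``session.update({'download-dir': '/d', 'pex-enabled': True})`` results
    # in one RPC carrying both arguments.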
The attributes available are the", "try: return self._fields[name].value except KeyError as e: raise AttributeError(f\"No attribute {name}\") from e", "text += f\"{key.ljust(max_length)}: {value.value!r}\\n\" return text def _commit(self, key: str = None, value:", "value) elif isinstance(other, Session): for key, value in other._fields.items(): self._set(key, value.value) else: raise", "rpc version 4 - transmission version 1.50 \"\"\" return self.__getattr__(\"rpc_version\") @property def peer_port(self)", "``Session().download_dir = '/path/to/download'`` .. code-block:: python session = Client().get_session() session.download_dir = '/path/to/new/download/dir' if", "Session is a dict-like class holding the session data for a Transmission daemon.", "``.update(data)`` .. code-block:: python session = Client().get_session() session.update({'k1': 'v1', \"k2\": \"v2\"}) if you", "5 - transmission version 1.60\"\"\" return self.__getattr__(\"pex_enabled\") @pex_enabled.setter def pex_enabled(self, enabled: bool) ->", "the same as the session arguments in the Transmission RPC specification, but with", "-> int: \"\"\"Get the peer port. - rpc version 5 - transmission version", "max_length = max(len(x) for x in self._fields.keys()) + 1 for key, value in", "else: if current_field.value != value: self._fields[key] = Field(value, True) if commit: self._commit(key, value)", "int) -> None: \"\"\"Set the peer port. - rpc version 5 - transmission", "for a Transmission daemon. Access the session field can be done through attributes.", "isinstance(enabled, bool): self._set(\"pex_enabled\", enabled, True) else: raise TypeError(\"Not a valid type\") @property def", "= value else: for k, v in self._fields.items(): if v.dirty: dirty[k] = v.value", "with underscore. \"\"\" def __init__(self, client: \"Client\", fields: Dict[str, Any] = None): self._client", "Any], None, None]: \"\"\" iter key,value pair hyphen in key is replace by", "get ``'download-dir'`` with ``session.download_dir``. .. code-block:: python session = Client().get_session() current = session.download_dir", "key=lambda x: x[0]): text += f\"{key.ljust(max_length)}: {value.value!r}\\n\" return text def _commit(self, key: str", "data for a Transmission daemon. Access the session field can be done through", "Access the session field can be done through attributes. The attributes available are", "download_dir(self) -> str: \"\"\"default download location - rpc version 12 - transmission version", "\"Session\"]) -> None: if isinstance(other, dict): for key, value in other.items(): self._set(key, value)", "isinstance(port, int): self._set(\"peer_port\", port, True) else: raise ValueError(\"Not a valid limit\") @property def", "current_field = self._fields.get(key) if current_field is None: self._fields[key] = Field(value, True) else: if", "python session = Client().get_session() session.download_dir = '/path/to/new/download/dir' if you want to batch update", "keys(self) -> Generator[str, None, None]: \"\"\" session keys with underscore (eg: ``download_dir``) \"\"\"", "Any, commit: bool = False) -> None: key = key.replace(\"-\", \"_\") current_field =", "not None: dirty[key] = value else: for k, v in self._fields.items(): if v.dirty:", "MIT license. 
from typing import TYPE_CHECKING, Any, Dict, Tuple, Union, Generator from typing_extensions", "download location - rpc version 12 - transmission version 2.20 :return: \"\"\" return", "True) else: raise TypeError(\"Not a valid type\") @property def encryption(self) -> str: return", "all dirty field to client\"\"\" dirty = {} if key is not None", "location: self._set(\"download_dir\", location, True) else: raise TypeError(f\"{location!r} if not a valid 'download-dir'\") @property", "session data from a Transmission JSON-RPC arguments dictionary \"\"\" self._update(other) self._commit() def keys(self)", "None: \"\"\"Set the peer port. - rpc version 5 - transmission version 1.60", "encryption(self) -> str: return self.__getattr__(\"encryption\") @encryption.setter def encryption(self, value: Literal[\"required\", \"preferred\", \"tolerated\"]) ->", "<<EMAIL>> # Copyright (c) 2008-2014 <NAME> <<EMAIL>> # Licensed under the MIT license.", "\"\"\" return self.__getattr__(\"download_dir\") @download_dir.setter def download_dir(self, location: str) -> None: \"\"\"Enable/disable peer exchange.\"\"\"", "client\"\"\" dirty = {} if key is not None and value is not", "not a valid 'download-dir'\") @property def version(self) -> str: \"\"\" - rpc version", "underscore. \"\"\" def __init__(self, client: \"Client\", fields: Dict[str, Any] = None): self._client =", "\"_\") current_field = self._fields.get(key) if current_field is None: self._fields[key] = Field(value, True) else:", "have to access to the private ``Session()._fields``, keys are stored with underscore. \"\"\"", "(c) 2008-2014 <NAME> <<EMAIL>> # Licensed under the MIT license. from typing import", "-> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(location, str) and location: self._set(\"download_dir\", location, True)", "import TYPE_CHECKING, Any, Dict, Tuple, Union, Generator from typing_extensions import Literal from transmission_rpc.lib_types", "of hyphen. get ``'download-dir'`` with ``session.download_dir``. .. code-block:: python session = Client().get_session() current", "def encryption(self) -> str: return self.__getattr__(\"encryption\") @encryption.setter def encryption(self, value: Literal[\"required\", \"preferred\", \"tolerated\"])", "str) -> None: \"\"\"Enable/disable peer exchange.\"\"\" if isinstance(location, str) and location: self._set(\"download_dir\", location,", "attribute {name}\") from e def _set(self, key: str, value: Any, commit: bool =", "a dict-like class holding the session data for a Transmission daemon. Access the", "a valid 'download-dir'\") @property def version(self) -> str: \"\"\" - rpc version 3", "import Literal from transmission_rpc.lib_types import Field if TYPE_CHECKING: from transmission_rpc.client import Client class", "Transmission RPC specification, but with underscore instead of hyphen. get ``'download-dir'`` with ``session.download_dir``.", "is not None: self._update(fields) def __getattr__(self, name: str) -> Any: try: return self._fields[name].value", "# Copyright (c) 2008-2014 <NAME> <<EMAIL>> # Licensed under the MIT license. from", "value, commit=True) else: raise ValueError( \"Not a valid encryption, can only be one", "-> None: \"\"\"submit all dirty field to client\"\"\" dirty = {} if key", "def __getattr__(self, name: str) -> Any: try: return self._fields[name].value except KeyError as e:", "dirty = {} if key is not None and value is not None:", "underscore instead of hyphen. get ``'download-dir'`` with ``session.download_dir``. .. 
# Copyright (c) 2018-2021 Trim21 <<EMAIL>>
# Copyright (c) 2008-2014 <NAME> <<EMAIL>>
# Licensed under the MIT license.
from typing import TYPE_CHECKING, Any, Dict, Tuple, Union, Generator

from typing_extensions import Literal

from transmission_rpc.lib_types import Field

if TYPE_CHECKING:
    from transmission_rpc.client import Client


class Session:
    """
    Session is a dict-like class holding the session data for a Transmission daemon.

    Access to the session fields is done through attributes.
    The attributes available are the same as the session arguments in the
    Transmission RPC specification, but with underscores instead of hyphens:
    ``download-dir`` -> ``session.download_dir``.

    .. code-block:: python

        session = Client().get_session()

        current = session.download_dir

    There are also setters, like ``Session().download_dir = '/path/to/download'``:

    .. code-block:: python

        session = Client().get_session()

        session.download_dir = '/path/to/new/download/dir'

    If you want to batch-update a session, call ``.update(data)``:

    .. code-block:: python

        session = Client().get_session()
        session.update({'k1': 'v1', "k2": "v2"})

    If you have to access the private ``Session()._fields``,
    keys are stored with underscores.
    """

    def __init__(self, client: "Client", fields: Dict[str, Any] = None):
        self._client = client
        self._fields: Dict[str, Field] = {}
        if fields is not None:
            self._update(fields)

    def __getattr__(self, name: str) -> Any:
        try:
            return self._fields[name].value
        except KeyError as e:
            raise AttributeError(f"No attribute {name}") from e

    def _set(self, key: str, value: Any, commit: bool = False) -> None:
        key = key.replace("-", "_")
        current_field = self._fields.get(key)
        if current_field is None:
            self._fields[key] = Field(value, True)
        else:
            if current_field.value != value:
                self._fields[key] = Field(value, True)
        if commit:
            self._commit(key, value)

    def __str__(self) -> str:
        text = ""
        max_length = max(len(x) for x in self._fields.keys()) + 1
        for key, value in sorted(self._fields.items(), key=lambda x: x[0]):
            text += f"{key.ljust(max_length)}: {value.value!r}\n"
        return text

    def _commit(self, key: str = None, value: Any = None) -> None:
        """submit all dirty fields to the client"""
        dirty = {}
        if key is not None and value is not None:
            dirty[key] = value
        else:
            for k, v in self._fields.items():
                if v.dirty:
                    dirty[k] = v.value
        self._client.set_session(**dirty)

    def _update(self, other: Union[Dict[str, Any], "Session"]) -> None:
        if isinstance(other, dict):
            for key, value in other.items():
                self._set(key, value)
        elif isinstance(other, Session):
            for key, value in other._fields.items():
                self._set(key, value.value)
        else:
            raise ValueError("Cannot update with supplied data")

    def update(self, other: Union[Dict[str, Any], "Session"]) -> None:
        """
        Update the session data from a Transmission JSON-RPC arguments dictionary
        """
        self._update(other)
        self._commit()

    def keys(self) -> Generator[str, None, None]:
        """
        session keys with underscore (eg: ``download_dir``)
        """
        yield from self._fields.keys()

    def values(self) -> Generator[Any, None, None]:
        for value in self._fields.values():
            yield value.value

    def items(self) -> Generator[Tuple[str, Any], None, None]:
        """
        iterate over key/value pairs

        hyphens in keys are replaced by underscores (eg: ``'download_dir'``)
        """
        for key, field in self._fields.items():
            yield key, field.value

    @property
    def download_dir(self) -> str:
        """default download location

        - rpc version 12
        - transmission version 2.20
        """
        return self.__getattr__("download_dir")

    @download_dir.setter
    def download_dir(self, location: str) -> None:
        """Set the default download location."""
        if isinstance(location, str) and location:
            self._set("download_dir", location, True)
        else:
            raise TypeError(f"{location!r} is not a valid 'download-dir'")

    @property
    def version(self) -> str:
        """
        - rpc version 3
        - transmission version 1.41
        """
        return self.__getattr__("version")

    @property
    def rpc_version(self) -> int:
        """
        - rpc version 4
        - transmission version 1.50
        """
        return self.__getattr__("rpc_version")

    @property
    def peer_port(self) -> int:
        """Get the peer port.

        - rpc version 5
        - transmission version 1.60
        """
        return self.__getattr__("peer_port")

    @peer_port.setter
    def peer_port(self, port: int) -> None:
        """Set the peer port.

        - rpc version 5
        - transmission version 1.60
        """
        if isinstance(port, int):
            self._set("peer_port", port, True)
        else:
            raise ValueError("Not a valid port")

    @property
    def pex_enabled(self) -> bool:
        """Is peer exchange enabled

        - rpc version 5
        - transmission version 1.60"""
        return self.__getattr__("pex_enabled")

    @pex_enabled.setter
    def pex_enabled(self, enabled: bool) -> None:
        """Enable/disable peer exchange."""
        if isinstance(enabled, bool):
            self._set("pex_enabled", enabled, True)
        else:
            raise TypeError("Not a valid type")

    @property
    def encryption(self) -> str:
        return self.__getattr__("encryption")

    @encryption.setter
    def encryption(self, value: Literal["required", "preferred", "tolerated"]) -> None:
        if value in {"required", "preferred", "tolerated"}:
            self._set("encryption", value, commit=True)
        else:
            raise ValueError(
                "Not a valid encryption, can only be one of ['required', 'preferred', 'tolerated']"
            )
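

# A minimal usage sketch for the Session class above. ``FakeClient`` is a
# hypothetical stand-in for ``transmission_rpc.Client`` so the snippet can run
# without a live Transmission daemon; against a real daemon you would obtain
# the object via ``Client().get_session()`` instead.
if __name__ == "__main__":

    class FakeClient:
        def set_session(self, **kwargs):
            # A real client would issue a "session-set" RPC request here.
            print("session-set:", kwargs)

    session = Session(FakeClient(), {"download-dir": "/srv/downloads", "peer-port": 51413})
    print(session.download_dir)       # attribute access; hyphens became underscores
    session.peer_port = 51414         # setter validates the type, then commits immediately
    session.update({"pex-enabled": True})  # batch update; _commit() resends all dirty fields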
[ "i in range(4): link = browser.links.find_by_partial_text('Hemisphere')[i] link.click() title = browser.find_by_css('.title').first.text url = browser.find_by_text('Sample').first['href']", "Image executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL IMAGE').click()", "'https://twitter.com/marswxreport?lang=en' mars_facts = 'https://space-facts.com/mars/' mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results = {} # Mars News", "'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text') for i in t: if 'InSight' in i.text: i.a.decompose()", "range(4): link = browser.links.find_by_partial_text('Hemisphere')[i] link.click() title = browser.find_by_css('.title').first.text url = browser.find_by_text('Sample').first['href'] results[title] =", "= paragraph.text.strip() results['news_title'] = news_title results['news_text'] = news_paragraph # Mars Image executable_path =", "= pd.read_html(mars_facts) facts_df = facts_html[0] facts_df.columns = ['Description', 'Data'] facts_df.set_index('Description', inplace=True) html_table =", "= ['Description', 'Data'] facts_df.set_index('Description', inplace=True) html_table = facts_df.to_html() html_table = html_table.replace('\\n', '') results['table_html']", "# Mars Facts facts_html = pd.read_html(mars_facts) facts_df = facts_html[0] facts_df.columns = ['Description', 'Data']", "Mars Facts facts_html = pd.read_html(mars_facts) facts_df = facts_html[0] facts_df.columns = ['Description', 'Data'] facts_df.set_index('Description',", "import pandas as pd import time def scrape(): mars_news = 'https://mars.nasa.gov/news/' mars_image =", "html_table = html_table.replace('\\n', '') results['table_html'] = html_table # Mars Hemispheres executable_path = {'executable_path':", "news_pull = BeautifulSoup(response.text, 'lxml') title = news_pull.find('div', class_= 'content_title') news_title = title.a.text.strip() paragraph", "'/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_hemisphere) for i in range(4): link =", "Browser('chrome', **executable_path, headless=False) browser.visit(mars_hemisphere) for i in range(4): link = browser.links.find_by_partial_text('Hemisphere')[i] link.click() title", "for i in t: if 'InSight' in i.text: i.a.decompose() results['tweet'] = i.text break", "link = browser.links.find_by_partial_text('Hemisphere')[i] link.click() title = browser.find_by_css('.title').first.text url = browser.find_by_text('Sample').first['href'] results[title] = url", "requests.get(mars_news) time.sleep(3) news_pull = BeautifulSoup(response.text, 'lxml') title = news_pull.find('div', class_= 'content_title') news_title =", "for i in range(4): link = browser.links.find_by_partial_text('Hemisphere')[i] link.click() title = browser.find_by_css('.title').first.text url =", "t: if 'InSight' in i.text: i.a.decompose() results['tweet'] = i.text break # Mars Facts", "from bs4 import BeautifulSoup from splinter import Browser import requests import pymongo import", "response = requests.get(mars_news) time.sleep(3) news_pull = BeautifulSoup(response.text, 'lxml') title = news_pull.find('div', class_= 'content_title')", "{'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL 
IMAGE').click() browser.links.find_by_partial_text('more info').click() featured_img_url", "news_title results['news_text'] = news_paragraph # Mars Image executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser =", "paragraph = news_pull.find('div', class_= 'rollover_description_inner') news_paragraph = paragraph.text.strip() results['news_title'] = news_title results['news_text'] =", "= news_title results['news_text'] = news_paragraph # Mars Image executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser", "browser.quit() # Mars Twitter response = requests.get(mars_twitter) tweet_pull = BeautifulSoup(response.text, 'lxml') t =", "'InSight' in i.text: i.a.decompose() results['tweet'] = i.text break # Mars Facts facts_html =", "mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter = 'https://twitter.com/marswxreport?lang=en' mars_facts = 'https://space-facts.com/mars/' mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results", "executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_hemisphere) for i in", "bs4 import BeautifulSoup from splinter import Browser import requests import pymongo import pandas", "class_= 'rollover_description_inner') news_paragraph = paragraph.text.strip() results['news_title'] = news_title results['news_text'] = news_paragraph # Mars", "t = tweet_pull.find_all('p', class_ = 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text') for i in t:", "headless=False) browser.visit(mars_hemisphere) for i in range(4): link = browser.links.find_by_partial_text('Hemisphere')[i] link.click() title = browser.find_by_css('.title').first.text", "in range(4): link = browser.links.find_by_partial_text('Hemisphere')[i] link.click() title = browser.find_by_css('.title').first.text url = browser.find_by_text('Sample').first['href'] results[title]", "'/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL IMAGE').click() browser.links.find_by_partial_text('more info').click() featured_img_url =", "'rollover_description_inner') news_paragraph = paragraph.text.strip() results['news_title'] = news_title results['news_text'] = news_paragraph # Mars Image", "= 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text') for i in t: if 'InSight' in i.text:", "from splinter import Browser import requests import pymongo import pandas as pd import", "{} # Mars News response = requests.get(mars_news) time.sleep(3) news_pull = BeautifulSoup(response.text, 'lxml') title", "title.a.text.strip() paragraph = news_pull.find('div', class_= 'rollover_description_inner') news_paragraph = paragraph.text.strip() results['news_title'] = news_title results['news_text']", "response = requests.get(mars_twitter) tweet_pull = BeautifulSoup(response.text, 'lxml') t = tweet_pull.find_all('p', class_ = 'TweetTextSize", "pymongo import pandas as pd import time def scrape(): mars_news = 'https://mars.nasa.gov/news/' mars_image", "js-tweet-text tweet-text') for i in t: if 'InSight' in i.text: i.a.decompose() results['tweet'] =", "= facts_html[0] facts_df.columns = ['Description', 'Data'] facts_df.set_index('Description', inplace=True) html_table = facts_df.to_html() html_table =", "= requests.get(mars_news) time.sleep(3) news_pull = BeautifulSoup(response.text, 'lxml') title = 
news_pull.find('div', class_= 'content_title') news_title", "in i.text: i.a.decompose() results['tweet'] = i.text break # Mars Facts facts_html = pd.read_html(mars_facts)", "browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL IMAGE').click() browser.links.find_by_partial_text('more info').click() featured_img_url = browser.find_by_css('.main_image').first['src']", "BeautifulSoup from splinter import Browser import requests import pymongo import pandas as pd", "= Browser('chrome', **executable_path, headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL IMAGE').click() browser.links.find_by_partial_text('more info').click() featured_img_url = browser.find_by_css('.main_image').first['src'] results['image_url']", "executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL IMAGE').click() browser.links.find_by_partial_text('more", "class_= 'content_title') news_title = title.a.text.strip() paragraph = news_pull.find('div', class_= 'rollover_description_inner') news_paragraph = paragraph.text.strip()", "link.click() title = browser.find_by_css('.title').first.text url = browser.find_by_text('Sample').first['href'] results[title] = url browser.back() browser.quit() return", "# Mars Hemispheres executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_hemisphere)", "{'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_hemisphere) for i in range(4): link", "mars_twitter = 'https://twitter.com/marswxreport?lang=en' mars_facts = 'https://space-facts.com/mars/' mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results = {} #", "time def scrape(): mars_news = 'https://mars.nasa.gov/news/' mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter = 'https://twitter.com/marswxreport?lang=en' mars_facts", "import pymongo import pandas as pd import time def scrape(): mars_news = 'https://mars.nasa.gov/news/'", "'') results['table_html'] = html_table # Mars Hemispheres executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser =", "news_paragraph # Mars Image executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False)", "'lxml') t = tweet_pull.find_all('p', class_ = 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text') for i in", "= 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results = {} # Mars News response = requests.get(mars_news) time.sleep(3) news_pull", "browser.visit(mars_hemisphere) for i in range(4): link = browser.links.find_by_partial_text('Hemisphere')[i] link.click() title = browser.find_by_css('.title').first.text url", "'https://mars.nasa.gov/news/' mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter = 'https://twitter.com/marswxreport?lang=en' mars_facts = 'https://space-facts.com/mars/' mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'", "'lxml') title = news_pull.find('div', class_= 'content_title') news_title = title.a.text.strip() paragraph = 
news_pull.find('div', class_=", "tweet_pull.find_all('p', class_ = 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text') for i in t: if 'InSight'", "mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results = {} # Mars News response = requests.get(mars_news) time.sleep(3)", "mars_facts = 'https://space-facts.com/mars/' mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results = {} # Mars News response", "browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_hemisphere) for i in range(4): link = browser.links.find_by_partial_text('Hemisphere')[i]", "Mars Hemispheres executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_hemisphere) for", "= 'https://space-facts.com/mars/' mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results = {} # Mars News response =", "time.sleep(3) news_pull = BeautifulSoup(response.text, 'lxml') title = news_pull.find('div', class_= 'content_title') news_title = title.a.text.strip()", "Browser('chrome', **executable_path, headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL IMAGE').click() browser.links.find_by_partial_text('more info').click() featured_img_url = browser.find_by_css('.main_image').first['src'] results['image_url'] =", "scrape(): mars_news = 'https://mars.nasa.gov/news/' mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter = 'https://twitter.com/marswxreport?lang=en' mars_facts = 'https://space-facts.com/mars/'", "'Data'] facts_df.set_index('Description', inplace=True) html_table = facts_df.to_html() html_table = html_table.replace('\\n', '') results['table_html'] = html_table", "info').click() featured_img_url = browser.find_by_css('.main_image').first['src'] results['image_url'] = featured_img_url browser.quit() # Mars Twitter response =", "html_table = facts_df.to_html() html_table = html_table.replace('\\n', '') results['table_html'] = html_table # Mars Hemispheres", "= {} # Mars News response = requests.get(mars_news) time.sleep(3) news_pull = BeautifulSoup(response.text, 'lxml')", "title = news_pull.find('div', class_= 'content_title') news_title = title.a.text.strip() paragraph = news_pull.find('div', class_= 'rollover_description_inner')", "title = browser.find_by_css('.title').first.text url = browser.find_by_text('Sample').first['href'] results[title] = url browser.back() browser.quit() return results", "facts_html[0] facts_df.columns = ['Description', 'Data'] facts_df.set_index('Description', inplace=True) html_table = facts_df.to_html() html_table = html_table.replace('\\n',", "news_pull.find('div', class_= 'content_title') news_title = title.a.text.strip() paragraph = news_pull.find('div', class_= 'rollover_description_inner') news_paragraph =", "= BeautifulSoup(response.text, 'lxml') t = tweet_pull.find_all('p', class_ = 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text') for", "news_pull.find('div', class_= 'rollover_description_inner') news_paragraph = paragraph.text.strip() results['news_title'] = news_title results['news_text'] = news_paragraph #", "pd import time def scrape(): mars_news = 'https://mars.nasa.gov/news/' mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter =", "pd.read_html(mars_facts) facts_df = facts_html[0] 
facts_df.columns = ['Description', 'Data'] facts_df.set_index('Description', inplace=True) html_table = facts_df.to_html()", "= browser.find_by_css('.main_image').first['src'] results['image_url'] = featured_img_url browser.quit() # Mars Twitter response = requests.get(mars_twitter) tweet_pull", "**executable_path, headless=False) browser.visit(mars_hemisphere) for i in range(4): link = browser.links.find_by_partial_text('Hemisphere')[i] link.click() title =", "import Browser import requests import pymongo import pandas as pd import time def", "'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results = {} # Mars News response = requests.get(mars_news) time.sleep(3) news_pull =", "'content_title') news_title = title.a.text.strip() paragraph = news_pull.find('div', class_= 'rollover_description_inner') news_paragraph = paragraph.text.strip() results['news_title']", "= Browser('chrome', **executable_path, headless=False) browser.visit(mars_hemisphere) for i in range(4): link = browser.links.find_by_partial_text('Hemisphere')[i] link.click()", "'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter = 'https://twitter.com/marswxreport?lang=en' mars_facts = 'https://space-facts.com/mars/' mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results = {}", "BeautifulSoup(response.text, 'lxml') title = news_pull.find('div', class_= 'content_title') news_title = title.a.text.strip() paragraph = news_pull.find('div',", "= featured_img_url browser.quit() # Mars Twitter response = requests.get(mars_twitter) tweet_pull = BeautifulSoup(response.text, 'lxml')", "BeautifulSoup(response.text, 'lxml') t = tweet_pull.find_all('p', class_ = 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text') for i", "= {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_hemisphere) for i in range(4):", "i.a.decompose() results['tweet'] = i.text break # Mars Facts facts_html = pd.read_html(mars_facts) facts_df =", "break # Mars Facts facts_html = pd.read_html(mars_facts) facts_df = facts_html[0] facts_df.columns = ['Description',", "tweet_pull = BeautifulSoup(response.text, 'lxml') t = tweet_pull.find_all('p', class_ = 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text')", "facts_df.columns = ['Description', 'Data'] facts_df.set_index('Description', inplace=True) html_table = facts_df.to_html() html_table = html_table.replace('\\n', '')", "facts_html = pd.read_html(mars_facts) facts_df = facts_html[0] facts_df.columns = ['Description', 'Data'] facts_df.set_index('Description', inplace=True) html_table", "= news_pull.find('div', class_= 'rollover_description_inner') news_paragraph = paragraph.text.strip() results['news_title'] = news_title results['news_text'] = news_paragraph", "= news_paragraph # Mars Image executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path,", "inplace=True) html_table = facts_df.to_html() html_table = html_table.replace('\\n', '') results['table_html'] = html_table # Mars", "Browser import requests import pymongo import pandas as pd import time def scrape():", "= i.text break # Mars Facts facts_html = pd.read_html(mars_facts) facts_df = facts_html[0] facts_df.columns", "Twitter response = requests.get(mars_twitter) tweet_pull = BeautifulSoup(response.text, 'lxml') t = tweet_pull.find_all('p', class_ =", 
"<reponame>neilhsu70/web-scraping-challenge from bs4 import BeautifulSoup from splinter import Browser import requests import pymongo", "news_paragraph = paragraph.text.strip() results['news_title'] = news_title results['news_text'] = news_paragraph # Mars Image executable_path", "featured_img_url = browser.find_by_css('.main_image').first['src'] results['image_url'] = featured_img_url browser.quit() # Mars Twitter response = requests.get(mars_twitter)", "['Description', 'Data'] facts_df.set_index('Description', inplace=True) html_table = facts_df.to_html() html_table = html_table.replace('\\n', '') results['table_html'] =", "def scrape(): mars_news = 'https://mars.nasa.gov/news/' mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter = 'https://twitter.com/marswxreport?lang=en' mars_facts =", "= html_table.replace('\\n', '') results['table_html'] = html_table # Mars Hemispheres executable_path = {'executable_path': '/usr/local/bin/chromedriver'}", "import time def scrape(): mars_news = 'https://mars.nasa.gov/news/' mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter = 'https://twitter.com/marswxreport?lang=en'", "i.text break # Mars Facts facts_html = pd.read_html(mars_facts) facts_df = facts_html[0] facts_df.columns =", "= tweet_pull.find_all('p', class_ = 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text') for i in t: if", "requests import pymongo import pandas as pd import time def scrape(): mars_news =", "class_ = 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text') for i in t: if 'InSight' in", "mars_news = 'https://mars.nasa.gov/news/' mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter = 'https://twitter.com/marswxreport?lang=en' mars_facts = 'https://space-facts.com/mars/' mars_hemisphere", "= facts_df.to_html() html_table = html_table.replace('\\n', '') results['table_html'] = html_table # Mars Hemispheres executable_path", "import BeautifulSoup from splinter import Browser import requests import pymongo import pandas as", "= 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter = 'https://twitter.com/marswxreport?lang=en' mars_facts = 'https://space-facts.com/mars/' mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results =", "if 'InSight' in i.text: i.a.decompose() results['tweet'] = i.text break # Mars Facts facts_html", "i.text: i.a.decompose() results['tweet'] = i.text break # Mars Facts facts_html = pd.read_html(mars_facts) facts_df", "browser.links.find_by_partial_text('Hemisphere')[i] link.click() title = browser.find_by_css('.title').first.text url = browser.find_by_text('Sample').first['href'] results[title] = url browser.back() browser.quit()", "= {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL IMAGE').click() browser.links.find_by_partial_text('more info').click()", "results['tweet'] = i.text break # Mars Facts facts_html = pd.read_html(mars_facts) facts_df = facts_html[0]", "= 'https://mars.nasa.gov/news/' mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter = 'https://twitter.com/marswxreport?lang=en' mars_facts = 'https://space-facts.com/mars/' mars_hemisphere =", "results['table_html'] = html_table # Mars Hemispheres executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = 
Browser('chrome',", "Mars News response = requests.get(mars_news) time.sleep(3) news_pull = BeautifulSoup(response.text, 'lxml') title = news_pull.find('div',", "paragraph.text.strip() results['news_title'] = news_title results['news_text'] = news_paragraph # Mars Image executable_path = {'executable_path':", "TweetTextSize--normal js-tweet-text tweet-text') for i in t: if 'InSight' in i.text: i.a.decompose() results['tweet']", "browser.links.find_by_partial_text('FULL IMAGE').click() browser.links.find_by_partial_text('more info').click() featured_img_url = browser.find_by_css('.main_image').first['src'] results['image_url'] = featured_img_url browser.quit() # Mars", "= html_table # Mars Hemispheres executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path,", "news_title = title.a.text.strip() paragraph = news_pull.find('div', class_= 'rollover_description_inner') news_paragraph = paragraph.text.strip() results['news_title'] =", "splinter import Browser import requests import pymongo import pandas as pd import time", "= 'https://twitter.com/marswxreport?lang=en' mars_facts = 'https://space-facts.com/mars/' mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results = {} # Mars", "results['image_url'] = featured_img_url browser.quit() # Mars Twitter response = requests.get(mars_twitter) tweet_pull = BeautifulSoup(response.text,", "Mars Twitter response = requests.get(mars_twitter) tweet_pull = BeautifulSoup(response.text, 'lxml') t = tweet_pull.find_all('p', class_", "= news_pull.find('div', class_= 'content_title') news_title = title.a.text.strip() paragraph = news_pull.find('div', class_= 'rollover_description_inner') news_paragraph", "**executable_path, headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL IMAGE').click() browser.links.find_by_partial_text('more info').click() featured_img_url = browser.find_by_css('.main_image').first['src'] results['image_url'] = featured_img_url", "featured_img_url browser.quit() # Mars Twitter response = requests.get(mars_twitter) tweet_pull = BeautifulSoup(response.text, 'lxml') t", "results['news_text'] = news_paragraph # Mars Image executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome',", "= title.a.text.strip() paragraph = news_pull.find('div', class_= 'rollover_description_inner') news_paragraph = paragraph.text.strip() results['news_title'] = news_title", "browser.find_by_css('.main_image').first['src'] results['image_url'] = featured_img_url browser.quit() # Mars Twitter response = requests.get(mars_twitter) tweet_pull =", "Facts facts_html = pd.read_html(mars_facts) facts_df = facts_html[0] facts_df.columns = ['Description', 'Data'] facts_df.set_index('Description', inplace=True)", "facts_df = facts_html[0] facts_df.columns = ['Description', 'Data'] facts_df.set_index('Description', inplace=True) html_table = facts_df.to_html() html_table", "html_table # Mars Hemispheres executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False)", "import requests import pymongo import pandas as pd import time def scrape(): mars_news", "as pd import time def scrape(): mars_news = 'https://mars.nasa.gov/news/' mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' mars_twitter", "Mars Image executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', 
**executable_path, headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL", "i in t: if 'InSight' in i.text: i.a.decompose() results['tweet'] = i.text break #", "= browser.links.find_by_partial_text('Hemisphere')[i] link.click() title = browser.find_by_css('.title').first.text url = browser.find_by_text('Sample').first['href'] results[title] = url browser.back()", "html_table.replace('\\n', '') results['table_html'] = html_table # Mars Hemispheres executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser", "= requests.get(mars_twitter) tweet_pull = BeautifulSoup(response.text, 'lxml') t = tweet_pull.find_all('p', class_ = 'TweetTextSize TweetTextSize--normal", "results = {} # Mars News response = requests.get(mars_news) time.sleep(3) news_pull = BeautifulSoup(response.text,", "requests.get(mars_twitter) tweet_pull = BeautifulSoup(response.text, 'lxml') t = tweet_pull.find_all('p', class_ = 'TweetTextSize TweetTextSize--normal js-tweet-text", "'https://space-facts.com/mars/' mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' results = {} # Mars News response = requests.get(mars_news)", "IMAGE').click() browser.links.find_by_partial_text('more info').click() featured_img_url = browser.find_by_css('.main_image').first['src'] results['image_url'] = featured_img_url browser.quit() # Mars Twitter", "tweet-text') for i in t: if 'InSight' in i.text: i.a.decompose() results['tweet'] = i.text", "# Mars News response = requests.get(mars_news) time.sleep(3) news_pull = BeautifulSoup(response.text, 'lxml') title =", "# Mars Twitter response = requests.get(mars_twitter) tweet_pull = BeautifulSoup(response.text, 'lxml') t = tweet_pull.find_all('p',", "headless=False) browser.visit(mars_image) browser.links.find_by_partial_text('FULL IMAGE').click() browser.links.find_by_partial_text('more info').click() featured_img_url = browser.find_by_css('.main_image').first['src'] results['image_url'] = featured_img_url browser.quit()", "in t: if 'InSight' in i.text: i.a.decompose() results['tweet'] = i.text break # Mars", "browser.links.find_by_partial_text('more info').click() featured_img_url = browser.find_by_css('.main_image').first['src'] results['image_url'] = featured_img_url browser.quit() # Mars Twitter response", "= BeautifulSoup(response.text, 'lxml') title = news_pull.find('div', class_= 'content_title') news_title = title.a.text.strip() paragraph =", "browser.visit(mars_image) browser.links.find_by_partial_text('FULL IMAGE').click() browser.links.find_by_partial_text('more info').click() featured_img_url = browser.find_by_css('.main_image').first['src'] results['image_url'] = featured_img_url browser.quit() #", "facts_df.set_index('Description', inplace=True) html_table = facts_df.to_html() html_table = html_table.replace('\\n', '') results['table_html'] = html_table #", "Hemispheres executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_hemisphere) for i", "pandas as pd import time def scrape(): mars_news = 'https://mars.nasa.gov/news/' mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'", "# Mars Image executable_path = {'executable_path': '/usr/local/bin/chromedriver'} browser = Browser('chrome', **executable_path, headless=False) browser.visit(mars_image)", "News response = requests.get(mars_news) time.sleep(3) news_pull = BeautifulSoup(response.text, 'lxml') title = 
news_pull.find('div', class_=", "facts_df.to_html() html_table = html_table.replace('\\n', '') results['table_html'] = html_table # Mars Hemispheres executable_path =", "results['news_title'] = news_title results['news_text'] = news_paragraph # Mars Image executable_path = {'executable_path': '/usr/local/bin/chromedriver'}" ]
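

# pymongo is imported above but never used in this module; in this kind of
# scraping challenge the dict returned by scrape() is usually written to
# MongoDB by a companion Flask app. A minimal sketch of that step follows;
# the connection URI and the 'mars_db'/'mars_data' names are assumptions for
# illustration, not taken from this file.
if __name__ == '__main__':
    client = pymongo.MongoClient('mongodb://localhost:27017')
    db = client.mars_db
    # upsert=True keeps a single document that is replaced on each scrape
    db.mars_data.update_one({}, {'$set': scrape()}, upsert=True)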
[ "self._fields.append(arg_field) # Width total_padding = padding * (len(self._fields) + 1) self._total_width = sum(f['width']", "== 'n+': evtype_fg_colour = self._config['trigger']['note_on_colour'] elif evtype == 'h': evtype_fg_colour = self._config['trigger']['hit_colour'] elif", "evtype_fg_colour = self._config['trigger']['hit_colour'] elif evtype == 'n-': evtype_fg_colour = self._config['trigger']['note_off_colour'] else: evtype_fg_colour =", "http://creativecommons.org/publicdomain/zero/1.0/ # # To the extent possible under law, Kunquat Affirmers have waived", "else: evtype_fg_colour = self._config['trigger']['default_colour'] # Set colours painter.save() if select: height = self._config['tr_height']", "cents = float(expr) name = self._notation.get_full_name(cents) except ValueError: return expr if name: return", "self._fields[index]['width'] return (offset, width) def get_total_width(self): return self._total_width def draw_trigger(self, painter, include_line=True, select=False):", "return { 'offset': offset, 'width': metrics.boundingRect(vis_text).width(), 'text': vis_text, } def _get_note_vis_name(self, expr): try:", "of Kunquat. # # CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ # # To the extent", "self._get_note_vis_name(expr) note_field = self._make_field_data(padding, note_name) self._fields.append(note_field) elif evtype == 'n-': vis_text = u'══'", "self._notation = notation self._setup_fields() def get_field_count(self): return len(self._fields) def get_field_bounds(self, index): offset =", "rights to Kunquat. # from PyQt4.QtCore import * from PyQt4.QtGui import * from", "Finland 2014 # # This file is part of Kunquat. # # CC0", "To the extent possible under law, Kunquat Affirmers have waived all # copyright", "!= None: if self._trigger.get_argument_type() == 'note': vis_text = self._get_note_vis_name(expr) else: vis_text = expr", "config self._trigger = trigger self._notation = notation self._setup_fields() def get_field_count(self): return len(self._fields) def", "# This file is part of Kunquat. # # CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/", "self._config['trigger']['hit_colour'] elif evtype == 'n-': evtype_fg_colour = self._config['trigger']['note_off_colour'] else: evtype_fg_colour = self._config['trigger']['default_colour'] #", "have waived all # copyright and related or neighboring rights to Kunquat. #", "= notation self._setup_fields() def get_field_count(self): return len(self._fields) def get_field_bounds(self, index): offset = self._fields[index]['offset']", "and related or neighboring rights to Kunquat. # from PyQt4.QtCore import * from", "note_field = self._make_field_data(padding, note_name) self._fields.append(note_field) elif evtype == 'n-': vis_text = u'══' note_off_field", "Universal, http://creativecommons.org/publicdomain/zero/1.0/ # # To the extent possible under law, Kunquat Affirmers have", "1, height - 1), evtype_fg_colour) painter.setPen(self._config['bg_colour'] if select else evtype_fg_colour) # Draw fields", "Author: <NAME>, Finland 2014 # # This file is part of Kunquat. 
#", "== 'h': evtype_fg_colour = self._config['trigger']['hit_colour'] elif evtype == 'n-': evtype_fg_colour = self._config['trigger']['note_off_colour'] else:", "evtype_fg_colour = self._config['trigger']['note_off_colour'] else: evtype_fg_colour = self._config['trigger']['default_colour'] # Set colours painter.save() if select:", "'offset': offset, 'width': metrics.boundingRect(vis_text).width(), 'text': vis_text, } def _get_note_vis_name(self, expr): try: cents =", "def get_total_width(self): return self._total_width def draw_trigger(self, painter, include_line=True, select=False): # Select colour based", "PyQt4.QtGui import * from config import * class TriggerRenderer(): def __init__(self, config, trigger,", "return self._total_width def draw_trigger(self, painter, include_line=True, select=False): # Select colour based on event", "from PyQt4.QtGui import * from config import * class TriggerRenderer(): def __init__(self, config,", "index): offset = self._fields[index]['offset'] width = self._fields[index]['width'] return (offset, width) def get_total_width(self): return", "# # CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ # # To the extent possible under", "arg_field = self._make_field_data( type_field['offset'] + type_field['width'] + padding, vis_text) self._fields.append(arg_field) # Width total_padding", "self._get_note_vis_name(expr) else: vis_text = expr arg_field = self._make_field_data( type_field['offset'] + type_field['width'] + padding,", "0)) painter.restore() def _make_field_data(self, offset, vis_text): metrics = self._config['font_metrics'] return { 'offset': offset,", "'note': vis_text = self._get_note_vis_name(expr) else: vis_text = expr arg_field = self._make_field_data( type_field['offset'] +", "Draw line only if not obscured by cursor if include_line: painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0,", "try: cents = float(expr) name = self._notation.get_full_name(cents) except ValueError: return expr if name:", "self._config['tr_height'] painter.fillRect( QRect(0, 0, self._total_width - 1, height - 1), evtype_fg_colour) painter.setPen(self._config['bg_colour'] if", "'n+': note_name = self._get_note_vis_name(expr) note_field = self._make_field_data(padding, note_name) self._fields.append(note_field) elif evtype == 'n-':", "This file is part of Kunquat. # # CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ #", "'n-': evtype_fg_colour = self._config['trigger']['note_off_colour'] else: evtype_fg_colour = self._config['trigger']['default_colour'] # Set colours painter.save() if", "Kunquat. 
# # CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ # # To the extent possible", "if name: return name else: return expr def _setup_fields(self): evtype = self._trigger.get_type() expr", "expr): try: cents = float(expr) name = self._notation.get_full_name(cents) except ValueError: return expr if", "include_line=True, select=False): # Select colour based on event type evtype = self._trigger.get_type() if", "evtype = self._trigger.get_type() expr = self._trigger.get_argument() metrics = self._config['font_metrics'] padding = self._config['trigger']['padding'] self._baseline_offset", "get_field_bounds(self, index): offset = self._fields[index]['offset'] width = self._fields[index]['width'] return (offset, width) def get_total_width(self):", "return expr def _setup_fields(self): evtype = self._trigger.get_type() expr = self._trigger.get_argument() metrics = self._config['font_metrics']", "# Author: <NAME>, Finland 2014 # # This file is part of Kunquat.", "self._trigger = trigger self._notation = notation self._setup_fields() def get_field_count(self): return len(self._fields) def get_field_bounds(self,", "2, 0)) painter.restore() def _make_field_data(self, offset, vis_text): metrics = self._config['font_metrics'] return { 'offset':", "expr != None: if self._trigger.get_argument_type() == 'note': vis_text = self._get_note_vis_name(expr) else: vis_text =", "painter.drawText( QPoint(field['offset'], self._baseline_offset), field['text']) painter.restore() # Draw line only if not obscured by", "colours painter.save() if select: height = self._config['tr_height'] painter.fillRect( QRect(0, 0, self._total_width - 1,", "= self._config['trigger']['note_on_colour'] elif evtype == 'h': evtype_fg_colour = self._config['trigger']['hit_colour'] elif evtype == 'n-':", "metrics = self._config['font_metrics'] return { 'offset': offset, 'width': metrics.boundingRect(vis_text).width(), 'text': vis_text, } def", "select: height = self._config['tr_height'] painter.fillRect( QRect(0, 0, self._total_width - 1, height - 1),", "height - 1), evtype_fg_colour) painter.setPen(self._config['bg_colour'] if select else evtype_fg_colour) # Draw fields for", "self._config['font_metrics'] padding = self._config['trigger']['padding'] self._baseline_offset = metrics.tightBoundingRect('A').height() self._fields = [] # Get field", "if self._trigger.get_argument_type() == 'note': vis_text = self._get_note_vis_name(expr) else: vis_text = expr arg_field =", "padding = self._config['trigger']['padding'] self._baseline_offset = metrics.tightBoundingRect('A').height() self._fields = [] # Get field bounds", "draw_trigger(self, painter, include_line=True, select=False): # Select colour based on event type evtype =", "from config import * class TriggerRenderer(): def __init__(self, config, trigger, notation): assert trigger", "notation): assert trigger self._config = config self._trigger = trigger self._notation = notation self._setup_fields()", "} def _get_note_vis_name(self, expr): try: cents = float(expr) name = self._notation.get_full_name(cents) except ValueError:", "def __init__(self, config, trigger, notation): assert trigger self._config = config self._trigger = trigger", "= expr arg_field = self._make_field_data( type_field['offset'] + type_field['width'] + padding, vis_text) self._fields.append(arg_field) #", "self._config['trigger']['padding'] self._baseline_offset = metrics.tightBoundingRect('A').height() self._fields = [] # Get field bounds if evtype", "0), QPoint(self._total_width - 2, 0)) 
painter.restore() def _make_field_data(self, offset, vis_text): metrics = self._config['font_metrics']", "possible under law, Kunquat Affirmers have waived all # copyright and related or", "type_field['offset'] + type_field['width'] + padding, vis_text) self._fields.append(arg_field) # Width total_padding = padding *", "[] # Get field bounds if evtype == 'n+': note_name = self._get_note_vis_name(expr) note_field", "'n-': vis_text = u'══' note_off_field = self._make_field_data(padding, vis_text) self._fields.append(note_off_field) else: type_field = self._make_field_data(padding,", "else evtype_fg_colour) # Draw fields for i, field in enumerate(self._fields): painter.drawText( QPoint(field['offset'], self._baseline_offset),", "only if not obscured by cursor if include_line: painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0), QPoint(self._total_width", "related or neighboring rights to Kunquat. # from PyQt4.QtCore import * from PyQt4.QtGui", "= self._config['font_metrics'] return { 'offset': offset, 'width': metrics.boundingRect(vis_text).width(), 'text': vis_text, } def _get_note_vis_name(self,", "self._make_field_data(padding, evtype) self._fields.append(type_field) if expr != None: if self._trigger.get_argument_type() == 'note': vis_text =", "get_total_width(self): return self._total_width def draw_trigger(self, painter, include_line=True, select=False): # Select colour based on", "QRect(0, 0, self._total_width - 1, height - 1), evtype_fg_colour) painter.setPen(self._config['bg_colour'] if select else", "include_line: painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0), QPoint(self._total_width - 2, 0)) painter.restore() def _make_field_data(self, offset,", "self._baseline_offset), field['text']) painter.restore() # Draw line only if not obscured by cursor if", "class TriggerRenderer(): def __init__(self, config, trigger, notation): assert trigger self._config = config self._trigger", "# # To the extent possible under law, Kunquat Affirmers have waived all", "== 'n+': note_name = self._get_note_vis_name(expr) note_field = self._make_field_data(padding, note_name) self._fields.append(note_field) elif evtype ==", "import * class TriggerRenderer(): def __init__(self, config, trigger, notation): assert trigger self._config =", "painter.fillRect( QRect(0, 0, self._total_width - 1, height - 1), evtype_fg_colour) painter.setPen(self._config['bg_colour'] if select", "in enumerate(self._fields): painter.drawText( QPoint(field['offset'], self._baseline_offset), field['text']) painter.restore() # Draw line only if not", "# # Author: <NAME>, Finland 2014 # # This file is part of", "self._config = config self._trigger = trigger self._notation = notation self._setup_fields() def get_field_count(self): return", "elif evtype == 'n-': evtype_fg_colour = self._config['trigger']['note_off_colour'] else: evtype_fg_colour = self._config['trigger']['default_colour'] # Set", "the extent possible under law, Kunquat Affirmers have waived all # copyright and", "self._notation.get_full_name(cents) except ValueError: return expr if name: return name else: return expr def", "def draw_trigger(self, painter, include_line=True, select=False): # Select colour based on event type evtype", "name: return name else: return expr def _setup_fields(self): evtype = self._trigger.get_type() expr =", "painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0), QPoint(self._total_width - 2, 0)) painter.restore() def _make_field_data(self, offset, vis_text): metrics", "evtype 
== 'n+': note_name = self._get_note_vis_name(expr) note_field = self._make_field_data(padding, note_name) self._fields.append(note_field) elif evtype", "= self._config['tr_height'] painter.fillRect( QRect(0, 0, self._total_width - 1, height - 1), evtype_fg_colour) painter.setPen(self._config['bg_colour']", "law, Kunquat Affirmers have waived all # copyright and related or neighboring rights", "# Get field bounds if evtype == 'n+': note_name = self._get_note_vis_name(expr) note_field =", "if include_line: painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0), QPoint(self._total_width - 2, 0)) painter.restore() def _make_field_data(self,", "- 1), evtype_fg_colour) painter.setPen(self._config['bg_colour'] if select else evtype_fg_colour) # Draw fields for i,", "metrics.tightBoundingRect('A').height() self._fields = [] # Get field bounds if evtype == 'n+': note_name", "(offset, width) def get_total_width(self): return self._total_width def draw_trigger(self, painter, include_line=True, select=False): # Select", "self._trigger.get_argument() metrics = self._config['font_metrics'] padding = self._config['trigger']['padding'] self._baseline_offset = metrics.tightBoundingRect('A').height() self._fields = []", "if expr != None: if self._trigger.get_argument_type() == 'note': vis_text = self._get_note_vis_name(expr) else: vis_text", "line only if not obscured by cursor if include_line: painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0),", "# # This file is part of Kunquat. # # CC0 1.0 Universal,", "note_name = self._get_note_vis_name(expr) note_field = self._make_field_data(padding, note_name) self._fields.append(note_field) elif evtype == 'n-': vis_text", "evtype == 'n-': evtype_fg_colour = self._config['trigger']['note_off_colour'] else: evtype_fg_colour = self._config['trigger']['default_colour'] # Set colours", "return len(self._fields) def get_field_bounds(self, index): offset = self._fields[index]['offset'] width = self._fields[index]['width'] return (offset,", "under law, Kunquat Affirmers have waived all # copyright and related or neighboring", "vis_text, } def _get_note_vis_name(self, expr): try: cents = float(expr) name = self._notation.get_full_name(cents) except", "-*- coding: utf-8 -*- # # Author: <NAME>, Finland 2014 # # This", "self._trigger.get_type() if evtype == 'n+': evtype_fg_colour = self._config['trigger']['note_on_colour'] elif evtype == 'h': evtype_fg_colour", "self._config['trigger']['note_off_colour'] else: evtype_fg_colour = self._config['trigger']['default_colour'] # Set colours painter.save() if select: height =", "config, trigger, notation): assert trigger self._config = config self._trigger = trigger self._notation =", "file is part of Kunquat. 
# # CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ # #", "self._config['trigger']['note_on_colour'] elif evtype == 'h': evtype_fg_colour = self._config['trigger']['hit_colour'] elif evtype == 'n-': evtype_fg_colour", "for i, field in enumerate(self._fields): painter.drawText( QPoint(field['offset'], self._baseline_offset), field['text']) painter.restore() # Draw line", "cursor if include_line: painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0), QPoint(self._total_width - 2, 0)) painter.restore() def", "return (offset, width) def get_total_width(self): return self._total_width def draw_trigger(self, painter, include_line=True, select=False): #", "trigger self._config = config self._trigger = trigger self._notation = notation self._setup_fields() def get_field_count(self):", "neighboring rights to Kunquat. # from PyQt4.QtCore import * from PyQt4.QtGui import *", "* (len(self._fields) + 1) self._total_width = sum(f['width'] for f in self._fields) + total_padding", "<NAME>, Finland 2014 # # This file is part of Kunquat. # #", "config import * class TriggerRenderer(): def __init__(self, config, trigger, notation): assert trigger self._config", "offset, vis_text): metrics = self._config['font_metrics'] return { 'offset': offset, 'width': metrics.boundingRect(vis_text).width(), 'text': vis_text,", "vis_text): metrics = self._config['font_metrics'] return { 'offset': offset, 'width': metrics.boundingRect(vis_text).width(), 'text': vis_text, }", "0, self._total_width - 1, height - 1), evtype_fg_colour) painter.setPen(self._config['bg_colour'] if select else evtype_fg_colour)", "== 'n-': vis_text = u'══' note_off_field = self._make_field_data(padding, vis_text) self._fields.append(note_off_field) else: type_field =", "evtype = self._trigger.get_type() if evtype == 'n+': evtype_fg_colour = self._config['trigger']['note_on_colour'] elif evtype ==", "# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ # # To the extent possible under law,", "bounds if evtype == 'n+': note_name = self._get_note_vis_name(expr) note_field = self._make_field_data(padding, note_name) self._fields.append(note_field)", "= self._trigger.get_type() if evtype == 'n+': evtype_fg_colour = self._config['trigger']['note_on_colour'] elif evtype == 'h':", "total_padding = padding * (len(self._fields) + 1) self._total_width = sum(f['width'] for f in", "to Kunquat. 
# from PyQt4.QtCore import * from PyQt4.QtGui import * from config", "else: type_field = self._make_field_data(padding, evtype) self._fields.append(type_field) if expr != None: if self._trigger.get_argument_type() ==", "= self._trigger.get_argument() metrics = self._config['font_metrics'] padding = self._config['trigger']['padding'] self._baseline_offset = metrics.tightBoundingRect('A').height() self._fields =", "= self._make_field_data(padding, evtype) self._fields.append(type_field) if expr != None: if self._trigger.get_argument_type() == 'note': vis_text", "trigger, notation): assert trigger self._config = config self._trigger = trigger self._notation = notation", "evtype == 'h': evtype_fg_colour = self._config['trigger']['hit_colour'] elif evtype == 'n-': evtype_fg_colour = self._config['trigger']['note_off_colour']", "width) def get_total_width(self): return self._total_width def draw_trigger(self, painter, include_line=True, select=False): # Select colour", "based on event type evtype = self._trigger.get_type() if evtype == 'n+': evtype_fg_colour =", "Affirmers have waived all # copyright and related or neighboring rights to Kunquat.", "width = self._fields[index]['width'] return (offset, width) def get_total_width(self): return self._total_width def draw_trigger(self, painter,", "def _get_note_vis_name(self, expr): try: cents = float(expr) name = self._notation.get_full_name(cents) except ValueError: return", "= self._fields[index]['width'] return (offset, width) def get_total_width(self): return self._total_width def draw_trigger(self, painter, include_line=True,", "all # copyright and related or neighboring rights to Kunquat. # from PyQt4.QtCore", "'n+': evtype_fg_colour = self._config['trigger']['note_on_colour'] elif evtype == 'h': evtype_fg_colour = self._config['trigger']['hit_colour'] elif evtype", "_make_field_data(self, offset, vis_text): metrics = self._config['font_metrics'] return { 'offset': offset, 'width': metrics.boundingRect(vis_text).width(), 'text':", "offset, 'width': metrics.boundingRect(vis_text).width(), 'text': vis_text, } def _get_note_vis_name(self, expr): try: cents = float(expr)", "* class TriggerRenderer(): def __init__(self, config, trigger, notation): assert trigger self._config = config", "= config self._trigger = trigger self._notation = notation self._setup_fields() def get_field_count(self): return len(self._fields)", "or neighboring rights to Kunquat. 
# from PyQt4.QtCore import * from PyQt4.QtGui import", "= self._config['trigger']['padding'] self._baseline_offset = metrics.tightBoundingRect('A').height() self._fields = [] # Get field bounds if", "if select else evtype_fg_colour) # Draw fields for i, field in enumerate(self._fields): painter.drawText(", "None: if self._trigger.get_argument_type() == 'note': vis_text = self._get_note_vis_name(expr) else: vis_text = expr arg_field", "= self._config['trigger']['default_colour'] # Set colours painter.save() if select: height = self._config['tr_height'] painter.fillRect( QRect(0,", "coding: utf-8 -*- # # Author: <NAME>, Finland 2014 # # This file", "vis_text = u'══' note_off_field = self._make_field_data(padding, vis_text) self._fields.append(note_off_field) else: type_field = self._make_field_data(padding, evtype)", "if not obscured by cursor if include_line: painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0), QPoint(self._total_width -", "Width total_padding = padding * (len(self._fields) + 1) self._total_width = sum(f['width'] for f", "= self._config['font_metrics'] padding = self._config['trigger']['padding'] self._baseline_offset = metrics.tightBoundingRect('A').height() self._fields = [] # Get", "PyQt4.QtCore import * from PyQt4.QtGui import * from config import * class TriggerRenderer():", "expr if name: return name else: return expr def _setup_fields(self): evtype = self._trigger.get_type()", "evtype == 'n-': vis_text = u'══' note_off_field = self._make_field_data(padding, vis_text) self._fields.append(note_off_field) else: type_field", "field in enumerate(self._fields): painter.drawText( QPoint(field['offset'], self._baseline_offset), field['text']) painter.restore() # Draw line only if", "painter.restore() def _make_field_data(self, offset, vis_text): metrics = self._config['font_metrics'] return { 'offset': offset, 'width':", "extent possible under law, Kunquat Affirmers have waived all # copyright and related", "select=False): # Select colour based on event type evtype = self._trigger.get_type() if evtype", "* from PyQt4.QtGui import * from config import * class TriggerRenderer(): def __init__(self,", "offset = self._fields[index]['offset'] width = self._fields[index]['width'] return (offset, width) def get_total_width(self): return self._total_width", "{ 'offset': offset, 'width': metrics.boundingRect(vis_text).width(), 'text': vis_text, } def _get_note_vis_name(self, expr): try: cents", "ValueError: return expr if name: return name else: return expr def _setup_fields(self): evtype", "expr arg_field = self._make_field_data( type_field['offset'] + type_field['width'] + padding, vis_text) self._fields.append(arg_field) # Width", "QPoint(field['offset'], self._baseline_offset), field['text']) painter.restore() # Draw line only if not obscured by cursor", "fields for i, field in enumerate(self._fields): painter.drawText( QPoint(field['offset'], self._baseline_offset), field['text']) painter.restore() # Draw", "+ padding, vis_text) self._fields.append(arg_field) # Width total_padding = padding * (len(self._fields) + 1)", "'h': evtype_fg_colour = self._config['trigger']['hit_colour'] elif evtype == 'n-': evtype_fg_colour = self._config['trigger']['note_off_colour'] else: evtype_fg_colour", "evtype_fg_colour) # Draw fields for i, field in enumerate(self._fields): painter.drawText( QPoint(field['offset'], self._baseline_offset), field['text'])", "evtype) self._fields.append(type_field) if expr != None: if self._trigger.get_argument_type() == 'note': vis_text = 
self._get_note_vis_name(expr)", "notation self._setup_fields() def get_field_count(self): return len(self._fields) def get_field_bounds(self, index): offset = self._fields[index]['offset'] width", "painter.restore() # Draw line only if not obscured by cursor if include_line: painter.save()", "vis_text = expr arg_field = self._make_field_data( type_field['offset'] + type_field['width'] + padding, vis_text) self._fields.append(arg_field)", "Kunquat Affirmers have waived all # copyright and related or neighboring rights to", "import * from PyQt4.QtGui import * from config import * class TriggerRenderer(): def", "expr = self._trigger.get_argument() metrics = self._config['font_metrics'] padding = self._config['trigger']['padding'] self._baseline_offset = metrics.tightBoundingRect('A').height() self._fields", "type_field['width'] + padding, vis_text) self._fields.append(arg_field) # Width total_padding = padding * (len(self._fields) +", "painter.setPen(self._config['bg_colour'] if select else evtype_fg_colour) # Draw fields for i, field in enumerate(self._fields):", "metrics = self._config['font_metrics'] padding = self._config['trigger']['padding'] self._baseline_offset = metrics.tightBoundingRect('A').height() self._fields = [] #", "painter, include_line=True, select=False): # Select colour based on event type evtype = self._trigger.get_type()", "return expr if name: return name else: return expr def _setup_fields(self): evtype =", "evtype_fg_colour) painter.setPen(self._config['bg_colour'] if select else evtype_fg_colour) # Draw fields for i, field in", "# Set colours painter.save() if select: height = self._config['tr_height'] painter.fillRect( QRect(0, 0, self._total_width", "Set colours painter.save() if select: height = self._config['tr_height'] painter.fillRect( QRect(0, 0, self._total_width -", "part of Kunquat. # # CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ # # To the", "+ type_field['width'] + padding, vis_text) self._fields.append(arg_field) # Width total_padding = padding * (len(self._fields)", "= self._trigger.get_type() expr = self._trigger.get_argument() metrics = self._config['font_metrics'] padding = self._config['trigger']['padding'] self._baseline_offset =", "# from PyQt4.QtCore import * from PyQt4.QtGui import * from config import *", "float(expr) name = self._notation.get_full_name(cents) except ValueError: return expr if name: return name else:", "field['text']) painter.restore() # Draw line only if not obscured by cursor if include_line:", "padding * (len(self._fields) + 1) self._total_width = sum(f['width'] for f in self._fields) +", "# Width total_padding = padding * (len(self._fields) + 1) self._total_width = sum(f['width'] for", "is part of Kunquat. 
# # CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ # # To", "1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ # # To the extent possible under law, Kunquat Affirmers", "= self._make_field_data(padding, vis_text) self._fields.append(note_off_field) else: type_field = self._make_field_data(padding, evtype) self._fields.append(type_field) if expr !=", "len(self._fields) def get_field_bounds(self, index): offset = self._fields[index]['offset'] width = self._fields[index]['width'] return (offset, width)", "i, field in enumerate(self._fields): painter.drawText( QPoint(field['offset'], self._baseline_offset), field['text']) painter.restore() # Draw line only", "event type evtype = self._trigger.get_type() if evtype == 'n+': evtype_fg_colour = self._config['trigger']['note_on_colour'] elif", "note_name) self._fields.append(note_field) elif evtype == 'n-': vis_text = u'══' note_off_field = self._make_field_data(padding, vis_text)", "- 1, height - 1), evtype_fg_colour) painter.setPen(self._config['bg_colour'] if select else evtype_fg_colour) # Draw", "return name else: return expr def _setup_fields(self): evtype = self._trigger.get_type() expr = self._trigger.get_argument()", "= self._get_note_vis_name(expr) else: vis_text = expr arg_field = self._make_field_data( type_field['offset'] + type_field['width'] +", "self._trigger.get_argument_type() == 'note': vis_text = self._get_note_vis_name(expr) else: vis_text = expr arg_field = self._make_field_data(", "height = self._config['tr_height'] painter.fillRect( QRect(0, 0, self._total_width - 1, height - 1), evtype_fg_colour)", "elif evtype == 'h': evtype_fg_colour = self._config['trigger']['hit_colour'] elif evtype == 'n-': evtype_fg_colour =", "painter.save() if select: height = self._config['tr_height'] painter.fillRect( QRect(0, 0, self._total_width - 1, height", "= self._fields[index]['offset'] width = self._fields[index]['width'] return (offset, width) def get_total_width(self): return self._total_width def", "# copyright and related or neighboring rights to Kunquat. # from PyQt4.QtCore import", "trigger self._notation = notation self._setup_fields() def get_field_count(self): return len(self._fields) def get_field_bounds(self, index): offset", "Kunquat. # from PyQt4.QtCore import * from PyQt4.QtGui import * from config import", "2014 # # This file is part of Kunquat. 
# # CC0 1.0", "evtype == 'n+': evtype_fg_colour = self._config['trigger']['note_on_colour'] elif evtype == 'h': evtype_fg_colour = self._config['trigger']['hit_colour']", "__init__(self, config, trigger, notation): assert trigger self._config = config self._trigger = trigger self._notation", "vis_text) self._fields.append(note_off_field) else: type_field = self._make_field_data(padding, evtype) self._fields.append(type_field) if expr != None: if", "* from config import * class TriggerRenderer(): def __init__(self, config, trigger, notation): assert", "_setup_fields(self): evtype = self._trigger.get_type() expr = self._trigger.get_argument() metrics = self._config['font_metrics'] padding = self._config['trigger']['padding']", "def _setup_fields(self): evtype = self._trigger.get_type() expr = self._trigger.get_argument() metrics = self._config['font_metrics'] padding =", "self._make_field_data(padding, vis_text) self._fields.append(note_off_field) else: type_field = self._make_field_data(padding, evtype) self._fields.append(type_field) if expr != None:", "self._config['trigger']['default_colour'] # Set colours painter.save() if select: height = self._config['tr_height'] painter.fillRect( QRect(0, 0,", "= self._config['trigger']['note_off_colour'] else: evtype_fg_colour = self._config['trigger']['default_colour'] # Set colours painter.save() if select: height", "= float(expr) name = self._notation.get_full_name(cents) except ValueError: return expr if name: return name", "except ValueError: return expr if name: return name else: return expr def _setup_fields(self):", "= metrics.tightBoundingRect('A').height() self._fields = [] # Get field bounds if evtype == 'n+':", "def get_field_bounds(self, index): offset = self._fields[index]['offset'] width = self._fields[index]['width'] return (offset, width) def", "== 'note': vis_text = self._get_note_vis_name(expr) else: vis_text = expr arg_field = self._make_field_data( type_field['offset']", "else: return expr def _setup_fields(self): evtype = self._trigger.get_type() expr = self._trigger.get_argument() metrics =", "else: vis_text = expr arg_field = self._make_field_data( type_field['offset'] + type_field['width'] + padding, vis_text)", "= self._make_field_data(padding, note_name) self._fields.append(note_field) elif evtype == 'n-': vis_text = u'══' note_off_field =", "type evtype = self._trigger.get_type() if evtype == 'n+': evtype_fg_colour = self._config['trigger']['note_on_colour'] elif evtype", "self._baseline_offset = metrics.tightBoundingRect('A').height() self._fields = [] # Get field bounds if evtype ==", "enumerate(self._fields): painter.drawText( QPoint(field['offset'], self._baseline_offset), field['text']) painter.restore() # Draw line only if not obscured", "Get field bounds if evtype == 'n+': note_name = self._get_note_vis_name(expr) note_field = self._make_field_data(padding,", "= padding * (len(self._fields) + 1) self._total_width = sum(f['width'] for f in self._fields)", "_get_note_vis_name(self, expr): try: cents = float(expr) name = self._notation.get_full_name(cents) except ValueError: return expr", "# Draw line only if not obscured by cursor if include_line: painter.save() painter.setPen(evtype_fg_colour)", "metrics.boundingRect(vis_text).width(), 'text': vis_text, } def _get_note_vis_name(self, expr): try: cents = float(expr) name =", "# To the extent possible under law, Kunquat Affirmers have waived all #", "colour based on event type evtype = self._trigger.get_type() if evtype == 'n+': evtype_fg_colour", 
"self._make_field_data(padding, note_name) self._fields.append(note_field) elif evtype == 'n-': vis_text = u'══' note_off_field = self._make_field_data(padding,", "def get_field_count(self): return len(self._fields) def get_field_bounds(self, index): offset = self._fields[index]['offset'] width = self._fields[index]['width']", "note_off_field = self._make_field_data(padding, vis_text) self._fields.append(note_off_field) else: type_field = self._make_field_data(padding, evtype) self._fields.append(type_field) if expr", "from PyQt4.QtCore import * from PyQt4.QtGui import * from config import * class", "utf-8 -*- # # Author: <NAME>, Finland 2014 # # This file is", "elif evtype == 'n-': vis_text = u'══' note_off_field = self._make_field_data(padding, vis_text) self._fields.append(note_off_field) else:", "-*- # # Author: <NAME>, Finland 2014 # # This file is part", "= [] # Get field bounds if evtype == 'n+': note_name = self._get_note_vis_name(expr)", "TriggerRenderer(): def __init__(self, config, trigger, notation): assert trigger self._config = config self._trigger =", "== 'n-': evtype_fg_colour = self._config['trigger']['note_off_colour'] else: evtype_fg_colour = self._config['trigger']['default_colour'] # Set colours painter.save()", "not obscured by cursor if include_line: painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0), QPoint(self._total_width - 2,", "CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ # # To the extent possible under law, Kunquat", "vis_text) self._fields.append(arg_field) # Width total_padding = padding * (len(self._fields) + 1) self._total_width =", "copyright and related or neighboring rights to Kunquat. # from PyQt4.QtCore import *", "= self._make_field_data( type_field['offset'] + type_field['width'] + padding, vis_text) self._fields.append(arg_field) # Width total_padding =", "name = self._notation.get_full_name(cents) except ValueError: return expr if name: return name else: return", "obscured by cursor if include_line: painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0), QPoint(self._total_width - 2, 0))", "vis_text = self._get_note_vis_name(expr) else: vis_text = expr arg_field = self._make_field_data( type_field['offset'] + type_field['width']", "if evtype == 'n+': evtype_fg_colour = self._config['trigger']['note_on_colour'] elif evtype == 'h': evtype_fg_colour =", "self._setup_fields() def get_field_count(self): return len(self._fields) def get_field_bounds(self, index): offset = self._fields[index]['offset'] width =", "if evtype == 'n+': note_name = self._get_note_vis_name(expr) note_field = self._make_field_data(padding, note_name) self._fields.append(note_field) elif", "u'══' note_off_field = self._make_field_data(padding, vis_text) self._fields.append(note_off_field) else: type_field = self._make_field_data(padding, evtype) self._fields.append(type_field) if", "def _make_field_data(self, offset, vis_text): metrics = self._config['font_metrics'] return { 'offset': offset, 'width': metrics.boundingRect(vis_text).width(),", "name else: return expr def _setup_fields(self): evtype = self._trigger.get_type() expr = self._trigger.get_argument() metrics", "self._trigger.get_type() expr = self._trigger.get_argument() metrics = self._config['font_metrics'] padding = self._config['trigger']['padding'] self._baseline_offset = metrics.tightBoundingRect('A').height()", "# Draw fields for i, field in enumerate(self._fields): painter.drawText( QPoint(field['offset'], self._baseline_offset), 
field['text']) painter.restore()", "= self._get_note_vis_name(expr) note_field = self._make_field_data(padding, note_name) self._fields.append(note_field) elif evtype == 'n-': vis_text =", "Select colour based on event type evtype = self._trigger.get_type() if evtype == 'n+':", "waived all # copyright and related or neighboring rights to Kunquat. # from", "on event type evtype = self._trigger.get_type() if evtype == 'n+': evtype_fg_colour = self._config['trigger']['note_on_colour']", "= self._notation.get_full_name(cents) except ValueError: return expr if name: return name else: return expr", "import * from config import * class TriggerRenderer(): def __init__(self, config, trigger, notation):", "= u'══' note_off_field = self._make_field_data(padding, vis_text) self._fields.append(note_off_field) else: type_field = self._make_field_data(padding, evtype) self._fields.append(type_field)", "field bounds if evtype == 'n+': note_name = self._get_note_vis_name(expr) note_field = self._make_field_data(padding, note_name)", "'text': vis_text, } def _get_note_vis_name(self, expr): try: cents = float(expr) name = self._notation.get_full_name(cents)", "type_field = self._make_field_data(padding, evtype) self._fields.append(type_field) if expr != None: if self._trigger.get_argument_type() == 'note':", "painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0), QPoint(self._total_width - 2, 0)) painter.restore() def _make_field_data(self, offset, vis_text):", "padding, vis_text) self._fields.append(arg_field) # Width total_padding = padding * (len(self._fields) + 1) self._total_width", "by cursor if include_line: painter.save() painter.setPen(evtype_fg_colour) painter.drawLine(QPoint(0, 0), QPoint(self._total_width - 2, 0)) painter.restore()", "self._total_width def draw_trigger(self, painter, include_line=True, select=False): # Select colour based on event type", "# -*- coding: utf-8 -*- # # Author: <NAME>, Finland 2014 # #", "self._fields[index]['offset'] width = self._fields[index]['width'] return (offset, width) def get_total_width(self): return self._total_width def draw_trigger(self,", "= trigger self._notation = notation self._setup_fields() def get_field_count(self): return len(self._fields) def get_field_bounds(self, index):", "evtype_fg_colour = self._config['trigger']['default_colour'] # Set colours painter.save() if select: height = self._config['tr_height'] painter.fillRect(", "if select: height = self._config['tr_height'] painter.fillRect( QRect(0, 0, self._total_width - 1, height -", "assert trigger self._config = config self._trigger = trigger self._notation = notation self._setup_fields() def", "- 2, 0)) painter.restore() def _make_field_data(self, offset, vis_text): metrics = self._config['font_metrics'] return {", "'width': metrics.boundingRect(vis_text).width(), 'text': vis_text, } def _get_note_vis_name(self, expr): try: cents = float(expr) name", "self._config['font_metrics'] return { 'offset': offset, 'width': metrics.boundingRect(vis_text).width(), 'text': vis_text, } def _get_note_vis_name(self, expr):", "painter.drawLine(QPoint(0, 0), QPoint(self._total_width - 2, 0)) painter.restore() def _make_field_data(self, offset, vis_text): metrics =", "# Select colour based on event type evtype = self._trigger.get_type() if evtype ==", "self._fields.append(note_field) elif evtype == 'n-': vis_text = u'══' note_off_field = self._make_field_data(padding, vis_text) self._fields.append(note_off_field)", "get_field_count(self): return len(self._fields) def 
get_field_bounds(self, index): offset = self._fields[index]['offset'] width = self._fields[index]['width'] return", "self._total_width - 1, height - 1), evtype_fg_colour) painter.setPen(self._config['bg_colour'] if select else evtype_fg_colour) #", "self._fields = [] # Get field bounds if evtype == 'n+': note_name =", "select else evtype_fg_colour) # Draw fields for i, field in enumerate(self._fields): painter.drawText( QPoint(field['offset'],", "self._fields.append(note_off_field) else: type_field = self._make_field_data(padding, evtype) self._fields.append(type_field) if expr != None: if self._trigger.get_argument_type()", "Draw fields for i, field in enumerate(self._fields): painter.drawText( QPoint(field['offset'], self._baseline_offset), field['text']) painter.restore() #", "= self._config['trigger']['hit_colour'] elif evtype == 'n-': evtype_fg_colour = self._config['trigger']['note_off_colour'] else: evtype_fg_colour = self._config['trigger']['default_colour']", "evtype_fg_colour = self._config['trigger']['note_on_colour'] elif evtype == 'h': evtype_fg_colour = self._config['trigger']['hit_colour'] elif evtype ==", "expr def _setup_fields(self): evtype = self._trigger.get_type() expr = self._trigger.get_argument() metrics = self._config['font_metrics'] padding", "self._fields.append(type_field) if expr != None: if self._trigger.get_argument_type() == 'note': vis_text = self._get_note_vis_name(expr) else:", "self._make_field_data( type_field['offset'] + type_field['width'] + padding, vis_text) self._fields.append(arg_field) # Width total_padding = padding", "1), evtype_fg_colour) painter.setPen(self._config['bg_colour'] if select else evtype_fg_colour) # Draw fields for i, field", "QPoint(self._total_width - 2, 0)) painter.restore() def _make_field_data(self, offset, vis_text): metrics = self._config['font_metrics'] return" ]
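
# A minimal usage sketch (my addition, not part of Kunquat): drive
# TriggerRenderer with stub trigger/notation objects and paint one note-on
# trigger into an off-screen QImage.  The stub classes and every config
# value below are assumptions for illustration only.

from PyQt4.QtGui import (QApplication, QImage, QPainter, QFont,
        QFontMetrics, QColor)

class _StubTrigger(object):
    def get_type(self):
        return 'n+'              # note-on event
    def get_argument(self):
        return '0'               # pitch expression in cents
    def get_argument_type(self):
        return 'note'

class _StubNotation(object):
    def get_full_name(self, cents):
        return 'C4' if cents == 0 else None

if __name__ == '__main__':
    app = QApplication([])       # QFontMetrics needs a live QApplication
    font = QFont('monospace', 10)
    stub_config = {
        'font_metrics': QFontMetrics(font),
        'bg_colour': QColor('#000000'),
        'tr_height': 14,
        'trigger': {
            'padding': 2,
            'note_on_colour': QColor('#ffdddd'),
            'note_off_colour': QColor('#ddaaaa'),
            'hit_colour': QColor('#ddddff'),
            'default_colour': QColor('#ddddaa'),
        },
    }
    renderer = TriggerRenderer(stub_config, _StubTrigger(), _StubNotation())
    image = QImage(renderer.get_total_width(), stub_config['tr_height'],
            QImage.Format_ARGB32)
    painter = QPainter(image)
    painter.setFont(font)
    renderer.draw_trigger(painter)   # paints 'C4' in the note-on colour
    painter.end()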
[ "License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "writing, software # distributed under the License is distributed on an \"AS IS\"", "resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) <EMAIL> def test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account): os_access = {} vims", "assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] for", "assert resp['name'] == vim_name assert resp['vim_type'] == 'openstack' assert resp['vim_url'] == os_access['vim-url'] assert", "vim['name'] assert resp['vim_type'] == vim['vim-type'] assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username']", "resp['name'] == vim_name assert resp['vim_type'] == 'openstack' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user']", "assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert resp['vim_type'] == 'vmware' assert", "Unless required by applicable law or agreed to in writing, software # distributed", "os_access['vim-type'] = 'openstack' os_access['description'] = 'a test vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert", "'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['description'] = 'a test vim' for", "{'name': 'testvim1', 'vim-type': 'openstack'}, {'name': 'testvim2','vim-type': 'vmware'} ] os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] =", "See the # License for the specific language governing permissions and limitations #", "@pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware class TestClass(object): def test_empty_vim(self,osm): assert not osm.get_api().vim.list() @pytest.fixture(scope='function') def cleanup_test_add_vim_account(self,osm,request):", "test_empty_vim(self,osm): assert not osm.get_api().vim.list() @pytest.fixture(scope='function') def cleanup_test_add_vim_account(self,osm,request): def teardown(): try: for vim in", "= '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['description'] = 'a test vim' for vim in", "vim' for vim in vims: os_access['vim-type'] = vim['vim-type'] assert not osm.get_api().vim.create(vim['name'],os_access) resp=osm.get_api().vim.get(vim['name']) assert", "\"License\"); you may # not use this file except in compliance with the", "= 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] =", "@pytest.fixture(scope='function') def cleanup_test_add_vim_account(self,osm,request): def teardown(): try: for vim in osm.get_api().vim.list(False): osm.get_api().vim.delete(vim['name']) except: pass", "os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['description']", "Apache License, Version 2.0 (the \"License\"); you may # not use this file", "the License. You may obtain # a copy of the License at #", "# under the License. 
import pytest import time @pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware class TestClass(object):", "import time @pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware class TestClass(object): def test_empty_vim(self,osm): assert not osm.get_api().vim.list() @pytest.fixture(scope='function')", "resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) <EMAIL> def test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account):", "law or agreed to in writing, software # distributed under the License is", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); you", "may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the Apache License, Version 2.0 (the \"License\"); you may # not use this", "vim_name = 'helloworld-vmware' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name']", "] os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3'", "resp=osm.get_api().vim.get(vim['name']) assert resp['name'] == vim['name'] assert resp['vim_type'] == vim['vim-type'] assert resp['vim_url'] == os_access['vim-url']", "assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] for vim in osm.get_api().vim.list(False): osm.get_api().vim.delete(vim['name'])", "resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) @pytest.mark.vmware #@<EMAIL> def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account): os_access = {}", "@pytest.mark.vmware #@<EMAIL> def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-vmware' os_access['vim-url'] = 'https://169.254.169.245/'", "os_access = {} vim_name = 'helloworld-vmware' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password']", "assert not osm.get_api().vim.create(vim['name'],os_access) resp=osm.get_api().vim.get(vim['name']) assert resp['name'] == vim['name'] assert resp['vim_type'] == vim['vim-type'] assert", "= {} vim_name = 'helloworld-os' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] =", "language governing permissions and limitations # under the License. import pytest import time", "test vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert resp['vim_type'] ==", "express or implied. 
See the # License for the specific language governing permissions", "vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert resp['vim_type'] == 'vmware'", "'pytest3' os_access['description'] = 'a test vim' for vim in vims: os_access['vim-type'] = vim['vim-type']", "@pytest.mark.openstack @pytest.mark.smoke def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-os' os_access['vim-url'] = 'https://169.254.169.245/'", "'testvim1', 'vim-type': 'openstack'}, {'name': 'testvim2','vim-type': 'vmware'} ] os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2'", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "CONDITIONS OF ANY KIND, either express or implied. See the # License for", "not use this file except in compliance with the License. You may obtain", "the License. import pytest import time @pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware class TestClass(object): def test_empty_vim(self,osm):", "assert not osm.get_api().vim.delete(vim_name) @pytest.mark.vmware #@<EMAIL> def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-vmware'", "'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'vmware' os_access['description'] = 'a", "permissions and limitations # under the License. import pytest import time @pytest.mark.vim @pytest.mark.openstack", "not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert resp['vim_type'] == 'vmware' assert resp['vim_url']", "request.addfinalizer(teardown) @pytest.mark.openstack @pytest.mark.smoke def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-os' os_access['vim-url'] =", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "with the License. You may obtain # a copy of the License at", "for the specific language governing permissions and limitations # under the License. 
import", "'vmware' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name']", "vim in vims: os_access['vim-type'] = vim['vim-type'] assert not osm.get_api().vim.create(vim['name'],os_access) resp=osm.get_api().vim.get(vim['name']) assert resp['name'] ==", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not", "== os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) @pytest.mark.vmware #@<EMAIL> def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account): os_access = {} vim_name", "vim_name assert resp['vim_type'] == 'vmware' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username']", "resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not", "vim['vim-type'] assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name']", "License for the specific language governing permissions and limitations # under the License.", "time @pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware class TestClass(object): def test_empty_vim(self,osm): assert not osm.get_api().vim.list() @pytest.fixture(scope='function') def", "os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['description'] = 'a test vim' for vim", "os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) @pytest.mark.vmware", "@pytest.mark.vmware class TestClass(object): def test_empty_vim(self,osm): assert not osm.get_api().vim.list() @pytest.fixture(scope='function') def cleanup_test_add_vim_account(self,osm,request): def teardown():", "2.0 (the \"License\"); you may # not use this file except in compliance", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "= 'pytest3' os_access['vim-type'] = 'openstack' os_access['description'] = 'a test vim' assert not osm.get_api().vim.create(vim_name,os_access)", "'pytest3' os_access['vim-type'] = 'vmware' os_access['description'] = 'a test vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name)", "= 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'vmware' os_access['description'] =", "= {} vim_name = 'helloworld-vmware' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "== vim['vim-type'] assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] ==", "assert resp['vim_type'] == vim['vim-type'] assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert", "use this file except in compliance with the License. 
You may obtain #", "== 'vmware' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] ==", "vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert resp['vim_type'] == 'openstack'", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "'a test vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert resp['vim_type']", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #", "compliance with the License. You may obtain # a copy of the License", "os_access['description'] = 'a test vim' for vim in vims: os_access['vim-type'] = vim['vim-type'] assert", "osm.get_api().vim.create(vim['name'],os_access) resp=osm.get_api().vim.get(vim['name']) assert resp['name'] == vim['name'] assert resp['vim_type'] == vim['vim-type'] assert resp['vim_url'] ==", "License, Version 2.0 (the \"License\"); you may # not use this file except", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "'vmware' os_access['description'] = 'a test vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] ==", "== vim_name assert resp['vim_type'] == 'vmware' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] ==", "try: for vim in osm.get_api().vim.list(False): osm.get_api().vim.delete(vim['name']) except: pass request.addfinalizer(teardown) @pytest.mark.openstack @pytest.mark.smoke def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account):", "@pytest.mark.smoke def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-os' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username']", "class TestClass(object): def test_empty_vim(self,osm): assert not osm.get_api().vim.list() @pytest.fixture(scope='function') def cleanup_test_add_vim_account(self,osm,request): def teardown(): try:", "os_access = {} vim_name = 'helloworld-os' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password']", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "implied. See the # License for the specific language governing permissions and limitations", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "OF ANY KIND, either express or implied. See the # License for the", "os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'openstack' os_access['description'] = 'a test", "== vim['name'] assert resp['vim_type'] == vim['vim-type'] assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] ==", "test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-vmware' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2'", "License. 
import pytest import time @pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware class TestClass(object): def test_empty_vim(self,osm): assert", "assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) <EMAIL> def test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account): os_access = {}", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "Copyright 2017 Sandvine # # All Rights Reserved. # # Licensed under the", "= 'a test vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert", "for vim in vims: os_access['vim-type'] = vim['vim-type'] assert not osm.get_api().vim.create(vim['name'],os_access) resp=osm.get_api().vim.get(vim['name']) assert resp['name']", "vims = [ {'name': 'testvim1', 'vim-type': 'openstack'}, {'name': 'testvim2','vim-type': 'vmware'} ] os_access['vim-url'] =", "os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) <EMAIL> def test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account): os_access = {} vims = [", "not osm.get_api().vim.list() @pytest.fixture(scope='function') def cleanup_test_add_vim_account(self,osm,request): def teardown(): try: for vim in osm.get_api().vim.list(False): osm.get_api().vim.delete(vim['name'])", "'openstack'}, {'name': 'testvim2','vim-type': 'vmware'} ] os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] =", "# Copyright 2017 Sandvine # # All Rights Reserved. # # Licensed under", "pytest import time @pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware class TestClass(object): def test_empty_vim(self,osm): assert not osm.get_api().vim.list()", "pass request.addfinalizer(teardown) @pytest.mark.openstack @pytest.mark.smoke def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-os' os_access['vim-url']", "vim['vim-type'] assert not osm.get_api().vim.create(vim['name'],os_access) resp=osm.get_api().vim.get(vim['name']) assert resp['name'] == vim['name'] assert resp['vim_type'] == vim['vim-type']", "= 'helloworld-vmware' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] =", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the", "os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'openstack' os_access['description']", "test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account): os_access = {} vims = [ {'name': 'testvim1', 'vim-type': 'openstack'}, {'name': 'testvim2','vim-type':", "in vims: os_access['vim-type'] = vim['vim-type'] assert not osm.get_api().vim.create(vim['name'],os_access) resp=osm.get_api().vim.get(vim['name']) assert resp['name'] == vim['name']", "def test_empty_vim(self,osm): assert not osm.get_api().vim.list() @pytest.fixture(scope='function') def cleanup_test_add_vim_account(self,osm,request): def teardown(): try: for vim", "def cleanup_test_add_vim_account(self,osm,request): def teardown(): try: for vim in osm.get_api().vim.list(False): osm.get_api().vim.delete(vim['name']) except: pass request.addfinalizer(teardown)", "<EMAIL> def test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account): os_access = {} vims = [ {'name': 'testvim1', 'vim-type': 'openstack'},", "'helloworld-os' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3'", "resp['vim_type'] == 'openstack' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name']", "you may # not use this file except in compliance with the License.", "assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) @pytest.mark.vmware #@<EMAIL>", "resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert resp['vim_type'] == 'vmware' assert resp['vim_url'] == os_access['vim-url']", "assert resp['name'] == vim_name assert resp['vim_type'] == 'vmware' assert resp['vim_url'] == os_access['vim-url'] assert", "= 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['description'] = 'a test vim'", "resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert resp['vim_type'] == 'openstack' assert resp['vim_url'] == os_access['vim-url']", "agreed to in writing, software # distributed under the License is distributed on", "def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-os' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] =", "assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) @pytest.mark.vmware #@<EMAIL> def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account): os_access =", "(the \"License\"); you may # not use this file except in compliance with", "resp['vim_type'] == vim['vim-type'] assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name']", "may # not use this file except in compliance with the License. You", "KIND, either express or implied. See the # License for the specific language", "assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert resp['vim_type'] == 'openstack' assert", "either express or implied. 
See the # License for the specific language governing", "= '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'openstack' os_access['description'] = 'a test vim'", "# # Unless required by applicable law or agreed to in writing, software", "file except in compliance with the License. You may obtain # a copy", "this file except in compliance with the License. You may obtain # a", "== os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) <EMAIL> def test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account): os_access", "# Unless required by applicable law or agreed to in writing, software #", "osm.get_api().vim.delete(vim_name) @pytest.mark.vmware #@<EMAIL> def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-vmware' os_access['vim-url'] =", "= [ {'name': 'testvim1', 'vim-type': 'openstack'}, {'name': 'testvim2','vim-type': 'vmware'} ] os_access['vim-url'] = 'https://169.254.169.245/'", "os_access['description'] = 'a test vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name", "'testvim2','vim-type': 'vmware'} ] os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name']", "os_access['vim-type'] = vim['vim-type'] assert not osm.get_api().vim.create(vim['name'],os_access) resp=osm.get_api().vim.get(vim['name']) assert resp['name'] == vim['name'] assert resp['vim_type']", "by applicable law or agreed to in writing, software # distributed under the", "All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "the specific language governing permissions and limitations # under the License. import pytest", "os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'openstack' os_access['description'] = 'a test vim' assert not", "2017 Sandvine # # All Rights Reserved. # # Licensed under the Apache", "in osm.get_api().vim.list(False): osm.get_api().vim.delete(vim['name']) except: pass request.addfinalizer(teardown) @pytest.mark.openstack @pytest.mark.smoke def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access = {}", "governing permissions and limitations # under the License. import pytest import time @pytest.mark.vim", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "or implied. 
See the # License for the specific language governing permissions and", "== vim_name assert resp['vim_type'] == 'openstack' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] ==", "vim_name assert resp['vim_type'] == 'openstack' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username']", "resp['name'] == vim_name assert resp['vim_type'] == 'vmware' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user']", "resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) @pytest.mark.vmware #@<EMAIL> def", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "== os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) @pytest.mark.vmware #@<EMAIL> def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account):", "'<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'vmware' os_access['description'] = 'a test vim' assert", "os_access['vim-type'] = 'vmware' os_access['description'] = 'a test vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert", "import pytest import time @pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware class TestClass(object): def test_empty_vim(self,osm): assert not", "not osm.get_api().vim.create(vim['name'],os_access) resp=osm.get_api().vim.get(vim['name']) assert resp['name'] == vim['name'] assert resp['vim_type'] == vim['vim-type'] assert resp['vim_url']", "except: pass request.addfinalizer(teardown) @pytest.mark.openstack @pytest.mark.smoke def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-os'", "test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-os' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2'", "osm.get_api().vim.list(False): osm.get_api().vim.delete(vim['name']) except: pass request.addfinalizer(teardown) @pytest.mark.openstack @pytest.mark.smoke def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access = {} vim_name", "os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'vmware' os_access['description'] = 'a test vim' assert not", "teardown(): try: for vim in osm.get_api().vim.list(False): osm.get_api().vim.delete(vim['name']) except: pass request.addfinalizer(teardown) @pytest.mark.openstack @pytest.mark.smoke def", "'openstack' os_access['description'] = 'a test vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] ==", "== os_access['vim-tenant-name'] assert not osm.get_api().vim.delete(vim_name) <EMAIL> def test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account): os_access = {} vims =", "[ {'name': 'testvim1', 'vim-type': 'openstack'}, {'name': 'testvim2','vim-type': 'vmware'} ] os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username']", "Sandvine # # All Rights Reserved. # # Licensed under the Apache License,", "License. 
You may obtain # a copy of the License at # #", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "def teardown(): try: for vim in osm.get_api().vim.list(False): osm.get_api().vim.delete(vim['name']) except: pass request.addfinalizer(teardown) @pytest.mark.openstack @pytest.mark.smoke", "assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] assert", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "'a test vim' for vim in vims: os_access['vim-type'] = vim['vim-type'] assert not osm.get_api().vim.create(vim['name'],os_access)", "assert resp['name'] == vim['name'] assert resp['vim_type'] == vim['vim-type'] assert resp['vim_url'] == os_access['vim-url'] assert", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'vmware' os_access['description']", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "== 'openstack' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] ==", "resp['name'] == vim['name'] assert resp['vim_type'] == vim['vim-type'] assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user']", "TestClass(object): def test_empty_vim(self,osm): assert not osm.get_api().vim.list() @pytest.fixture(scope='function') def cleanup_test_add_vim_account(self,osm,request): def teardown(): try: for", "and limitations # under the License. import pytest import time @pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware", "'<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'openstack' os_access['description'] = 'a test vim' assert", "== os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] for vim in", "ANY KIND, either express or implied. See the # License for the specific", "the # License for the specific language governing permissions and limitations # under", "except in compliance with the License. You may obtain # a copy of", "under the License. import pytest import time @pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware class TestClass(object): def", "def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-vmware' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] =", "= 'a test vim' for vim in vims: os_access['vim-type'] = vim['vim-type'] assert not", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name'] == vim_name assert resp['vim_type'] == 'vmware' assert resp['vim_url'] ==", "specific language governing permissions and limitations # under the License. 
import pytest import", "= 'pytest3' os_access['vim-type'] = 'vmware' os_access['description'] = 'a test vim' assert not osm.get_api().vim.create(vim_name,os_access)", "test vim' for vim in vims: os_access['vim-type'] = vim['vim-type'] assert not osm.get_api().vim.create(vim['name'],os_access) resp=osm.get_api().vim.get(vim['name'])", "{} vim_name = 'helloworld-os' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>'", "= 'vmware' os_access['description'] = 'a test vim' assert not osm.get_api().vim.create(vim_name,os_access) resp=osm.get_api().vim.get(vim_name) assert resp['name']", "resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name'] for vim", "to in writing, software # distributed under the License is distributed on an", "limitations # under the License. import pytest import time @pytest.mark.vim @pytest.mark.openstack @pytest.mark.vmware class", "You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "'openstack' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert resp['vim_tenant_name'] == os_access['vim-tenant-name']", "not osm.get_api().vim.delete(vim_name) <EMAIL> def test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account): os_access = {} vims = [ {'name': 'testvim1',", "os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type']", "= 'pytest2' os_access['vim-password'] = '<PASSWORD>' os_access['vim-tenant-name'] = 'pytest3' os_access['vim-type'] = 'openstack' os_access['description'] =", "assert resp['vim_type'] == 'openstack' assert resp['vim_url'] == os_access['vim-url'] assert resp['vim_user'] == os_access['vim-username'] assert", "{'name': 'testvim2','vim-type': 'vmware'} ] os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>'", "for vim in osm.get_api().vim.list(False): osm.get_api().vim.delete(vim['name']) except: pass request.addfinalizer(teardown) @pytest.mark.openstack @pytest.mark.smoke def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access", "Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "osm.get_api().vim.delete(vim['name']) except: pass request.addfinalizer(teardown) @pytest.mark.openstack @pytest.mark.smoke def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account): os_access = {} vim_name =", "# # All Rights Reserved. # # Licensed under the Apache License, Version", "required by applicable law or agreed to in writing, software # distributed under", "'vim-type': 'openstack'}, {'name': 'testvim2','vim-type': 'vmware'} ] os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password']", "not osm.get_api().vim.delete(vim_name) @pytest.mark.vmware #@<EMAIL> def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account): os_access = {} vim_name = 'helloworld-vmware' os_access['vim-url']", "{} vim_name = 'helloworld-vmware' os_access['vim-url'] = 'https://169.254.169.245/' os_access['vim-username'] = 'pytest2' os_access['vim-password'] = '<PASSWORD>'", "<reponame>ayoubbargueoui1996/osm-devops # Copyright 2017 Sandvine # # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pytest


@pytest.mark.openstack
@pytest.mark.vmware
class TestClass(object):

    def test_empty_vim(self, osm):
        assert not osm.get_api().vim.list()

    @pytest.fixture(scope='function')
    def cleanup_test_add_vim_account(self, osm, request):
        def teardown():
            try:
                for vim in osm.get_api().vim.list(False):
                    osm.get_api().vim.delete(vim['name'])
            except Exception:
                pass
        request.addfinalizer(teardown)

    @pytest.mark.openstack
    @pytest.mark.smoke
    def test_add_vim_account(self, osm, openstack, cleanup_test_add_vim_account):
        os_access = {}
        vim_name = 'helloworld-os'
        os_access['vim-url'] = 'https://169.254.169.245/'
        os_access['vim-username'] = 'pytest2'
        os_access['vim-password'] = '<PASSWORD>'
        os_access['vim-tenant-name'] = 'pytest3'
        os_access['vim-type'] = 'openstack'
        os_access['description'] = 'a test vim'
        assert not osm.get_api().vim.create(vim_name, os_access)
        resp = osm.get_api().vim.get(vim_name)
        assert resp['name'] == vim_name
        assert resp['vim_type'] == 'openstack'
        assert resp['vim_url'] == os_access['vim-url']
        assert resp['vim_user'] == os_access['vim-username']
        assert resp['vim_tenant_name'] == os_access['vim-tenant-name']
        assert not osm.get_api().vim.delete(vim_name)

    @pytest.mark.vmware
    # @<EMAIL>
    def test_add_vim_account_vmware(self, osm, vmware, cleanup_test_add_vim_account):
        os_access = {}
        vim_name = 'helloworld-vmware'
        os_access['vim-url'] = 'https://169.254.169.245/'
        os_access['vim-username'] = 'pytest2'
        os_access['vim-password'] = '<PASSWORD>'
        os_access['vim-tenant-name'] = 'pytest3'
        os_access['vim-type'] = 'vmware'
        os_access['description'] = 'a test vim'
        assert not osm.get_api().vim.create(vim_name, os_access)
        resp = osm.get_api().vim.get(vim_name)
        assert resp['name'] == vim_name
        assert resp['vim_type'] == 'vmware'
        assert resp['vim_url'] == os_access['vim-url']
        assert resp['vim_user'] == os_access['vim-username']
        assert resp['vim_tenant_name'] == os_access['vim-tenant-name']
        assert not osm.get_api().vim.delete(vim_name)

    # <EMAIL>
    def test_add_multiple_accounts(self, osm, cleanup_test_add_vim_account):
        os_access = {}
        vims = [{'name': 'testvim1', 'vim-type': 'openstack'},
                {'name': 'testvim2', 'vim-type': 'vmware'}]
        os_access['vim-url'] = 'https://169.254.169.245/'
        os_access['vim-username'] = 'pytest2'
        os_access['vim-password'] = '<PASSWORD>'
        os_access['vim-tenant-name'] = 'pytest3'
        os_access['description'] = 'a test vim'
        for vim in vims:
            os_access['vim-type'] = vim['vim-type']
            assert not osm.get_api().vim.create(vim['name'], os_access)
            resp = osm.get_api().vim.get(vim['name'])
            assert resp['name'] == vim['name']
            assert resp['vim_type'] == vim['vim-type']
            assert resp['vim_url'] == os_access['vim-url']
            assert resp['vim_user'] == os_access['vim-username']
            assert resp['vim_tenant_name'] == os_access['vim-tenant-name']
        for vim in osm.get_api().vim.list(False):
            assert not osm.get_api().vim.delete(vim['name'])
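
# Note: the tests above rely on `osm`, `openstack` and `vmware` fixtures that
# are defined elsewhere in the suite (typically a conftest.py). A minimal
# sketch of what such a conftest could look like -- the Osm wrapper class and
# the --osmhost option are illustrative assumptions, not this project's code:
#
#     import pytest
#     from osmclient import client as osm_client  # OSM Python client
#
#     class Osm:
#         """Thin wrapper so tests can call osm.get_api().vim.*"""
#         def __init__(self, host):
#             self._api = osm_client.Client(host=host)
#
#         def get_api(self):
#             return self._api
#
#     def pytest_addoption(parser):
#         parser.addoption('--osmhost', action='store', default='127.0.0.1')
#
#     @pytest.fixture(scope='module')
#     def osm(request):
#         return Osm(request.config.getoption('--osmhost'))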
[ "i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx): arrive = sim.now print('%7.4f Customer%02d: Here I am' %", "in range(NUM_CUSTOMERS): sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx): arrive = sim.now print('%7.4f Customer%02d: Here", "simpy so that we can get the same results.\"\"\" RANDOM_SEED = 42 #", "else: print('%7.4f Customer%02d: Waited %6.3f' % (sim.now, idx, sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d: Finished'", "# random seed for repeatability NUM_CUSTOMERS = 5 # total number of customers", "number of customers INTV_CUSTOMERS = 10.0 # mean time between new customers MEAN_BANK_TIME", "def customer(idx): arrive = sim.now print('%7.4f Customer%02d: Here I am' % (arrive, idx))", "we can get the same results.\"\"\" RANDOM_SEED = 42 # random seed for", "sim.wait(counter, patience) if timedout: print('%7.4f Customer%02d: RENEGED after %6.3f' % (sim.now, idx, sim.now-arrive))", "% (arrive, idx)) patience = uniform(MIN_PATIENCE, MAX_PATIENCE) _, timedout = sim.wait(counter, patience) if", "idx, sim.now-arrive)) else: print('%7.4f Customer%02d: Waited %6.3f' % (sim.now, idx, sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f", "Here I am' % (arrive, idx)) patience = uniform(MIN_PATIENCE, MAX_PATIENCE) _, timedout =", "is modified from the simpy's bank renege example; we use the same settings", "uniform(MIN_PATIENCE, MAX_PATIENCE) _, timedout = sim.wait(counter, patience) if timedout: print('%7.4f Customer%02d: RENEGED after", "Customer%02d: Here I am' % (arrive, idx)) patience = uniform(MIN_PATIENCE, MAX_PATIENCE) _, timedout", "Waited %6.3f' % (sim.now, idx, sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d: Finished' % (sim.now, idx))", "bank for each customer MIN_PATIENCE = 1 # min customer patience MAX_PATIENCE =", "sim.now-arrive)) else: print('%7.4f Customer%02d: Waited %6.3f' % (sim.now, idx, sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d:", "max customer patience import simulus from random import seed, expovariate, uniform def source():", "timedout: print('%7.4f Customer%02d: RENEGED after %6.3f' % (sim.now, idx, sim.now-arrive)) else: print('%7.4f Customer%02d:", "mean time in bank for each customer MIN_PATIENCE = 1 # min customer", "seed, expovariate, uniform def source(): for i in range(NUM_CUSTOMERS): sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def", "def source(): for i in range(NUM_CUSTOMERS): sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx): arrive =", "(sim.now, idx)) counter.release() print('Bank renege') seed(RANDOM_SEED) sim = simulus.simulator() counter = sim.resource() sim.process(source)", "5 # total number of customers INTV_CUSTOMERS = 10.0 # mean time between", "renege example; we use the same settings as simpy so that we can", "example is modified from the simpy's bank renege example; we use the same", "= 12.0 # mean time in bank for each customer MIN_PATIENCE = 1", "modified from the simpy's bank renege example; we use the same settings as", "print('%7.4f Customer%02d: Waited %6.3f' % (sim.now, idx, sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d: Finished' %", "for i in range(NUM_CUSTOMERS): sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx): arrive = sim.now print('%7.4f", "can get the same results.\"\"\" RANDOM_SEED = 42 # random seed 
for repeatability", "customers MEAN_BANK_TIME = 12.0 # mean time in bank for each customer MIN_PATIENCE", "idx, sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d: Finished' % (sim.now, idx)) counter.release() print('Bank renege') seed(RANDOM_SEED)", "42 # random seed for repeatability NUM_CUSTOMERS = 5 # total number of", "expovariate, uniform def source(): for i in range(NUM_CUSTOMERS): sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx):", "# total number of customers INTV_CUSTOMERS = 10.0 # mean time between new", "\"\"\"This example is modified from the simpy's bank renege example; we use the", "repeatability NUM_CUSTOMERS = 5 # total number of customers INTV_CUSTOMERS = 10.0 #", "patience MAX_PATIENCE = 3 # max customer patience import simulus from random import", "use the same settings as simpy so that we can get the same", "MAX_PATIENCE = 3 # max customer patience import simulus from random import seed,", "same results.\"\"\" RANDOM_SEED = 42 # random seed for repeatability NUM_CUSTOMERS = 5", "uniform def source(): for i in range(NUM_CUSTOMERS): sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx): arrive", "from random import seed, expovariate, uniform def source(): for i in range(NUM_CUSTOMERS): sim.process(customer,", "_, timedout = sim.wait(counter, patience) if timedout: print('%7.4f Customer%02d: RENEGED after %6.3f' %", "= 1 # min customer patience MAX_PATIENCE = 3 # max customer patience", "random seed for repeatability NUM_CUSTOMERS = 5 # total number of customers INTV_CUSTOMERS", "for each customer MIN_PATIENCE = 1 # min customer patience MAX_PATIENCE = 3", "min customer patience MAX_PATIENCE = 3 # max customer patience import simulus from", "idx)) counter.release() print('Bank renege') seed(RANDOM_SEED) sim = simulus.simulator() counter = sim.resource() sim.process(source) sim.run()", "the same settings as simpy so that we can get the same results.\"\"\"", "RENEGED after %6.3f' % (sim.now, idx, sim.now-arrive)) else: print('%7.4f Customer%02d: Waited %6.3f' %", "customer MIN_PATIENCE = 1 # min customer patience MAX_PATIENCE = 3 # max", "(arrive, idx)) patience = uniform(MIN_PATIENCE, MAX_PATIENCE) _, timedout = sim.wait(counter, patience) if timedout:", "simpy's bank renege example; we use the same settings as simpy so that", "= sim.wait(counter, patience) if timedout: print('%7.4f Customer%02d: RENEGED after %6.3f' % (sim.now, idx,", "RANDOM_SEED = 42 # random seed for repeatability NUM_CUSTOMERS = 5 # total", "after %6.3f' % (sim.now, idx, sim.now-arrive)) else: print('%7.4f Customer%02d: Waited %6.3f' % (sim.now,", "1 # min customer patience MAX_PATIENCE = 3 # max customer patience import", "in bank for each customer MIN_PATIENCE = 1 # min customer patience MAX_PATIENCE", "of customers INTV_CUSTOMERS = 10.0 # mean time between new customers MEAN_BANK_TIME =", "= uniform(MIN_PATIENCE, MAX_PATIENCE) _, timedout = sim.wait(counter, patience) if timedout: print('%7.4f Customer%02d: RENEGED", "= 10.0 # mean time between new customers MEAN_BANK_TIME = 12.0 # mean", "sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d: Finished' % (sim.now, idx)) counter.release() print('Bank renege') seed(RANDOM_SEED) sim", "get the same results.\"\"\" RANDOM_SEED = 42 # random seed for repeatability NUM_CUSTOMERS", "settings as simpy so that we can get the same results.\"\"\" RANDOM_SEED =", "Customer%02d: RENEGED after %6.3f' % (sim.now, idx, 
sim.now-arrive)) else: print('%7.4f Customer%02d: Waited %6.3f'", "patience import simulus from random import seed, expovariate, uniform def source(): for i", "patience = uniform(MIN_PATIENCE, MAX_PATIENCE) _, timedout = sim.wait(counter, patience) if timedout: print('%7.4f Customer%02d:", "sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx): arrive = sim.now print('%7.4f Customer%02d: Here I am' % (arrive,", "sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d: Finished' % (sim.now, idx)) counter.release() print('Bank renege') seed(RANDOM_SEED) sim =", "if timedout: print('%7.4f Customer%02d: RENEGED after %6.3f' % (sim.now, idx, sim.now-arrive)) else: print('%7.4f", "% (sim.now, idx)) counter.release() print('Bank renege') seed(RANDOM_SEED) sim = simulus.simulator() counter = sim.resource()", "the same results.\"\"\" RANDOM_SEED = 42 # random seed for repeatability NUM_CUSTOMERS =", "# mean time between new customers MEAN_BANK_TIME = 12.0 # mean time in", "idx)) patience = uniform(MIN_PATIENCE, MAX_PATIENCE) _, timedout = sim.wait(counter, patience) if timedout: print('%7.4f", "(sim.now, idx, sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d: Finished' % (sim.now, idx)) counter.release() print('Bank renege')", "between new customers MEAN_BANK_TIME = 12.0 # mean time in bank for each", "12.0 # mean time in bank for each customer MIN_PATIENCE = 1 #", "same settings as simpy so that we can get the same results.\"\"\" RANDOM_SEED", "mean time between new customers MEAN_BANK_TIME = 12.0 # mean time in bank", "(sim.now, idx, sim.now-arrive)) else: print('%7.4f Customer%02d: Waited %6.3f' % (sim.now, idx, sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME))", "new customers MEAN_BANK_TIME = 12.0 # mean time in bank for each customer", "that we can get the same results.\"\"\" RANDOM_SEED = 42 # random seed", "source(): for i in range(NUM_CUSTOMERS): sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx): arrive = sim.now", "# mean time in bank for each customer MIN_PATIENCE = 1 # min", "time in bank for each customer MIN_PATIENCE = 1 # min customer patience", "print('%7.4f Customer%02d: RENEGED after %6.3f' % (sim.now, idx, sim.now-arrive)) else: print('%7.4f Customer%02d: Waited", "results.\"\"\" RANDOM_SEED = 42 # random seed for repeatability NUM_CUSTOMERS = 5 #", "customer(idx): arrive = sim.now print('%7.4f Customer%02d: Here I am' % (arrive, idx)) patience", "customer patience MAX_PATIENCE = 3 # max customer patience import simulus from random", "the simpy's bank renege example; we use the same settings as simpy so", "= 5 # total number of customers INTV_CUSTOMERS = 10.0 # mean time", "import seed, expovariate, uniform def source(): for i in range(NUM_CUSTOMERS): sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS))", "Finished' % (sim.now, idx)) counter.release() print('Bank renege') seed(RANDOM_SEED) sim = simulus.simulator() counter =", "= sim.now print('%7.4f Customer%02d: Here I am' % (arrive, idx)) patience = uniform(MIN_PATIENCE,", "as simpy so that we can get the same results.\"\"\" RANDOM_SEED = 42", "so that we can get the same results.\"\"\" RANDOM_SEED = 42 # random", "import simulus from random import seed, expovariate, uniform def source(): for i in", "= 3 # max customer patience import simulus from random import seed, expovariate,", "simulus from random import seed, expovariate, uniform def source(): for i in range(NUM_CUSTOMERS):", "% (sim.now, idx, 
sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d: Finished' % (sim.now, idx)) counter.release() print('Bank", "customers INTV_CUSTOMERS = 10.0 # mean time between new customers MEAN_BANK_TIME = 12.0", "10.0 # mean time between new customers MEAN_BANK_TIME = 12.0 # mean time", "am' % (arrive, idx)) patience = uniform(MIN_PATIENCE, MAX_PATIENCE) _, timedout = sim.wait(counter, patience)", "random import seed, expovariate, uniform def source(): for i in range(NUM_CUSTOMERS): sim.process(customer, i)", "= 42 # random seed for repeatability NUM_CUSTOMERS = 5 # total number", "patience) if timedout: print('%7.4f Customer%02d: RENEGED after %6.3f' % (sim.now, idx, sim.now-arrive)) else:", "<gh_stars>1-10 \"\"\"This example is modified from the simpy's bank renege example; we use", "example; we use the same settings as simpy so that we can get", "MEAN_BANK_TIME = 12.0 # mean time in bank for each customer MIN_PATIENCE =", "print('%7.4f Customer%02d: Finished' % (sim.now, idx)) counter.release() print('Bank renege') seed(RANDOM_SEED) sim = simulus.simulator()", "%6.3f' % (sim.now, idx, sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d: Finished' % (sim.now, idx)) counter.release()", "NUM_CUSTOMERS = 5 # total number of customers INTV_CUSTOMERS = 10.0 # mean", "time between new customers MEAN_BANK_TIME = 12.0 # mean time in bank for", "timedout = sim.wait(counter, patience) if timedout: print('%7.4f Customer%02d: RENEGED after %6.3f' % (sim.now,", "seed for repeatability NUM_CUSTOMERS = 5 # total number of customers INTV_CUSTOMERS =", "i in range(NUM_CUSTOMERS): sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx): arrive = sim.now print('%7.4f Customer%02d:", "print('%7.4f Customer%02d: Here I am' % (arrive, idx)) patience = uniform(MIN_PATIENCE, MAX_PATIENCE) _,", "# min customer patience MAX_PATIENCE = 3 # max customer patience import simulus", "# max customer patience import simulus from random import seed, expovariate, uniform def", "I am' % (arrive, idx)) patience = uniform(MIN_PATIENCE, MAX_PATIENCE) _, timedout = sim.wait(counter,", "MAX_PATIENCE) _, timedout = sim.wait(counter, patience) if timedout: print('%7.4f Customer%02d: RENEGED after %6.3f'", "Customer%02d: Waited %6.3f' % (sim.now, idx, sim.now-arrive)) sim.sleep(expovariate(1.0/MEAN_BANK_TIME)) print('%7.4f Customer%02d: Finished' % (sim.now,", "from the simpy's bank renege example; we use the same settings as simpy", "total number of customers INTV_CUSTOMERS = 10.0 # mean time between new customers", "for repeatability NUM_CUSTOMERS = 5 # total number of customers INTV_CUSTOMERS = 10.0", "INTV_CUSTOMERS = 10.0 # mean time between new customers MEAN_BANK_TIME = 12.0 #", "% (sim.now, idx, sim.now-arrive)) else: print('%7.4f Customer%02d: Waited %6.3f' % (sim.now, idx, sim.now-arrive))", "customer patience import simulus from random import seed, expovariate, uniform def source(): for", "Customer%02d: Finished' % (sim.now, idx)) counter.release() print('Bank renege') seed(RANDOM_SEED) sim = simulus.simulator() counter", "each customer MIN_PATIENCE = 1 # min customer patience MAX_PATIENCE = 3 #", "sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx): arrive = sim.now print('%7.4f Customer%02d: Here I am'", "3 # max customer patience import simulus from random import seed, expovariate, uniform", "%6.3f' % (sim.now, idx, sim.now-arrive)) else: print('%7.4f Customer%02d: Waited %6.3f' % (sim.now, 
idx,", "bank renege example; we use the same settings as simpy so that we", "arrive = sim.now print('%7.4f Customer%02d: Here I am' % (arrive, idx)) patience =", "range(NUM_CUSTOMERS): sim.process(customer, i) sim.sleep(expovariate(1.0/INTV_CUSTOMERS)) def customer(idx): arrive = sim.now print('%7.4f Customer%02d: Here I", "we use the same settings as simpy so that we can get the", "MIN_PATIENCE = 1 # min customer patience MAX_PATIENCE = 3 # max customer", "sim.now print('%7.4f Customer%02d: Here I am' % (arrive, idx)) patience = uniform(MIN_PATIENCE, MAX_PATIENCE)" ]
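
# For comparison: the one-call renege above, `sim.wait(counter, patience)`,
# corresponds to the request-or-timeout race in SimPy's original bank renege
# example that the docstring references. A rough sketch of that SimPy shape
# (recalled from SimPy's documented example, shown only for contrast; not
# part of this script):
#
#     with counter.request() as req:
#         results = yield req | env.timeout(patience)
#         if req not in results:
#             pass  # reneged: patience ran out before reaching the counter
#         else:
#             yield env.timeout(random.expovariate(1.0 / MEAN_BANK_TIME))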
[ "\"\"\" Class for generating the lazy cartesian product \"\"\" def __init__(self, sets: List[List[Any]]):", "self.__max_size = 0 self.__factors = [0] * len(self.__sets) self.__modulo = [0] * len(self.__sets)", "-> None: fac = 1 self.__max_size = 1 for i in range(len(self.__sets) -", "in range(len(self.__sets) - 1, -1, -1): items = len(self.__sets[i]) self.__max_size *= items self.__factors[i]", "or n >= self.__max_size: raise ValueError(\"Invalid value of n\") res = [] for", "* class LazyCartesianProduct: \"\"\" Class for generating the lazy cartesian product \"\"\" def", "\"\"\" def __init__(self, sets: List[List[Any]]): self.__sets = sets self.__max_size = 0 self.__factors =", "self.__factors[i] = fac self.__modulo[i] = items fac *= items def get_nth_element(self, n: int)", "fac *= items def get_nth_element(self, n: int) -> List[Any]: if n < 0", "\"\"\" from typing import List, Any from bigfloat import * class LazyCartesianProduct: \"\"\"", "= 1 for i in range(len(self.__sets) - 1, -1, -1): items = len(self.__sets[i])", "self.__compute() @property def max_size(self): return self.__max_size def __compute(self) -> None: fac = 1", "i in range(len(self.__sets) - 1, -1, -1): items = len(self.__sets[i]) self.__max_size *= items", "= len(self.__sets[i]) self.__max_size *= items self.__factors[i] = fac self.__modulo[i] = items fac *=", "ValueError(\"Invalid value of n\") res = [] for i in range(len(self.__sets)): res.append(self.__sets[i][int(mod(div(BigFloat(n), BigFloat(self.__factors[i])),", "@author: <NAME> @email: <EMAIL> @date: 04.04.2020 17:34 \"\"\" from typing import List, Any", "0 self.__factors = [0] * len(self.__sets) self.__modulo = [0] * len(self.__sets) self.__compute() @property", "n < 0 or n >= self.__max_size: raise ValueError(\"Invalid value of n\") res", "@date: 04.04.2020 17:34 \"\"\" from typing import List, Any from bigfloat import *", "= sets self.__max_size = 0 self.__factors = [0] * len(self.__sets) self.__modulo = [0]", "max_size(self): return self.__max_size def __compute(self) -> None: fac = 1 self.__max_size = 1", "self.__max_size def __compute(self) -> None: fac = 1 self.__max_size = 1 for i", "raise ValueError(\"Invalid value of n\") res = [] for i in range(len(self.__sets)): res.append(self.__sets[i][int(mod(div(BigFloat(n),", "sets: List[List[Any]]): self.__sets = sets self.__max_size = 0 self.__factors = [0] * len(self.__sets)", "fac = 1 self.__max_size = 1 for i in range(len(self.__sets) - 1, -1,", "items self.__factors[i] = fac self.__modulo[i] = items fac *= items def get_nth_element(self, n:", "self.__max_size: raise ValueError(\"Invalid value of n\") res = [] for i in range(len(self.__sets)):", "LazyCartesianProduct: \"\"\" Class for generating the lazy cartesian product \"\"\" def __init__(self, sets:", "self.__max_size *= items self.__factors[i] = fac self.__modulo[i] = items fac *= items def", "1 self.__max_size = 1 for i in range(len(self.__sets) - 1, -1, -1): items", "self.__factors = [0] * len(self.__sets) self.__modulo = [0] * len(self.__sets) self.__compute() @property def", "for i in range(len(self.__sets) - 1, -1, -1): items = len(self.__sets[i]) self.__max_size *=", "-1): items = len(self.__sets[i]) self.__max_size *= items self.__factors[i] = fac self.__modulo[i] = items", "lazy cartesian product \"\"\" def __init__(self, sets: List[List[Any]]): self.__sets = sets self.__max_size =", "generating the lazy cartesian product \"\"\" def __init__(self, sets: List[List[Any]]): self.__sets = sets", "len(self.__sets[i]) self.__max_size *= items 
self.__factors[i] = fac self.__modulo[i] = items fac *= items", ">= self.__max_size: raise ValueError(\"Invalid value of n\") res = [] for i in", "class LazyCartesianProduct: \"\"\" Class for generating the lazy cartesian product \"\"\" def __init__(self,", "04.04.2020 17:34 \"\"\" from typing import List, Any from bigfloat import * class", "= fac self.__modulo[i] = items fac *= items def get_nth_element(self, n: int) ->", "-1, -1): items = len(self.__sets[i]) self.__max_size *= items self.__factors[i] = fac self.__modulo[i] =", "<NAME> @email: <EMAIL> @date: 04.04.2020 17:34 \"\"\" from typing import List, Any from", "of n\") res = [] for i in range(len(self.__sets)): res.append(self.__sets[i][int(mod(div(BigFloat(n), BigFloat(self.__factors[i])), self.__modulo[i]))]) return", "n >= self.__max_size: raise ValueError(\"Invalid value of n\") res = [] for i", "int) -> List[Any]: if n < 0 or n >= self.__max_size: raise ValueError(\"Invalid", "len(self.__sets) self.__modulo = [0] * len(self.__sets) self.__compute() @property def max_size(self): return self.__max_size def", "cartesian product \"\"\" def __init__(self, sets: List[List[Any]]): self.__sets = sets self.__max_size = 0", "len(self.__sets) self.__compute() @property def max_size(self): return self.__max_size def __compute(self) -> None: fac =", "__init__(self, sets: List[List[Any]]): self.__sets = sets self.__max_size = 0 self.__factors = [0] *", "= [0] * len(self.__sets) self.__modulo = [0] * len(self.__sets) self.__compute() @property def max_size(self):", "n: int) -> List[Any]: if n < 0 or n >= self.__max_size: raise", "fac self.__modulo[i] = items fac *= items def get_nth_element(self, n: int) -> List[Any]:", "return self.__max_size def __compute(self) -> None: fac = 1 self.__max_size = 1 for", "< 0 or n >= self.__max_size: raise ValueError(\"Invalid value of n\") res =", "0 or n >= self.__max_size: raise ValueError(\"Invalid value of n\") res = []", "* len(self.__sets) self.__modulo = [0] * len(self.__sets) self.__compute() @property def max_size(self): return self.__max_size", "sets self.__max_size = 0 self.__factors = [0] * len(self.__sets) self.__modulo = [0] *", "17:34 \"\"\" from typing import List, Any from bigfloat import * class LazyCartesianProduct:", "\"\"\" @author: <NAME> @email: <EMAIL> @date: 04.04.2020 17:34 \"\"\" from typing import List,", "from typing import List, Any from bigfloat import * class LazyCartesianProduct: \"\"\" Class", "<reponame>GeorgianBadita/Dronem-gym-envirnoment \"\"\" @author: <NAME> @email: <EMAIL> @date: 04.04.2020 17:34 \"\"\" from typing import", "-> List[Any]: if n < 0 or n >= self.__max_size: raise ValueError(\"Invalid value", "List, Any from bigfloat import * class LazyCartesianProduct: \"\"\" Class for generating the", "*= items def get_nth_element(self, n: int) -> List[Any]: if n < 0 or", "items fac *= items def get_nth_element(self, n: int) -> List[Any]: if n <", "*= items self.__factors[i] = fac self.__modulo[i] = items fac *= items def get_nth_element(self,", "bigfloat import * class LazyCartesianProduct: \"\"\" Class for generating the lazy cartesian product", "self.__sets = sets self.__max_size = 0 self.__factors = [0] * len(self.__sets) self.__modulo =", "[0] * len(self.__sets) self.__compute() @property def max_size(self): return self.__max_size def __compute(self) -> None:", "def __compute(self) -> None: fac = 1 self.__max_size = 1 for i in", "for generating the lazy cartesian product \"\"\" def __init__(self, sets: List[List[Any]]): self.__sets =", "@email: <EMAIL> @date: 
04.04.2020 17:34 \"\"\" from typing import List, Any from bigfloat", "self.__modulo[i] = items fac *= items def get_nth_element(self, n: int) -> List[Any]: if", "- 1, -1, -1): items = len(self.__sets[i]) self.__max_size *= items self.__factors[i] = fac", "@property def max_size(self): return self.__max_size def __compute(self) -> None: fac = 1 self.__max_size", "items def get_nth_element(self, n: int) -> List[Any]: if n < 0 or n", "__compute(self) -> None: fac = 1 self.__max_size = 1 for i in range(len(self.__sets)", "items = len(self.__sets[i]) self.__max_size *= items self.__factors[i] = fac self.__modulo[i] = items fac", "= 0 self.__factors = [0] * len(self.__sets) self.__modulo = [0] * len(self.__sets) self.__compute()", "<EMAIL> @date: 04.04.2020 17:34 \"\"\" from typing import List, Any from bigfloat import", "from bigfloat import * class LazyCartesianProduct: \"\"\" Class for generating the lazy cartesian", "None: fac = 1 self.__max_size = 1 for i in range(len(self.__sets) - 1,", "[0] * len(self.__sets) self.__modulo = [0] * len(self.__sets) self.__compute() @property def max_size(self): return", "n\") res = [] for i in range(len(self.__sets)): res.append(self.__sets[i][int(mod(div(BigFloat(n), BigFloat(self.__factors[i])), self.__modulo[i]))]) return res", "get_nth_element(self, n: int) -> List[Any]: if n < 0 or n >= self.__max_size:", "1 for i in range(len(self.__sets) - 1, -1, -1): items = len(self.__sets[i]) self.__max_size", "range(len(self.__sets) - 1, -1, -1): items = len(self.__sets[i]) self.__max_size *= items self.__factors[i] =", "the lazy cartesian product \"\"\" def __init__(self, sets: List[List[Any]]): self.__sets = sets self.__max_size", "List[List[Any]]): self.__sets = sets self.__max_size = 0 self.__factors = [0] * len(self.__sets) self.__modulo", "def max_size(self): return self.__max_size def __compute(self) -> None: fac = 1 self.__max_size =", "def get_nth_element(self, n: int) -> List[Any]: if n < 0 or n >=", "List[Any]: if n < 0 or n >= self.__max_size: raise ValueError(\"Invalid value of", "value of n\") res = [] for i in range(len(self.__sets)): res.append(self.__sets[i][int(mod(div(BigFloat(n), BigFloat(self.__factors[i])), self.__modulo[i]))])", "= items fac *= items def get_nth_element(self, n: int) -> List[Any]: if n", "def __init__(self, sets: List[List[Any]]): self.__sets = sets self.__max_size = 0 self.__factors = [0]", "self.__modulo = [0] * len(self.__sets) self.__compute() @property def max_size(self): return self.__max_size def __compute(self)", "product \"\"\" def __init__(self, sets: List[List[Any]]): self.__sets = sets self.__max_size = 0 self.__factors", "typing import List, Any from bigfloat import * class LazyCartesianProduct: \"\"\" Class for", "* len(self.__sets) self.__compute() @property def max_size(self): return self.__max_size def __compute(self) -> None: fac", "if n < 0 or n >= self.__max_size: raise ValueError(\"Invalid value of n\")", "import List, Any from bigfloat import * class LazyCartesianProduct: \"\"\" Class for generating", "import * class LazyCartesianProduct: \"\"\" Class for generating the lazy cartesian product \"\"\"", "Any from bigfloat import * class LazyCartesianProduct: \"\"\" Class for generating the lazy", "= 1 self.__max_size = 1 for i in range(len(self.__sets) - 1, -1, -1):", "self.__max_size = 1 for i in range(len(self.__sets) - 1, -1, -1): items =", "Class for generating the lazy cartesian product \"\"\" def __init__(self, sets: List[List[Any]]): self.__sets", "= [0] * len(self.__sets) self.__compute() 
@property def max_size(self): return self.__max_size def __compute(self) ->", "1, -1, -1): items = len(self.__sets[i]) self.__max_size *= items self.__factors[i] = fac self.__modulo[i]" ]
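
# Hypothetical usage sketch (the sets and printed values below are
# illustrative, not from this repository): with sets of sizes 2, 3 and 2 the
# product has 2 * 3 * 2 = 12 elements, and get_nth_element(n) decodes the
# n-th combination directly instead of materializing all 12.
if __name__ == '__main__':
    lcp = LazyCartesianProduct([[0, 1], ['a', 'b', 'c'], [True, False]])
    print(lcp.max_size)             # 12
    print(lcp.get_nth_element(0))   # [0, 'a', True]
    print(lcp.get_nth_element(11))  # [1, 'c', False]

# Design note: bigfloat keeps the div/mod exact when the product size exceeds
# float precision; since Python ints are arbitrary precision, the same index
# could also be computed exactly as sets[i][(n // factor) % modulo] with plain
# integer arithmetic, dropping the dependency.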