Dataset columns (name, dtype, min to max):

    content            string    lengths 1 to 1.05M
    input_ids          list      lengths 1 to 883k
    ratio_char_token   float64   1 to 22.9
    token_count        int64     1 to 883k

Each row below gives these four fields in order: content, input_ids (truncated preview), ratio_char_token, token_count.
""" Use repro_eval from the command line with e.g. python -m repro_eval -t rpd -q qrel_orig -r orig_b rpd_b python -m repro_eval -t rpd -q qrel_orig -r orig_b orig_a rpd_b rpd_a python -m repro_eval -t rpd -m rmse -q qrel_orig -r orig_b rpd_b python -m repro_eval -t rpl -q qrel_orig qrel_rpl -r orig_b rpl_b python -m repro_eval -t rpl -q qrel_orig qrel_rpl -r orig_b orig_a rpl_b rpl_a after having installed the Python package. For other more specific examples also have a look at the README file. Depending on the provided parameters and input run files, evaluation measures will be printed. """ import argparse from repro_eval.Evaluator import RpdEvaluator, RplEvaluator from repro_eval.util import print_simple_line, print_base_adv from repro_eval.util import arp if __name__ == "__main__": main()
[ 37811, 198, 11041, 43969, 62, 18206, 422, 262, 3141, 1627, 351, 304, 13, 70, 13, 198, 198, 29412, 532, 76, 43969, 62, 18206, 532, 83, 374, 30094, 532, 80, 10662, 2411, 62, 11612, 532, 81, 1796, 62, 65, 374, 30094, 62, 65, 198, 198...
2.708609
302
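The sample above calls main() under the __main__ guard but never defines it. A minimal sketch of what such an entry point could look like, inferred only from the usage strings in the docstring; the flag names (-t, -m, -q, -r) follow those examples, and the evaluator wiring is deliberately left as a stub because the real repro_eval CLI may differ:

import argparse

from repro_eval.Evaluator import RpdEvaluator, RplEvaluator


def main():
    # Hypothetical wiring, not the package's actual entry point.
    parser = argparse.ArgumentParser(prog="repro_eval")
    parser.add_argument("-t", "--type", choices=("rpd", "rpl"), required=True)
    parser.add_argument("-m", "--measure")
    parser.add_argument("-q", "--qrels", nargs="+", required=True)
    parser.add_argument("-r", "--runs", nargs="+", required=True)
    args = parser.parse_args()
    evaluator_cls = RpdEvaluator if args.type == "rpd" else RplEvaluator
    # Constructor arguments are not shown in the sample, so we only
    # report what would be evaluated instead of running the evaluator.
    print("Would evaluate runs {} against qrels {} with {}".format(
        args.runs, args.qrels, evaluator_cls.__name__))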
import math

from game_objects import Turret, Troop

players = []


def calculate_distance(entity1: Location, entity2: Location) -> float:
    # distance between vectors: https://brilliant.org/wiki/distance-formula/
    # (fixed: the y term must be a difference, not a sum)
    distance = math.sqrt((entity1.x - entity2.x) ** 2 + (entity1.y - entity2.y) ** 2)
    return distance


init()
while True:
    # most of this is pseudocode, as I have no way of handling user input currently
    for index, player in enumerate(players):
        if 'player places turret':
            player.add_turret(Location(1, 1))
        if 'player places troops':
            player.add_troops('bla')
        for troop in player.troops:
            troop.move()
        player.turret_fire_check()
        if player.health <= 0:
            # note: this announces the eliminated player; presumably the
            # surviving player should be declared the winner
            print(f'Player {index} won the game!')
[ 11748, 10688, 198, 198, 6738, 983, 62, 48205, 1330, 3831, 1186, 11, 8498, 404, 198, 198, 32399, 796, 17635, 628, 628, 198, 4299, 15284, 62, 30246, 7, 26858, 16, 25, 13397, 11, 9312, 17, 25, 13397, 8, 4613, 12178, 25, 198, 220, 220, ...
2.539185
319
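The snippet above annotates with Location but never imports or defines it. A minimal stand-in, assuming the real definition lives alongside Turret and Troop in game_objects (hypothetical):

from dataclasses import dataclass


@dataclass
class Location:
    # simple 2-D coordinate consumed by calculate_distance above
    x: float
    y: float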
import os
from pathlib import Path
from difflib import SequenceMatcher

supported_bibtex_types = {"article", "book", "booklet", "inbook", "incollection",
                          "inproceedings", "manual", "mastersthesis", "misc",
                          "phdthesis", "proceedings", "techreport", "unpublished"}
supported_fields = ["author", "title", "year", "month", "pages", "note", "journal",
                    "booktitle", "volume", "number", "series", "edition", "editor",
                    "publisher", "address", "howpublished", "type", "chapter",
                    "organization", "school", "institution"]
extra_fields = ["doi", "issn", "isbn", "keywords", "abstract", "url", "archivePrefix",
                "eprint", "timestamp", "biburl", "bibsource"]

data_path = Path("_data/papers.yml")
bib_path = Path("bibfiles")
year_from = 2017
similarity_threshold = 0.8


def find_all_files(path_to_search):
    """Recursively find all bib files in root path given"""
    list_of_files = os.listdir(path_to_search)
    all_files = []
    # Iterate over all the entries
    for e in list_of_files:
        # Create full path
        full_path = path_to_search / e
        # If entry is a directory then get the list of files in this directory
        if os.path.isdir(full_path):
            all_files = all_files + find_all_files(full_path)
        elif full_path.suffix == ".bib":  # fixed: with_suffix() returns a Path and is always truthy
            all_files.append(full_path)
    return all_files


def process_entry(entry_to_process):
    """
    Turns a string of an entry into a dictionary mapping from fields to field values
    :param entry_to_process
    :return: dictionary.
    """
    dict_entry = {}
    entry_lines = entry_to_process.split("\n")
    first_line = entry_lines[0].split("=")
    entry_type = first_line[0].replace("@", "")
    entry_id = first_line[1]

    # Type validation
    if entry_type.lower() not in supported_bibtex_types:
        print("Type " + entry_type + " not supported for bibtex entry " + entry_id)
        return dict_entry

    dict_entry["id"] = entry_id
    dict_entry["type"] = entry_type

    # Process the rest of the fields
    field_value = ""  # Keep this up here to be able to access previous values in case of multi-line field
    field = ""
    for l in entry_lines:
        split_line = l.split("=")
        if len(split_line) == 1 and field != "":
            # No = found on this line, it's a multi-line field
            field_value += " " + split_line[0].strip()
            dict_entry[field] = field_value.strip()
        else:
            field = split_line[0].strip()
            field_value = split_line[1].strip()
            if field.lower() in supported_fields or field.lower() in extra_fields:
                if field.lower() == "pages" and "--" not in field_value:
                    field_value = field_value.replace("-", "--")
                dict_entry[field] = field_value

    # Try to find pdf of this paper
    pdf = find_pdf(entry_id, dict_entry["year"])
    dict_entry["pdf"] = str(pdf).lower()
    return dict_entry


def find_pdf(entry_id, year):
    """
    Returns true if a pdf for this paper exists in the pdf/pub/year directory
    (must have name as paper ID)
    """
    return os.path.isfile("pdf/pub/" + year + "/" + entry_id + ".pdf")


def output_entries(entries):
    """ Prints the given bibtex entries into yaml supported format """
    with open(data_path.absolute(), 'w+', encoding='utf-8') as wf:
        for entry in entries:
            if int(entry["year"]) < year_from:
                continue
            wf.write("- id: " + entry["id"] + "\n")
            for e in entry:
                if e != "id":
                    if ":" in entry[e]:
                        entry[e] = '"' + entry[e] + '"'
                    wf.write("  " + e + ": " + entry[e] + "\n")


def check_equality(entry1, entry2):
    """ Checks if 2 entries are the same """
    sim_fields = 0
    common_fields = 0
    for field1 in entry1:
        for field2 in entry2:
            if field1 == field2:
                common_fields += 1
                if similar(entry1[field1], entry2[field2]) >= similarity_threshold:
                    sim_fields += 1
    if common_fields == 0:
        return False
    if sim_fields / common_fields >= similarity_threshold:
        return True
    return False


def similar(a, b):
    """ Checks if 2 strings are similar, returns a similarity measure. """
    return SequenceMatcher(None, a, b).ratio()


def process_yml_entries(lines):
    """
    Processes entries in yml format
    :param lines: list of lines from yml file to process
    :return: list of entries as dictionaries
    """
    entry_list = []
    entry = {}
    ln = 0
    for line in lines:
        if "- id:" in line or ln == len(lines) - 1:
            # Starting a new entry
            if len(entry) > 0:
                entry_list.append(entry)
                entry = {}
        line = line.replace("\"", "")
        if "- id:" in line:
            line = line[1:]  # Ignore first dash
        stripped_line = line.strip()
        if stripped_line != "":
            # Adding to current entry
            split_line = stripped_line.split(':')
            entry[split_line[0].strip()] = ':'.join(split_line[1:]).strip()
        ln += 1
    return entry_list


def main():
    """
    Main function to process bibtex entries in a given path and output a file
    in yaml supported format.
    """
    # Read in current entries
    lines = data_path.read_text(encoding='utf-8').split('\n')
    entries = process_yml_entries(lines)

    # Find new entries
    files = find_all_files(bib_path)
    for bibfile in files:
        entry = ""
        full_pth = Path(bibfile)
        lines = full_pth.read_text(encoding='utf-8').split('\n')
        line_number = 0
        for line in lines:
            if "@" in line or line_number == len(lines) - 1:
                # Starting a new entry
                if entry != "":
                    entry = entry.translate({ord(c): None for c in '\\"{}~\'"'})
                    processed_entry = process_entry(entry)
                    entries.append(processed_entry)
                    entry = ""
                if "@" in line:
                    line = line.replace("{", "=")
            stripped_line = line.strip()
            if stripped_line != "":
                # Adding to current entry
                if stripped_line.endswith(","):
                    stripped_line = stripped_line[:-1]
                entry += stripped_line + "\n"
            line_number += 1

    # Check for duplication
    duplicate_entries = []
    for i in range(len(entries) - 1):
        for j in range(i + 1, len(entries)):
            if check_equality(entries[i], entries[j]):
                print("Duplicate found: " + entries[i]["id"] + " = " + entries[j]["id"])
                duplicate_entries.append(j)
    duplicate_entries.sort()
    for i in range(len(duplicate_entries)):
        e = duplicate_entries[i] - i
        del entries[e]

    # Finally, save entries
    output_entries(entries)


if __name__ == "__main__":
    main()
[ 11748, 28686, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 814, 8019, 1330, 45835, 19044, 2044, 628, 198, 15999, 62, 65, 571, 16886, 62, 19199, 796, 19779, 20205, 1600, 366, 2070, 1600, 366, 2070, 1616, 1600, 366, 259, 2070, 1600, 366...
2.235146
3,181
from selenium import webdriver
import time
import userdata as udata
import random

randomUsers = set()
[ 6738, 384, 11925, 1505, 1330, 3992, 26230, 201, 198, 11748, 640, 201, 198, 11748, 2836, 7890, 355, 334, 7890, 201, 198, 11748, 4738, 201, 198, 201, 198, 25120, 14490, 796, 900, 3419, 201, 198, 201, 198, 220, 220, 220, 220, 220, 220, ...
2.388889
54
'''
Title     : Linear Algebra
Subdomain : Numpy
Domain    : Python
Author    : codeperfectplus
Created   : 10 May 2020
'''
import numpy

n = int(input())
a = numpy.array([input().split() for _ in range(n)], float)
print(round(numpy.linalg.det(a), 2))
[ 7061, 6, 198, 19160, 220, 220, 220, 220, 1058, 44800, 978, 29230, 198, 7004, 27830, 1058, 399, 32152, 198, 43961, 220, 220, 220, 1058, 11361, 198, 13838, 220, 220, 220, 1058, 2438, 25833, 9541, 198, 41972, 220, 220, 1058, 838, 1737, 1...
2.510204
98
# -*- coding: utf-8 -*-
import numpy as np

from ..dim_processors_base import DimProcessorBase
from ...registry import DIMPROCESSORS
from sklearn.preprocessing import normalize
from typing import Dict, List
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 11485, 27740, 62, 14681, 669, 62, 8692, 1330, 14048, 18709, 273, 14881, 198, 6738, 2644, 2301, 4592, 1330, 360, 3955, ...
3.119403
67
import csv
import person
from random import randrange

headers = ['Name', 'Messages', 'Char Count', 'Likes Given', 'Likes Received', 'Image URL']

# tester code
people = ['bob', 'joe', 'gmo']
bob = person.Person(111, 'bob', 'www.bob.com', people)
joe = person.Person(222, 'joe', 'www.joe.com', people)
gmo = person.Person(333, 'gmo', 'www.gmo.com', people)
members = [bob, joe, gmo]

bob.msgs = randrange(40)
bob.likes_given = randrange(40)
bob.likes_received = randrange(40)
bob.chars = randrange(40)
bob.friends['gmo'] = randrange(40)
bob.friends['joe'] = randrange(40)
bob.friends['bob'] = randrange(40)

joe.msgs = randrange(40)
joe.likes_given = randrange(40)
joe.likes_received = randrange(40)
joe.chars = randrange(40)
joe.friends['gmo'] = randrange(40)
joe.friends['joe'] = randrange(40)
joe.friends['bob'] = randrange(40)

gmo.msgs = randrange(40)
gmo.likes_given = randrange(40)
gmo.likes_received = randrange(40)
gmo.chars = randrange(40)
gmo.friends['gmo'] = randrange(40)
gmo.friends['joe'] = randrange(40)
gmo.friends['bob'] = randrange(40)

# loop through the list of members and add their names to the headers
for member in members:
    headers.append(member.name)

# newline='' keeps the csv module from emitting blank rows on Windows
with open('raw_groupme_data.csv', 'w', newline='') as csv_file:
    csv_writer = csv.writer(csv_file)
    csv_writer.writerow(headers)
    for member in members:
        row = [member.name, member.msgs, member.chars, member.likes_given,
               member.likes_received, member.image_url]
        for friend in member.friends:
            row.append(member.friends[friend])
        csv_writer.writerow(row)
[ 11748, 269, 21370, 201, 198, 11748, 1048, 201, 198, 6738, 4738, 1330, 43720, 9521, 201, 198, 201, 198, 50145, 796, 37250, 5376, 3256, 705, 36479, 1095, 3256, 705, 12441, 2764, 3256, 705, 43, 7938, 11259, 3256, 705, 43, 7938, 20557, 3256...
2.310198
706
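The script above depends on a local person module that is not shown. A minimal stub that satisfies the attributes the script touches, so the sample can run in isolation (hypothetical; the real Person class may carry more state):

class Person:
    """Bare-bones stand-in for the missing person.Person."""

    def __init__(self, uid, name, image_url, people):
        self.uid = uid
        self.name = name
        self.image_url = image_url
        # message/like counters filled in by the caller
        self.msgs = 0
        self.chars = 0
        self.likes_given = 0
        self.likes_received = 0
        # per-friend interaction counts, keyed by name
        self.friends = {p: 0 for p in people}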
import pandas as pd
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath, utils
import json
import os
import importlib
from hydroDL.master import basinFull
from hydroDL.app.waterQuality import WRTDS

# dataName = 'G200N'
# labelLst = ['QFPRT2C', 'FPRT2C', 'FPRT2QC', 'QFPT2C', 'QFRT2C']
dataName = 'G200'
labelLst = ['QFPRT2C']
trainLst = ['rmR20', 'rmL20', 'rmRT20', 'rmYr5', 'B10']
testLst = ['pkR20', 'pkL20', 'pkRT20', 'pkYr5', 'A10']

DF = dbBasin.DataFrameBasin(dataName)
for label in labelLst:
    for trainSet, testSet in zip(trainLst, testLst):
        outName = '{}-{}-{}'.format(dataName, label, trainSet)
        print(outName)
        yP, ycP = basinFull.testModel(outName, DF=DF, testSet=testSet, ep=500)
[ 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 17173, 19260, 13, 7890, 1330, 514, 14542, 11, 308, 496, 3978, 11, 10706, 47123, 11, 299, 34106, 11, 10188, 10705, 11, 6121, 11, 20613, 15522, 259, 198, 11748, 299, 32152, 355, 45941, 19...
2.309333
375
import math as m
import numpy as np

from BDMesh import Mesh1DUniform
from BDFunction1D import Function
from BDFunction1D.Functional import Functional
from BDFunction1D.Interpolation import InterpolateFunction
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_arrays
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_mesh_arrays
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_mesh
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_recurrent_mesh
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_amr

import unittest
[ 11748, 10688, 355, 285, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 347, 23127, 5069, 1330, 47529, 16, 35, 3118, 6933, 198, 6738, 347, 8068, 4575, 16, 35, 1330, 15553, 198, 6738, 347, 8068, 4575, 16, 35, 13, 22203, 282, 1330, ...
2.837121
264
import argparse
import gzip
import os

import pytest

from ..dumpSTR import *
from trtools.testsupport.utils import assert_same_vcf, assert_same_file

# Set up base argparser


# Test no such file or directory
def test_WrongFile(args, testDumpSTRdir):
    fname = os.path.join(testDumpSTRdir, "test_non_existent.vcf")
    if os.path.exists(fname):
        os.remove(fname)
    args.vcf = fname
    retcode = main(args)
    assert retcode == 1


# Test a file that already has Filter IDs defined
# that we want to use that are of the wrong number or type.
# Since cyvcf2 currently won't allow us to overwrite them,
# error out

# Test a file that already has a HWE Filter ID defined
# if the field is of the correct type and number, as in this case
# we overwrite it and emit a warning instead of failing
# this allows dumpSTR to be run multiple times in succession
# on the same file

# Test if basic inputs and threshold filters work for each file
# confirm that producing zipped output doesn't crash

# Test invalid options

# Test locus-level filters

"""
def test_InvalidEHOptions(args, testDumpSTRdir):
    fname = os.path.join(testDumpSTRdir, "test_ExpansionHunter.vcf")
    args.vcf = fname
    args.num_records = 10
    # TODO add once EH is implemented
"""

"""
These tests run dumpSTR and compare its output to output that has been
generated by a previous version of dumpSTR and saved in the repo. The
results are expected to be identical.

These tests are too strict and will often break because dumpSTR output
has been intentionally changed. However, the presence of these tests is
important because it should prevent any unexpected changes in output.
If you've reviewed the change in output and find it acceptable, use
trtools/testsupport/sample_vcfs/dumpSTR_vcfs/create_test_files.sh
to regenerate the test files with the new output.
"""

# make sure locus level filters produce the same output when
# --drop-filtered is set

# test advntr call level filters

# test hipstr call and locus level filters

# test gangstr call level filters that don't begin
# with 'expansion' - those are tested on another file

# test gangstr call level filters that begin with
# 'expansion' - the other gangstr call level filters
# are tested on another file

# test popstr call level filters
[ 11748, 1822, 29572, 198, 11748, 308, 13344, 198, 11748, 28686, 198, 198, 11748, 12972, 9288, 198, 198, 6738, 11485, 39455, 18601, 1330, 1635, 198, 6738, 491, 31391, 13, 9288, 11284, 13, 26791, 1330, 6818, 62, 31642, 62, 85, 12993, 11, 6...
3.478916
664
# Copyright 2012 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

try:
    import ldap
except ImportError:
    # This module needs to be importable despite ldap not being a requirement
    ldap = None

import time

from oslo_log import log as logging

import nova.conf
from nova import exception
from nova.i18n import _, _LW
from nova.network import dns_driver
from nova import utils

CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)


# Importing ldap.modlist breaks the tests for some reason,
# so this is an abbreviated version of a function from
# there.
def create_modlist(newattrs):
    modlist = []
    for attrtype in newattrs.keys():
        utf8_vals = []
        for val in newattrs[attrtype]:
            utf8_vals.append(utils.utf8(val))
        newattrs[attrtype] = utf8_vals
        modlist.append((attrtype, newattrs[attrtype]))
    return modlist


class DNSEntry(object):

    def __init__(self, ldap_object):
        """ldap_object is an instance of ldap.LDAPObject.

           It should already be initialized and bound before
           getting passed in here.
        """
        self.lobj = ldap_object
        self.ldap_tuple = None
        self.qualified_domain = None

    @classmethod
    def _get_tuple_for_domain(cls, lobj, domain):
        entry = lobj.search_s(CONF.ldap_dns_base_dn, ldap.SCOPE_SUBTREE,
                              '(associatedDomain=%s)' % utils.utf8(domain))
        if not entry:
            return None
        if len(entry) > 1:
            LOG.warning(_LW("Found multiple matches for domain "
                            "%(domain)s.\n%(entry)s"),
                        domain, entry)
        return entry[0]

    @classmethod
    def _get_all_domains(cls, lobj):
        entries = lobj.search_s(CONF.ldap_dns_base_dn,
                                ldap.SCOPE_SUBTREE, '(sOARecord=*)')
        domains = []
        for entry in entries:
            domain = entry[1].get('associatedDomain')
            if domain:
                domains.append(domain[0])
        return domains

    def _set_tuple(self, tuple):
        self.ldap_tuple = tuple

    def _qualify(self, name):
        return '%s.%s' % (name, self.qualified_domain)

    def _dequalify(self, name):
        z = ".%s" % self.qualified_domain
        if name.endswith(z):
            dequalified = name[0:name.rfind(z)]
        else:
            LOG.warning(_LW("Unable to dequalify. %(name)s is not in "
                            "%(domain)s.\n"),
                        {'name': name,
                         'domain': self.qualified_domain})
            dequalified = None

        return dequalified

    def _dn(self):
        return self.ldap_tuple[0]
    dn = property(_dn)

    def _rdn(self):
        return self.dn.partition(',')[0]
    rdn = property(_rdn)


class DomainEntry(DNSEntry):

    @classmethod
    def _soa(cls):
        date = time.strftime('%Y%m%d%H%M%S')
        soa = '%s %s %s %s %s %s %s' % (
            CONF.ldap_dns_servers[0],
            CONF.ldap_dns_soa_hostmaster,
            date,
            CONF.ldap_dns_soa_refresh,
            CONF.ldap_dns_soa_retry,
            CONF.ldap_dns_soa_expiry,
            CONF.ldap_dns_soa_minimum)
        return utils.utf8(soa)

    @classmethod
    def create_domain(cls, lobj, domain):
        """Create a new domain entry, and return an object that wraps it."""
        entry = cls._get_tuple_for_domain(lobj, domain)
        if entry:
            raise exception.FloatingIpDNSExists(name=domain, domain='')

        newdn = 'dc=%s,%s' % (domain, CONF.ldap_dns_base_dn)
        attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                 'domain', 'dcobject', 'top'],
                 'sOARecord': [cls._soa()],
                 'associatedDomain': [domain],
                 'dc': [domain]}
        lobj.add_s(newdn, create_modlist(attrs))
        return DomainEntry(lobj, domain)

    def __init__(self, ldap_object, domain):
        super(DomainEntry, self).__init__(ldap_object)
        entry = self._get_tuple_for_domain(self.lobj, domain)
        if not entry:
            raise exception.NotFound()
        self._set_tuple(entry)
        assert(entry[1]['associatedDomain'][0] == domain)
        self.qualified_domain = domain

    def delete(self):
        """Delete the domain that this entry refers to."""
        entries = self.lobj.search_s(self.dn,
                                     ldap.SCOPE_SUBTREE,
                                     '(aRecord=*)')
        for entry in entries:
            self.lobj.delete_s(entry[0])

        self.lobj.delete_s(self.dn)

    def update_soa(self):
        mlist = [(ldap.MOD_REPLACE, 'sOARecord', self._soa())]
        self.lobj.modify_s(self.dn, mlist)

    def subentry_with_name(self, name):
        entry = self.lobj.search_s(self.dn, ldap.SCOPE_SUBTREE,
                                   '(associatedDomain=%s.%s)' %
                                   (utils.utf8(name),
                                    utils.utf8(self.qualified_domain)))
        if entry:
            return HostEntry(self, entry[0])
        else:
            return None

    def subentries_with_ip(self, ip):
        entries = self.lobj.search_s(self.dn, ldap.SCOPE_SUBTREE,
                                     '(aRecord=%s)' % utils.utf8(ip))
        objs = []
        for entry in entries:
            if 'associatedDomain' in entry[1]:
                objs.append(HostEntry(self, entry))

        return objs

    def add_entry(self, name, address):
        if self.subentry_with_name(name):
            raise exception.FloatingIpDNSExists(name=name,
                                                domain=self.qualified_domain)

        entries = self.subentries_with_ip(address)
        if entries:
            # We already have an ldap entry for this IP, so we just
            # need to add the new name.
            existingdn = entries[0].dn
            self.lobj.modify_s(existingdn, [(ldap.MOD_ADD,
                                             'associatedDomain',
                                             utils.utf8(self._qualify(name)))])

            return self.subentry_with_name(name)
        else:
            # We need to create an entirely new entry.
            newdn = 'dc=%s,%s' % (name, self.dn)
            attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                     'domain', 'dcobject', 'top'],
                     'aRecord': [address],
                     'associatedDomain': [self._qualify(name)],
                     'dc': [name]}
            self.lobj.add_s(newdn, create_modlist(attrs))
            return self.subentry_with_name(name)

    def remove_entry(self, name):
        entry = self.subentry_with_name(name)
        if not entry:
            raise exception.NotFound()
        entry.remove_name(name)
        self.update_soa()


class HostEntry(DNSEntry):

    def __init__(self, parent, tuple):
        super(HostEntry, self).__init__(parent.lobj)
        self.parent_entry = parent
        self._set_tuple(tuple)
        self.qualified_domain = parent.qualified_domain

    def remove_name(self, name):
        names = self.ldap_tuple[1]['associatedDomain']
        if not names:
            raise exception.NotFound()
        if len(names) > 1:
            # We just have to remove the requested domain.
            self.lobj.modify_s(self.dn, [(ldap.MOD_DELETE, 'associatedDomain',
                                          self._qualify(utils.utf8(name)))])
            if (self.rdn[1] == name):
                # We just removed the rdn, so we need to move this entry.
                names.remove(self._qualify(name))
                newrdn = 'dc=%s' % self._dequalify(names[0])
                self.lobj.modrdn_s(self.dn, [newrdn])
        else:
            # We should delete the entire record.
            self.lobj.delete_s(self.dn)

    def modify_address(self, name, address):
        names = self.ldap_tuple[1]['associatedDomain']
        if not names:
            raise exception.NotFound()
        if len(names) == 1:
            self.lobj.modify_s(self.dn, [(ldap.MOD_REPLACE, 'aRecord',
                                          [utils.utf8(address)])])
        else:
            self.remove_name(name)
            self.parent.add_entry(name, address)

    def _names(self):
        names = []
        for domain in self.ldap_tuple[1]['associatedDomain']:
            names.append(self._dequalify(domain))
        return names
    names = property(_names)

    def _ip(self):
        ip = self.ldap_tuple[1]['aRecord'][0]
        return ip
    ip = property(_ip)

    def _parent(self):
        return self.parent_entry
    parent = property(_parent)


class LdapDNS(dns_driver.DNSDriver):
    """Driver for PowerDNS using ldap as a back end.

       This driver assumes ldap-method=strict, with all domains
       in the top-level, aRecords only.
    """

    def __init__(self):
        if not ldap:
            raise ImportError(_('ldap not installed'))

        self.lobj = ldap.initialize(CONF.ldap_dns_url)
        self.lobj.simple_bind_s(CONF.ldap_dns_user,
                                CONF.ldap_dns_password)

    def get_domains(self):
        return DomainEntry._get_all_domains(self.lobj)

    def create_entry(self, name, address, type, domain):
        if type.lower() != 'a':
            raise exception.InvalidInput(_("This driver only supports "
                                           "type 'a' entries."))

        dEntry = DomainEntry(self.lobj, domain)
        dEntry.add_entry(name, address)

    def delete_entry(self, name, domain):
        dEntry = DomainEntry(self.lobj, domain)
        dEntry.remove_entry(name)

    def get_entries_by_address(self, address, domain):
        try:
            dEntry = DomainEntry(self.lobj, domain)
        except exception.NotFound:
            return []
        entries = dEntry.subentries_with_ip(address)
        names = []
        for entry in entries:
            names.extend(entry.names)
        return names

    def get_entries_by_name(self, name, domain):
        try:
            dEntry = DomainEntry(self.lobj, domain)
        except exception.NotFound:
            return []
        nEntry = dEntry.subentry_with_name(name)
        if nEntry:
            return [nEntry.ip]

    def modify_address(self, name, address, domain):
        dEntry = DomainEntry(self.lobj, domain)
        nEntry = dEntry.subentry_with_name(name)
        nEntry.modify_address(name, address)

    def create_domain(self, domain):
        DomainEntry.create_domain(self.lobj, domain)

    def delete_domain(self, domain):
        dEntry = DomainEntry(self.lobj, domain)
        dEntry.delete()

    def delete_dns_file(self):
        LOG.warning(_LW("This shouldn't be getting called except during "
                        "testing."))
        pass
[ 27471, 62, 20850, 198, 23893, 91, 6, 2, 15069, 2321, 6858, 21555, 1252, 329, 262, 44877, 5693, 6, 198, 21283, 91, 6, 59, 77, 6, 198, 23893, 91, 6, 2, 6, 198, 21283, 91, 6, 59, 77, 6, 198, 23893, 91, 6, 2, 220, 220, 220, 4996...
1.799445
15,502
# -*- coding: utf-8 -*-
"""
1946. Largest Number After Mutating Substring
https://leetcode.com/problems/largest-number-after-mutating-substring/

Example 1:
Input: num = "132", change = [9,8,5,0,3,6,4,2,6,8]
Output: "832"
Explanation: Replace the substring "1":
- 1 maps to change[1] = 8.
Thus, "132" becomes "832".
"832" is the largest number that can be created, so return it.

Example 2:
Input: num = "021", change = [9,4,3,5,7,2,1,9,0,6]
Output: "934"
Explanation: Replace the substring "021":
- 0 maps to change[0] = 9.
- 2 maps to change[2] = 3.
- 1 maps to change[1] = 4.
Thus, "021" becomes "934".
"934" is the largest number that can be created, so return it.

Example 3:
Input: num = "5", change = [1,4,7,5,3,2,5,6,9,4]
Output: "5"
Explanation: "5" is already the largest number that can be created, so return it.
"""
from typing import List
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 1129, 3510, 13, 406, 853, 395, 7913, 2293, 13859, 803, 3834, 8841, 198, 5450, 1378, 293, 316, 8189, 13, 785, 14, 1676, 22143, 14, 28209, 12, 17618, 12...
2.486957
345
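The row above stops at the import, without a solution. A sketch of the standard greedy approach, written as a plain function rather than the LeetCode Solution method: mutate one contiguous run of digits, starting at the first digit that strictly improves and stopping at the first digit that would get worse.

from typing import List


def maximumNumber(num: str, change: List[int]) -> str:
    digits = list(num)
    mutating = False
    for i, d in enumerate(int(c) for c in digits):
        if change[d] > d:
            digits[i] = str(change[d])  # start or extend the mutated substring
            mutating = True
        elif change[d] < d and mutating:
            break  # the substring must stay contiguous, so stop here
        # change[d] == d is neutral: the run may continue either way
    return "".join(digits)


assert maximumNumber("132", [9, 8, 5, 0, 3, 6, 4, 2, 6, 8]) == "832"
assert maximumNumber("021", [9, 4, 3, 5, 7, 2, 1, 9, 0, 6]) == "934"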
import pytest

from bot.bot import Bot
[ 11748, 12972, 9288, 198, 6738, 10214, 13, 13645, 1330, 18579, 628 ]
3.545455
11
""" Hackerrank Day 9: Recursion 3 https://www.hackerrank.com/challenges/30-recursion/problem?h_r=email&unlock_token=bc6d5f3963afb26ed0b2f69c3f4f3ddb1826e1b2&utm_campaign=30_days_of_code_continuous&utm_medium=email&utm_source=daily_reminder Objective Today, we are learning about an algorithmic concept called recursion. Check out the Tutorial tab for learning materials and an instructional video. Recursive Method for Calculating Factorial Function Description Complete the factorial function in the editor below. Be sure to use recursion. factorial has the following paramter: int n: an integer Returns int: the factorial of Note: If you fail to use recursion or fail to name your recursive function factorial or Factorial, you will get a score of . Input Format A single integer, (the argument to pass to factorial). Constraints Your submission must contain a recursive function named factorial. Sample Input 3 Sample Output 6 Explanation Consider the following steps. After the recursive calls from step 1 to 3, results are accumulated from step 3 to 1. """ import math import os import random import re import sys # Complete the factorial function below. if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) result = factorial(n) fptr.write(str(result) + '\n') fptr.close()
[ 37811, 198, 32833, 8056, 962, 198, 12393, 860, 25, 3311, 24197, 513, 198, 5450, 1378, 2503, 13, 31153, 8056, 962, 13, 785, 14, 36747, 34120, 14, 1270, 12, 8344, 24197, 14, 45573, 30, 71, 62, 81, 28, 12888, 5, 403, 5354, 62, 30001, ...
3.238663
419
from django.db import models
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198 ]
3.625
8
import logging

from qbrobot import qsettings

try:
    from util import send_dingding
except ImportError:
    DINGDING_CANUSE = False
else:
    DINGDING_CANUSE = True

"""
class DingDingLogger
pass all args to logger.method, and call dingding.send_msg()
1. debug message don't send to dingding.
2. only send_msg( message ), can't pass multi args.
"""

"""
handler = logging.handlers.RotatingFileHandler(str(logFile) + '.LOG',
                                               maxBytes=1024 * 1024 * 500,
                                               backupCount=5)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger = logging.getLogger(str(logFile))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
"""
[ 11748, 18931, 198, 198, 6738, 220, 10662, 7957, 13645, 1330, 10662, 33692, 628, 198, 28311, 1058, 198, 220, 220, 220, 422, 7736, 1330, 3758, 62, 12083, 12083, 220, 198, 16341, 17267, 12331, 25, 198, 220, 220, 220, 360, 2751, 35, 2751, ...
2.664336
286
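The docstring above describes a DingDingLogger whose implementation is absent from the row. A sketch of the wrapper it describes, assuming send_dingding accepts a single string, as the sample's import suggests (hypothetical; the real class is not shown):

import logging


class DingDingLogger:
    """Forward log calls to a standard logger and mirror non-debug
    messages to DingDing (sketch only)."""

    def __init__(self, logger: logging.Logger, send_msg=None):
        self._logger = logger
        self._send_msg = send_msg  # e.g. util.send_dingding, when importable

    def debug(self, msg, *args):
        self._logger.debug(msg, *args)  # rule 1: debug is never forwarded

    def _log_and_send(self, level, msg, *args):
        self._logger.log(level, msg, *args)
        if self._send_msg:
            # rule 2: only a single rendered message can be passed on
            self._send_msg(msg % args if args else msg)

    def info(self, msg, *args):
        self._log_and_send(logging.INFO, msg, *args)

    def warning(self, msg, *args):
        self._log_and_send(logging.WARNING, msg, *args)

    def error(self, msg, *args):
        self._log_and_send(logging.ERROR, msg, *args)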
# %%
import os
import pandas as pd
import numpy as np
import datetime

# %% DATA LOADING
path = r'F:\Trabajo\Promotive\Chile\PRT\7\CSV\3'
os.chdir(path)
files = os.listdir(path)
files

# %%
files_xls = [f for f in files if f[-3:] == 'csv']
files_xls

# %%
columnas = ['PPU', 'MARCA', 'MODELO', 'ANO_FABRICACION', 'NUM_MOTOR', 'NUM_CHASIS', 'VIN']
chile = pd.DataFrame(columns=columnas)

# %%
for f in files_xls:
    data = pd.read_csv(f, sep=";", encoding="latin-1")
    chile = pd.concat([chile, data], ignore_index=True, join='outer')

# %%
chile = chile[columnas]

# %%
chile.drop_duplicates(subset="PPU", inplace=True)

# %%
chile.to_csv(r'F:\Trabajo\Promotive\Chile\PRT\Limpio\OfflineRB3.csv')

# %%
chile

# %%
[ 2, 43313, 198, 11748, 28686, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 4818, 8079, 628, 198, 2, 43313, 17368, 9273, 5550, 360, 1404, 2640, 198, 6978, 796, 374, 6, 37, 7479, 2898, 397, 34944,...
2.181269
331
import os.path as osp

import numpy as np
from PIL import Image

import torch.utils.data as data
import torch

__all__ = ['LFW_CROP']

EXTENSION_FACTOR = 2
[ 11748, 28686, 13, 6978, 355, 267, 2777, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 350, 4146, 1330, 7412, 198, 198, 11748, 28034, 13, 26791, 13, 7890, 355, 1366, 198, 11748, 28034, 198, 198, 834, 439, 834, 796, 37250, 43, 24160, 6...
2.736842
57
from pyimei import ImeiSupport

# testing classes
ImeiSupport.test()

valid_imeis = [
    356938035643809,
    490154203237518,
    "356938035643809"
]
invalid_imeis = [
    358065019104263,
    "357805023984941",
    356938035643801
]

checkImeisArray(valid_imeis)
checkImeisArray(invalid_imeis)

print("Generating independent FAKE imeis...")
RANDOM_IMEIS_QTY = 5
for i in range(RANDOM_IMEIS_QTY):
    print("\tfake IMEI[{}] = {}".format(i + 1, ImeiSupport.generateNew()))

print("Generating sequential FAKE imeis:")
DEP_RANDOM_IMEIS_QTY = 5
startImei = ImeiSupport.generateNew()
currentImei = startImei
print("start IMEI: {}".format(startImei))
for i in range(DEP_RANDOM_IMEIS_QTY):  # fixed: loop used RANDOM_IMEIS_QTY by mistake
    currentImei = ImeiSupport.next(currentImei)
    print("\tfake IMEI[{}] = {}".format(i + 1, currentImei))
print("DONE")
[ 6738, 12972, 45519, 1330, 314, 1326, 72, 15514, 201, 198, 201, 198, 2, 33407, 6097, 201, 198, 40, 1326, 72, 15514, 13, 9288, 3419, 201, 198, 201, 198, 12102, 62, 524, 271, 796, 685, 201, 198, 220, 220, 220, 3439, 3388, 23734, 2327, ...
2.076355
406
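checkImeisArray is called above but never defined in the row. A sketch using the standard Luhn checksum that IMEI validity is based on; pyimei may well expose its own validator under a different name, so this helper is an assumption:

def luhn_valid(imei) -> bool:
    # Luhn checksum: double every second digit from the right,
    # subtract 9 from doubles above 9, and require total % 10 == 0.
    digits = [int(c) for c in str(imei)]
    total = 0
    for i, d in enumerate(reversed(digits)):
        if i % 2 == 1:
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0


def checkImeisArray(imeis):
    for imei in imeis:
        print("IMEI {} is {}".format(imei, "valid" if luhn_valid(imei) else "INVALID"))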
""" .. code-block:: python from aioauth import responses Response objects used throughout the project. ---- """ from dataclasses import dataclass, field from http import HTTPStatus from typing import Dict from .collections import HTTPHeaderDict from .constances import default_headers from .types import ErrorType, TokenType
[ 37811, 198, 492, 2438, 12, 9967, 3712, 21015, 628, 220, 220, 220, 422, 257, 952, 18439, 1330, 9109, 198, 198, 31077, 5563, 973, 3690, 262, 1628, 13, 198, 198, 650, 198, 37811, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 11, ...
3.788889
90
#!/usr/bin/env python3

import logging
import platform
import time
from functools import partial
from statistics import stdev
from typing import List, Tuple, Dict, Union, Any

import psutil
from joblib import Parallel, delayed

from fimdp.objectives import BUCHI
from fipomdp import ConsPOMDP
from fipomdp.energy_solvers import ConsPOMDPBasicES
from fipomdp.experiments.NYC_environment import NYCPOMDPEnvironment
from fipomdp.experiments.UUV_experiment import simulate_observation
from fipomdp.pomcp import OnlineStrategy
from fipomdp.rollout_functions import basic, grid_manhattan_distance, product, consumption_based

if __name__ == "__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 18931, 198, 11748, 3859, 198, 11748, 640, 198, 6738, 1257, 310, 10141, 1330, 13027, 198, 6738, 7869, 1330, 336, 7959, 198, 6738, 19720, 1330, 7343, 11, 309, 29291, 11, 360, ...
3.245098
204
import network
import time

# deactivate AP
ap = network.WLAN(network.AP_IF)
ap.active(False)

# activate static network
wlan = network.WLAN(network.STA_IF)
wlan.active(True)

# connect to local WIFI
wlan.connect('TFM-Attendees')

# wait until connected
while not wlan.isconnected():
    print('connecting...')
    time.sleep(1)

print('Connected!')
print('Current network config:', wlan.ifconfig())
[ 11748, 3127, 198, 11748, 640, 198, 198, 2, 390, 39022, 3486, 198, 499, 796, 3127, 13, 54, 25697, 7, 27349, 13, 2969, 62, 5064, 8, 198, 499, 13, 5275, 7, 25101, 8, 198, 198, 2, 15155, 9037, 3127, 198, 86, 9620, 796, 3127, 13, 54,...
2.829787
141
import asyncio
import logging
import os
import time

from addict import Addict
from aiogram.types import Message

from hikcamerabot.config.config import get_result_queue
from hikcamerabot.constants import Event, VideoGifType
from hikcamerabot.utils.utils import format_ts, gen_random_str
[ 11748, 30351, 952, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 640, 198, 198, 6738, 19678, 1330, 3060, 713, 198, 6738, 257, 72, 21857, 13, 19199, 1330, 16000, 198, 198, 6738, 289, 1134, 66, 2382, 397, 313, 13, 11250, 13, 11250, ...
3.284091
88
""" Provides functionalilty for working with celled hypercubes. Hypercubes are extensions of lines, squares and cubes into higher dimensions. Celled hypercubes can be thought as a grid or lattice structure. From this point, hypercubes is used to mean celled hypercubes. A hypercube can be described by its dimension and the number of cells in any dimension. We denote this as h(d, n). For example: h(2, 3) is a 3x3 grid; h(3, 4) is a 4x4x4 lattice. A hypercube of dimension d may also be referred to as a d-cube. A cell's position can be specified in coordinate style. For example, given h(3, 4) and an agreed ordering of dimension then some valid coordinates are (1,1,1), (2,1,3) and (4,4,4). The term m-agonal is a short for "m-dimensional diagonal" and can be thought of as a line of contiguous cells that span m dimensions. For example, in a 3-cube you would find many 1-agonals, 2-agonals and 3-agonals. A 1-agonal is customarily known as a row, column or pillar. In another example, if a line of contiguous cells in a 5-cell have the property that 3 coordinates change, while the others remain constant, these cells constitute a 3-agonal. For a given h(d, n), 1 <= m <= n, a m-agonal always has n cells. The term line is used to refer to any m-agonal in general. A cell apppears in multiple lines, which are refered to as the scope of the cell, or the scoped lines of the cell. The combination of lines and scopes is referred to as the structure of the hypercube. For a given cell, we define its connected cells as those cells that appear in the scoped lines of the given cell. We define a slice as a sub-cube of a hypercube. For example, consder h(2,3), a 3x3 hypercube. Let the dimensions be denoted as d1 and d2, respectively, where 1 <= d1, d2 <= 3. If we consider d1 as rows, and d2 as columns, then the slice that is the first column is defined by d1 = 1, 2, 3, and d2 = 1. This has the form h(1, 3). The slice that is the top left 2x2 corner is defined by d1, d2 = 1, 2. This has the form h(2, 2). This module essentially has 2 classes of functions: 1. Those that use a numpy ndarray to implement the underlying hypercube. These functions have the suffix _np. An array of d dimensions may be referred to as a d-array 2. Those that do not implement the underlying hypercube but provide information as coordinates that can be used with a user-implementation of the hypercube. These functions have the suffix _coord. ######################################################################## Type annotations are used in this module. In addition to the standard types defined in the typing module, several aliases are also defined which can be viewed in the source code. """ # numpy (and scipy) don't yet have type annotations import numpy as np # type: ignore from scipy.special import comb # type: ignore import itertools as it import numbers import re from typing import List, Callable, Union, Collection, Tuple, Any, Type, Deque from typing import DefaultDict, TypeVar, Counter, Dict, Iterable, Generator, Sequence Cell_coord = Tuple[int, ...] 
Cube_np = TypeVar('Cube_np', np.ndarray, np.ndarray) # Cube_np should really be a numpy array representing h(d, n) Line_np = TypeVar('Line_np', np.ndarray, np.ndarray) # Line_np should really be a 1d numpy array with n elements Line_coord = List[Cell_coord] Lines_np = List[Line_np] Lines_enum_np = Dict[int, Line_np] Lines_coord = List[Line_coord] Lines_enum_coord = Dict[int, Line_coord] Scopes_np = DefaultDict[Cell_coord, Lines_np] Scopes_coord = DefaultDict[Cell_coord, Lines_coord] Scopes_enum = DefaultDict[Cell_coord, List[int]] Scopes = Union[Scopes_np, Scopes_coord, Scopes_enum] Structure_np = Tuple[Cube_np, Lines_np, Scopes_np] Structure_enum_np = Tuple[Cube_np, Lines_enum_np, Scopes_enum] Structure_coord = Tuple[Lines_coord, Scopes_coord] Structure_enum_coord = Tuple[Lines_enum_coord, Scopes_enum] Connected_cells = DefaultDict[Cell_coord, List[Cell_coord]] def num_lines(d: int, n: int) -> int: """ num_lines(d: int, n: int) -> int: Calculate the number of lines in a hypercube. Parameters ---------- d The number of dimensions of the hypercube n The number of cells in any dimension Returns ------- The number of lines in a hypercube. See Also -------- num_lines_grouped Notes ----- There are two ways to calculate the number of lines: 1. Call the function num_lines_grouped and sum the number of lines spanning each dimension. 2. Directly, using the formula: ((n+2)**d-n**d)/2 Sketch of proof: Embed the n**d hypercube in an (n+2)**d hypercube which extends one cell further in each dimension. Then each winning line in the n**d hypercube terminates in exactly two "border" cells of the enlarged hypercube, and these two borders are unique to that line. Moreover, every border cell is at the end of a line, so that (n+2)**d border cells are in two-to-one correspondence with the winning lines. (See Hypercube -Tic-Tac-Toe: Solomon W.Golomb and Alfred W. Hales) Examples -------- >>> num_lines(2, 3) 8 >>> num_lines(3, 4) 76 """ # return sum(list(num_lines_grouped(d, n))) return int(((n+2)**d-n**d)/2) def get_scopes_np(lines: Lines_np, d: int) -> Scopes_np: """ get_scopes_np(lines: Lines_np, d: int) -> Scopes_np: Calculate the scope of each cell in a hypercube Parameters ---------- lines The returned value from get_lines_np(hc) where hc is of the form np.arange(n ** d, dtype = intx__).reshape([n] * d). That is, hc is populated with the values 0,1,2,...,n^d - 1. dim The dimension of the hypercube that was used to generate `lines`. Returns ------- A dictionary with keys equal to the coordinates of each cell in the hypercube. For each cell key, the value is the cell's scope - a list of numpy.ndarray views that are lines containing the cell. See Also -------- get_lines_np Notes ----- The implementation of this function uses np.unravel_index, and relies uopn the lines parameter being generated from an array populated with values 0,1,2,... 
Examples -------- >>> import numpy as np >>> from pprint import pprint >>> hc = np.arange(4).reshape(2, 2) >>> hc array([[0, 1], [2, 3]]) >>> lines = list(get_lines_np(hc)) >>> lines #doctest: +NORMALIZE_WHITESPACE [array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3]), array([0, 3]), array([2, 1])] >>> scopes = get_scopes_np(lines, 2) >>> pprint(scopes) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [array([0, 2]), array([0, 1]), array([0, 3])], (0, 1): [array([1, 3]), array([0, 1]), array([2, 1])], (1, 0): [array([0, 2]), array([2, 3]), array([2, 1])], (1, 1): [array([1, 3]), array([2, 3]), array([0, 3])]}) >>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [array([0, 2]), array([0, 1]), array([0, 3])]), ((0, 1), [array([1, 3]), array([0, 1]), array([2, 1])]), ((1, 0), [array([0, 2]), array([2, 3]), array([2, 1])]), ((1, 1), [array([1, 3]), array([2, 3]), array([0, 3])])] >>> hc[0, 0] = 99 >>> pprint(scopes) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [array([99, 2]), array([99, 1]), array([99, 3])], (0, 1): [array([1, 3]), array([99, 1]), array([2, 1])], (1, 0): [array([99, 2]), array([2, 3]), array([2, 1])], (1, 1): [array([1, 3]), array([2, 3]), array([99, 3])]}) >>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [array([99, 2]), array([99, 1]), array([99, 3])]), ((0, 1), [array([1, 3]), array([99, 1]), array([2, 1])]), ((1, 0), [array([99, 2]), array([2, 3]), array([2, 1])]), ((1, 1), [array([1, 3]), array([2, 3]), array([99, 3])])] """ n = lines[0].size shape = [n] * d scopes: Scopes_np = DefaultDict(list) for line in lines: for j in range(n): cell = np.unravel_index(line[j], shape) scopes[cell].append(line) return scopes def structure_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) -> Structure_np: """ structure_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) -> Structure_np: Return a hypercube, its lines, and the scopes of its cells. Parameters ---------- d The number of dimensions of the hypercube n The number of cells in any dimension zeros If true, all values in array are 0, else they are 0,1,2,... OFFSET The number of cells is n^d. If this is greater than (2^31 - OFFSET - 1) then we use np.int64 (instead of np.int32) as the dtype of the numpy array. Returns ------- The hypercube (as a numpy array), its lines, and the scopes of its cells.
See Also -------- get_lines_np get_scopes_np Examples -------- >>> import numpy as np >>> from pprint import pprint >>> struct = structure_np(2, 2) >>> struct[0] array([[0, 0], [0, 0]]) >>> struct[1] #doctest: +NORMALIZE_WHITESPACE [array([0, 0]), array([0, 0]), array([0, 0]), array([0, 0]), array([0, 0]), array([0, 0])] >>> pprint(struct[2]) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [array([0, 0]), array([0, 0]), array([0, 0])], (0, 1): [array([0, 0]), array([0, 0]), array([0, 0])], (1, 0): [array([0, 0]), array([0, 0]), array([0, 0])], (1, 1): [array([0, 0]), array([0, 0]), array([0, 0])]}) >>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [array([0, 0]), array([0, 0]), array([0, 0])]), ((0, 1), [array([0, 0]), array([0, 0]), array([0, 0])]), ((1, 0), [array([0, 0]), array([0, 0]), array([0, 0])]), ((1, 1), [array([0, 0]), array([0, 0]), array([0, 0])])] >>> struct = structure_np(2, 2, False) >>> struct[0] array([[0, 1], [2, 3]]) >>> struct[1] #doctest: +NORMALIZE_WHITESPACE [array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3]), array([0, 3]), array([2, 1])] >>> pprint(struct[2]) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [array([0, 2]), array([0, 1]), array([0, 3])], (0, 1): [array([1, 3]), array([0, 1]), array([2, 1])], (1, 0): [array([0, 2]), array([2, 3]), array([2, 1])], (1, 1): [array([1, 3]), array([2, 3]), array([0, 3])]}) >>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [array([0, 2]), array([0, 1]), array([0, 3])]), ((0, 1), [array([1, 3]), array([0, 1]), array([2, 1])]), ((1, 0), [array([0, 2]), array([2, 3]), array([2, 1])]), ((1, 1), [array([1, 3]), array([2, 3]), array([0, 3])])] """ # number of cells is n^d. If this is greater than (2^31 - OFFSET - 1) # then we use int64. This is because the get_scopes # function populates the arrays with values 0,1,2, ... dtype = np.int64 if n ** d > 2 ** 31 - OFFSET - 1 else np.int32 hc = np.arange(n ** d, dtype = dtype).reshape([n] * d) lines = list(get_lines_np(hc)) scopes = get_scopes_np(lines, d) if zeros: hc.fill(0) return (hc, lines, scopes) def get_lines_enum_np(hc: Cube_np) -> Lines_enum_np: """ get_lines_enum_np(hc: Cube_np) -> Lines_enum_np Returns enumerated lines of a hypercube Parameters ---------- hc The hypercube whose lines are to be calculated Returns ------- Enumerated numpy.ndarray views of the lines in `hc`. See Also -------- get_lines_np Examples -------- >>> import numpy as np >>> from pprint import pprint >>> hc = np.arange(4).reshape(2, 2) >>> hc array([[0, 1], [2, 3]]) >>> lines = get_lines_enum_np(hc) >>> pprint(lines) #doctest: +SKIP {0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]), 3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])} >>> sorted(lines.items()) #doctest: +NORMALIZE_WHITESPACE [(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])), (3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))] """ lines: Lines_enum_np = dict() idx = 0 for line in get_lines_np(hc): lines[idx] = line idx += 1 return lines def get_scopes_enum_np(lines: Lines_enum_np, d: int) -> Scopes_enum: """ get_scopes_enum_np(lines: Lines_enum_np, d: int) -> Scopes_enum: Calculate the scope of each cell in a hypercube Parameters ---------- lines The returned value from get_lines_enum_np(hc) where hc is of the form np.arange(n ** d, dtype = intxx).reshape([n] * d). That is, hc is populated with the values 0,1,2,...,n^d - 1. d The dimension of the hypercube that was used to generate `lines`.
Returns ------- A dictionary with keys equal to the coordinates of each cell in the hypercube. For each cell key, the value is the cell's scope - a list of line enumerations that are lines containing the cell. See Also -------- get_lines_enum_np Examples -------- >>> import numpy as np >>> from pprint import pprint >>> hc = np.arange(4).reshape(2, 2) >>> hc array([[0, 1], [2, 3]]) >>> lines = get_lines_enum_np(hc) >>> pprint(lines) #doctest: +SKIP {0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]), 3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])} >>> sorted(lines.items()) #doctest: +NORMALIZE_WHITESPACE [(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])), (3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))] >>> scopes = get_scopes_enum_np(lines, 2) >>> pprint(scopes) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [0, 2, 4], (0, 1): [1, 2, 5], (1, 0): [0, 3, 5], (1, 1): [1, 3, 4]}) >>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]), ((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])] """ n = lines[0].size shape = [n] * d scopes: Scopes_enum = DefaultDict(list) for idx, line in lines.items(): for j in range(n): cell = np.unravel_index(line[j], shape) scopes[cell].append(idx) return scopes def structure_enum_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) -> Structure_enum_np: """ structure_enum_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) -> Structure_enum_np: Return a hypercube, its enumerated lines, and the scopes of its cells. Parameters ---------- d The number of dimensions of the hypercube n The number of cells in any dimension zeros If true, all values in array are 0, else they are 0,1,2,... OFFSET The number of cells is n^d. If this is greater than (2^31 - OFFSET - 1) then we use np.int64 (instead of np.int32) as the dtype of the numpy array. Returns ------- A tuple containing the hypercube, its enumerated lines, and the scopes of its cells. See Also -------- get_lines_enum_np get_scopes_enum_np Examples -------- >>> import numpy as np >>> from pprint import pprint >>> struct = structure_enum_np(2, 2) >>> struct[0] array([[0, 0], [0, 0]]) >>> pprint(struct[1]) #doctest: +SKIP {0: array([0, 0]), 1: array([0, 0]), 2: array([0, 0]), 3: array([0, 0]), 4: array([0, 0]), 5: array([0, 0])} >>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE [(0, array([0, 0])), (1, array([0, 0])), (2, array([0, 0])), (3, array([0, 0])), (4, array([0, 0])), (5, array([0, 0]))] >>> pprint(struct[2]) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [0, 2, 4], (0, 1): [1, 2, 5], (1, 0): [0, 3, 5], (1, 1): [1, 3, 4]}) >>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]), ((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])] >>> struct = structure_enum_np(2, 2, False) >>> struct[0] array([[0, 1], [2, 3]]) >>> pprint(struct[1]) #doctest: +SKIP {0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]), 3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])} >>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE [(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])), (3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))] >>> pprint(struct[2]) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [0, 2, 4], (0, 1): [1, 2, 5], (1, 0): [0, 3, 5], (1, 1): [1, 3, 4]}) >>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]), ((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])] """ # number of cells is n^d.
If this is greater than (2^31 - OFFSET - 1) # then we use int64. This is because the get_scopes # function populates the arrays with values 0,1,2, ... dtype = np.int64 if n ** d > 2 ** 31 - OFFSET - 1 else np.int32 hc = np.arange(n ** d, dtype = dtype).reshape([n] * d) lines = get_lines_enum_np(hc) scopes = get_scopes_enum_np(lines, d) if zeros: hc.fill(0) return (hc, lines, scopes) def connected_cells_np(lines: Lines_enum_np, scopes: Scopes_enum, d: int) -> Connected_cells: """ connected_cells_np(lines: Lines_enum_np, scopes: Scopes_enum, d: int) -> Connected_cells: Calculate the connected cells for a hypercube. Parameters ---------- lines The enumerated lines of the hypercube scopes The enumerated scopes of the hypercube d The number of dimensions of the hypercube Returns ------- A dictionary with keys being cell coordinates and values the connected cell coordinates. See Also -------- structure_enum_np Examples -------- >>> from pprint import pprint >>> d = 2 >>> n = 3 >>> struct = structure_enum_np(d, n, False) >>> struct[1] #doctest: +NORMALIZE_WHITESPACE {0: array([0, 3, 6]), 1: array([1, 4, 7]), 2: array([2, 5, 8]), 3: array([0, 1, 2]), 4: array([3, 4, 5]), 5: array([6, 7, 8]), 6: array([0, 4, 8]), 7: array([6, 4, 2])} >>> pprint(struct[2]) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [0, 3, 6], (0, 1): [1, 3], (0, 2): [2, 3, 7], (1, 0): [0, 4], (1, 1): [1, 4, 6, 7], (1, 2): [2, 4], (2, 0): [0, 5, 7], (2, 1): [1, 5], (2, 2): [2, 5, 6]}) >>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [0, 3, 6]), ((0, 1), [1, 3]), ((0, 2), [2, 3, 7]), ((1, 0), [0, 4]), ((1, 1), [1, 4, 6, 7]), ((1, 2), [2, 4]), ((2, 0), [0, 5, 7]), ((2, 1), [1, 5]), ((2, 2), [2, 5, 6])] >>> connected_cells = connected_cells_np(struct[1], struct[2], d) >>> pprint(connected_cells) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)], (0, 1): [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)], (0, 2): [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (0, 2)], (1, 0): [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)], (1, 1): [(0, 1), (1, 2), (0, 0), (0, 2), (2, 1), (2, 0), (2, 2), (1, 0), (1, 1)], (1, 2): [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)], (2, 0): [(0, 0), (0, 2), (2, 1), (2, 0), (2, 2), (1, 0), (1, 1)], (2, 1): [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)], (2, 2): [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (0, 2)]}) >>> sorted(connected_cells.items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]), ((0, 1), [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)]), ((0, 2), [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (0, 2)]), ((1, 0), [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)]), ((1, 1), [(0, 1), (1, 2), (0, 0), (0, 2), (2, 1), (2, 0), (2, 2), (1, 0), (1, 1)]), ((1, 2), [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)]), ((2, 0), [(0, 0), (0, 2), (2, 1), (2, 0), (2, 2), (1, 0), (1, 1)]), ((2, 1), [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)]), ((2, 2), [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (0, 2)])] """ n = lines[0].size shape = [n] * d connected_cells: Connected_cells = DefaultDict(list) for cell, lines_enums in scopes.items(): for line_enum in lines_enums: for j in range(n): cc = np.unravel_index(lines[line_enum][j], shape) connected_cells[cell].append(cc) connected_cells[cell] = list(set(connected_cells[cell])) return connected_cells def get_scopes_coord(lines: Lines_coord, d: int) -> Scopes_coord: """ get_scopes_coord(lines: Lines_coord, d: int) -> Scopes_coord: Calculate the scope of each cell in a hypercube Parameters ---------- lines The returned
value from get_lines_coord(d, n). d The dimension of the hypercube that was used to generate `lines`. Returns ------- A dictionary with keys equal to the coordinates of each cell in the hypercube. For each cell key, the value is the cell's scope - a list of coordinates that are lines containing the cell. See Also -------- get_lines_coord Examples -------- >>> from pprint import pprint >>> lines = list(get_lines_coord(2, 2)) >>> lines #doctest: +NORMALIZE_WHITESPACE [[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)], [(0, 1), (1, 0)]] >>> scopes = get_scopes_coord(lines, 2) >>> pprint(scopes) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]], (0, 1): [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]], (1, 0): [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]], (1, 1): [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]]}) >>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]]), ((0, 1), [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]]), ((1, 0), [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]]), ((1, 1), [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]])] """ n = len(lines[0]) scopes: Scopes_coord = DefaultDict(list) cells = it.product(range(n), repeat = d) # get all possible cells for cell in cells: for line in lines: if cell in line: scopes[cell].append(line) return scopes def structure_coord(d: int, n: int) -> Structure_coord: """ structure_coord(d: int, n: int) -> Structure_coord: Return lines, and the scopes of its cells, for h(d, n) Parameters ---------- d The number of dimensions of the hypercube n The number of cells in any dimension Returns ------- Lines, and the scopes of its cells, for h(d, n) See Also -------- get_lines_coord get_scopes_coord Examples -------- >>> from pprint import pprint >>> struct = structure_coord(2, 2) >>> struct[0] #doctest: +NORMALIZE_WHITESPACE [[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)], [(0, 1), (1, 0)]] >>> pprint(struct[1]) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]], (0, 1): [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]], (1, 0): [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]], (1, 1): [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]]}) >>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]]), ((0, 1), [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]]), ((1, 0), [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]]), ((1, 1), [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]])] """ lines = list(get_lines_coord(d, n)) scopes = get_scopes_coord(lines, d) return (lines, scopes) def get_lines_enum_coord(d: int, n: int) -> Lines_enum_coord: """ get_lines_enum_coord(d: int, n: int) -> Lines_enum_coord: Returns enumerated lines of a hypercube Parameters ---------- d The number of dimensions of the hypercube n The number of cells in any dimension Returns ------- Enumerated lines in h(d, n).
See Also -------- get_lines_coord Examples -------- >>> lines = get_lines_enum_coord(2, 2) >>> lines #doctest: +NORMALIZE_WHITESPACE {0: [(0, 0), (1, 0)], 1: [(0, 1), (1, 1)], 2: [(0, 0), (0, 1)], 3: [(1, 0), (1, 1)], 4: [(0, 0), (1, 1)], 5: [(0, 1), (1, 0)]} """ lines: Lines_enum_coord = dict() idx = 0 for line in get_lines_coord(d, n): lines[idx] = line idx += 1 return lines def get_scopes_enum_coord(lines: Lines_enum_coord, d: int) -> Scopes_enum: """ get_scopes_enum_coord(lines: Lines_enum_coord, d: int) -> Scopes_enum: Calculate the scope of each cell in a hypercube Parameters ---------- lines The returned value from get_lines_enum_coord(d, n). d The dimension of the hypercube that was used to generate `lines`. Returns ------- A dictionary with keys equal to the coordinates of each cell in the hypercube. For each cell key, the value is the cell's scope - a list of line enumerations that are lines containing the cell. See Also -------- get_lines_enum_coord Examples -------- >>> from pprint import pprint >>> lines = get_lines_enum_coord(2, 2) >>> lines #doctest: +NORMALIZE_WHITESPACE {0: [(0, 0), (1, 0)], 1: [(0, 1), (1, 1)], 2: [(0, 0), (0, 1)], 3: [(1, 0), (1, 1)], 4: [(0, 0), (1, 1)], 5: [(0, 1), (1, 0)]} >>> scopes = get_scopes_enum_coord(lines, 2) >>> pprint(scopes) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [0, 2, 4], (0, 1): [1, 2, 5], (1, 0): [0, 3, 5], (1, 1): [1, 3, 4]}) >>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]), ((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])] """ n = len(lines[0]) scopes: Scopes_enum = DefaultDict(list) cells = it.product(range(n), repeat = d) # get all possible cells for cell in cells: for idx, line in lines.items(): if cell in line: scopes[cell].append(idx) return scopes def structure_enum_coord(d: int, n: int) -> Structure_enum_coord: """ structure_enum_coord(d: int, n: int) -> Structure_enum_coord: Return enumerated lines, and the scopes of its cells, for h(d, n) Parameters ---------- d The number of dimensions of the hypercube n The number of cells in any dimension Returns ------- Enumerated lines, and the scopes of its cells, for h(d, n) See Also -------- get_lines_enum_coord get_scopes_enum_coord Examples -------- >>> from pprint import pprint >>> struct = structure_enum_coord(2, 2) >>> struct[0] #doctest: +NORMALIZE_WHITESPACE {0: [(0, 0), (1, 0)], 1: [(0, 1), (1, 1)], 2: [(0, 0), (0, 1)], 3: [(1, 0), (1, 1)], 4: [(0, 0), (1, 1)], 5: [(0, 1), (1, 0)]} >>> pprint(struct[1]) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [0, 2, 4], (0, 1): [1, 2, 5], (1, 0): [0, 3, 5], (1, 1): [1, 3, 4]}) >>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]), ((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])] """ lines = get_lines_enum_coord(d, n) scopes = get_scopes_enum_coord(lines, d) return (lines, scopes) def connected_cells_coord(lines: Lines_enum_coord, scopes: Scopes_enum) -> Connected_cells: """ connected_cells_coord(lines: Lines_enum_coord, scopes: Scopes_enum) -> Connected_cells: Calculate the connected cells for a hypercube. Parameters ---------- lines The enumerated lines of the hypercube scopes The enumerated scopes of the hypercube Returns ------- A dictionary with keys being cell coordinates and values the connected cell coordinates.
See Also -------- structure_enum_coord Examples -------- >>> from pprint import pprint >>> struct = structure_enum_coord(2, 3) >>> struct[0] #doctest: +NORMALIZE_WHITESPACE {0: [(0, 0), (1, 0), (2, 0)], 1: [(0, 1), (1, 1), (2, 1)], 2: [(0, 2), (1, 2), (2, 2)], 3: [(0, 0), (0, 1), (0, 2)], 4: [(1, 0), (1, 1), (1, 2)], 5: [(2, 0), (2, 1), (2, 2)], 6: [(0, 0), (1, 1), (2, 2)], 7: [(0, 2), (1, 1), (2, 0)]} >>> pprint(struct[1]) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [0, 3, 6], (0, 1): [1, 3], (0, 2): [2, 3, 7], (1, 0): [0, 4], (1, 1): [1, 4, 6, 7], (1, 2): [2, 4], (2, 0): [0, 5, 7], (2, 1): [1, 5], (2, 2): [2, 5, 6]}) >>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [0, 3, 6]), ((0, 1), [1, 3]), ((0, 2), [2, 3, 7]), ((1, 0), [0, 4]), ((1, 1), [1, 4, 6, 7]), ((1, 2), [2, 4]), ((2, 0), [0, 5, 7]), ((2, 1), [1, 5]), ((2, 2), [2, 5, 6])] >>> connected_cells = connected_cells_coord(*struct) >>> pprint(connected_cells) #doctest: +SKIP defaultdict(<class 'list'>, {(0, 0): [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)], (0, 1): [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)], (0, 2): [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (0, 2)], (1, 0): [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)], (1, 1): [(0, 1), (1, 2), (0, 0), (0, 2), (2, 1), (2, 0), (2, 2), (1, 0), (1, 1)], (1, 2): [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)], (2, 0): [(0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)], (2, 1): [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)], (2, 2): [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (0, 2)]}) >>> sorted(connected_cells.items()) #doctest: +NORMALIZE_WHITESPACE [((0, 0), [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]), ((0, 1), [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)]), ((0, 2), [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (0, 2)]), ((1, 0), [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)]), ((1, 1), [(0, 1), (1, 2), (0, 0), (0, 2), (2, 1), (2, 0), (2, 2), (1, 0), (1, 1)]), ((1, 2), [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)]), ((2, 0), [(0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]), ((2, 1), [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)]), ((2, 2), [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (0, 2)])] """ connected_cells: Connected_cells = DefaultDict(list) for cell, lines_enums in scopes.items(): for line_enum in lines_enums: connected_cells[cell].extend(lines[line_enum]) connected_cells[cell] = list(set(connected_cells[cell])) return connected_cells def scopes_size(scopes: Scopes) -> Counter: """ scopes_size(scopes: Scopes) -> Counter: Calculate the different scope lengths. Parameters ---------- scopes Dictionary of cells (keys) and their scopes Returns ------- Counter of scopes lengths (key) and their frequency (values). See Also -------- get_scopes_np get_scopes_coord Examples -------- >>> import numpy as np >>> scopes = structure_np(2, 3)[2] >>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1}) True >>> scopes = structure_enum_np(2, 3)[2] >>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1}) True >>> scopes = structure_coord(2, 3)[1] >>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1}) True >>> scopes = structure_enum_coord(2, 3)[1] >>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1}) True """ return Counter([len(scope) for scope in scopes.values()]) def scopes_size_cell(scopes: Scopes) -> DefaultDict[int, List[Cell_coord]]: """ scopes_size_cell(scopes: Scopes) -> DefaultDict[int, List[Cell_coord]]: Group cells by length of their scope. 
Parameters ---------- scopes Dictionary of cells (keys) and their scopes Returns ------- Dictionary of scope lengths (keys) and the list of cells with scopes of that length. See Also -------- get_scopes_np get_scopes_coord get_scopes_enum Examples -------- >>> import numpy as np >>> from pprint import pprint >>> scopes = structure_np(2, 3)[2] >>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP defaultdict(<class 'list'>, {2: [(1, 0), (0, 1), (2, 1), (1, 2)], 3: [(0, 0), (2, 0), (0, 2), (2, 2)], 4: [(1, 1)]}) >>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE [(2, [(1, 0), (0, 1), (2, 1), (1, 2)]), (3, [(0, 0), (2, 0), (0, 2), (2, 2)]), (4, [(1, 1)])] >>> scopes = structure_enum_np(2, 3)[2] >>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP defaultdict(<class 'list'>, {2: [(1, 0), (0, 1), (2, 1), (1, 2)], 3: [(0, 0), (2, 0), (0, 2), (2, 2)], 4: [(1, 1)]}) >>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE [(2, [(1, 0), (0, 1), (2, 1), (1, 2)]), (3, [(0, 0), (2, 0), (0, 2), (2, 2)]), (4, [(1, 1)])] >>> scopes = structure_coord(2, 3)[1] >>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP defaultdict(<class 'list'>, {2: [(0, 1), (1, 0), (1, 2), (2, 1)], 3: [(0, 0), (0, 2), (2, 0), (2, 2)], 4: [(1, 1)]}) >>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE [(2, [(0, 1), (1, 0), (1, 2), (2, 1)]), (3, [(0, 0), (0, 2), (2, 0), (2, 2)]), (4, [(1, 1)])] >>> scopes = structure_enum_coord(2, 3)[1] >>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP defaultdict(<class 'list'>, {2: [(0, 1), (1, 0), (1, 2), (2, 1)], 3: [(0, 0), (0, 2), (2, 0), (2, 2)], 4: [(1, 1)]}) >>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE [(2, [(0, 1), (1, 0), (1, 2), (2, 1)]), (3, [(0, 0), (0, 2), (2, 0), (2, 2)]), (4, [(1, 1)])] """ scopes_size_cell: DefaultDict[int, List[Cell_coord]] = DefaultDict(list) for cell, scope in scopes.items(): scopes_size_cell[len(scope)].append(cell) return scopes_size_cell #################################################################################################### # The following 3 functions are for displaying a hypercube to a terminal. # It is assumed that a numpy ndarray has been used to represent the hypercube def display_np(hc: Cube_np, display_cell: Callable[[Any], Tuple[str, str, str]] = None, ul = False) -> str: """ display_np(hc: Cube_np, display_cell: Callable[[Any], Tuple[str, str, str]] = None, ul = False) -> str: Construct a string to display the hypercube in the terminal. Parameters ---------- hc The hypercube to be displayed display_cell A callback function called with the value of each cell. It returns a tuple of strings - the character/string to be displayed, and any formatting to be applied (typically ansi color sequences). See Examples for how colors are specified. If display_cell is not provided, the cell value is displayed. ul display_np calls itself recursively (see Notes). This parameter is used to track whether a cell is on the bottom row of a 2-d array. It has no direct impact when the user calls display_np unless the array is 1-d, in which case it determines if cell values are underlined when displayed. Returns ------- A string that can be printed to the terminal to display the hypercube. See Also -------- underline join_multiline Notes ----- The '|' character is used to represent the board horizontally. Cell contents are underlined in order to represent the board vertically. For example, the character 'X' is underlined to give 'X̲'.
This function is recursive; it starts with the hypercube and keeps removing dimensions until it reaches a single cell, which can be given a string value. We are trying to display d dimensions in two dimensions. To do this, odd dimensions are shown horizontally; even dimensions are shown vertically. Examples -------- >>> import numpy as np >>> from pprint import pprint >>> def dc(v: Any) -> Tuple[str, str, str]: ... ... # define colors - could also use colorama module ... # red foreground + yellow background ... pre_fmt = '\033[31;43m' ... post_fmt = '\033[0m' # removes color settings ... ... if v > 0: ... return 'X', pre_fmt, post_fmt ... elif v < 0: ... return 'O', pre_fmt, post_fmt ... else: ... return ' ', '', '' >>> d = 3 >>> n = 3 >>> hc = np.zeros((n,) * d, dtype = int) >>> hc[0, 0, 0] = 1 >>> hc[1, 1, 1] = -1 >>> disp = display_np(hc, dc) >>> print(disp) #doctest: +SKIP X|_|_ _|_|_ _|_|_ _|_|_ _|O|_ _|_|_ | | | | | | """ if hc.size == 1: # hc is a single cell if display_cell is None: s, pre_fmt, post_fmt = str(hc), '', '' else: s, pre_fmt, post_fmt = display_cell(hc) # underline displayed string (to represent board structure) unless # string is in the bottom row of array if ul: s = '_' * len(s) if s.isspace() else underline(s) return pre_fmt + s + post_fmt # hc is not a single cell d = hc.ndim # break the array into sub arrays along the first dimension sub_hc = [hc[i] for i in range(hc.shape[0])] # construct a string for each sub array sub_hc_str = [] for c, a in enumerate(sub_hc): if d == 2 and c == len(sub_hc) - 1: # sub arr is 2-dimensional and last row - don't underline ul = False elif d != 1: ul = True sub_hc_str.append(display_np(a, display_cell, ul)) # join the sub strings if d % 2 == 0: # even number of dimensions - display down the screen if d == 2: return ''.join('\n'.join(sub_hc_str)) else: sp = '\n' + '\n' * (int((d / 2) ** 1.5) - 1) # increase space between higher dimensions return sp.join(sub_hc_str) else: # odd number of dimensions - display across the screen if d == 1: return '|'.join(sub_hc_str) else: return join_multiline(sub_hc_str, ' ' + ' ' * int((d - 2) ** 1.5) + ' ', False) def underline(s: str, alpha_only = True) -> str: """ underline(s: str, alpha_only = True) -> str Underlines a string. Parameters ---------- s The string to be underlined Returns ------- An underlined string Notes ----- The code appears only to work properly with alphabetic characters. Examples -------- >>> underline('X') 'X̲' >>> underline('XX') 'X̲X̲' >>> underline('1') '1' >>> underline('1', False) '1̲' """ try: if alpha_only: s_ = "" for chr in str(s): if chr.isalpha(): s_ = s_ + chr + "\u0332" else: s_ = s_ + chr return s_ else: return ''.join([chr + "\u0332" for chr in str(s)]) except: return s def join_multiline(iter: Iterable[str], divider: str = ' ', divide_empty_lines: bool = False, fill_value: str = '_') -> str: """ join_multiline(iter: Iterable[str], divider: str = ' ', divide_empty_lines: bool = False, fill_value: str = '_') -> str Join multiline strings line by line. Parameters ---------- iter An iterable of multiline (or single line) strings divider String to divide the corresponding lines in each iterable divide_empty_lines If the corresponding line in each iterable is blank, then determines if the lines are still divided by divider, or divided by ''. fill_value If the number of lines in each multiline string in iter differs, then fill_value is used to fill in values of the shorter strings. Returns ------- The joined string.
Examples -------- >>> # note that newline has to be escaped to work in doctest examples below. >>> ml_1 = 'AA\\nMM\\nXX' >>> ml_2 = 'BB\\nNN\\nYY' >>> ml_3 = 'CC\\nOO\\nZZ' >>> ml = join_multiline([ml_1, ml_2, ml_3]) >>> print(ml) #doctest: +NORMALIZE_WHITESPACE AA BB CC MM NN OO XX YY ZZ >>> ml = join_multiline([ml_1, ml_2, ml_3], divider = '_') >>> print(ml) #doctest: +NORMALIZE_WHITESPACE AA_BB_CC MM_NN_OO XX_YY_ZZ >>> ml_3 = 'CC\\nOO' >>> ml = join_multiline([ml_1, ml_2, ml_3], fill_value = '@') >>> print(ml) #doctest: +NORMALIZE_WHITESPACE AA BB CC MM NN OO XX YY @ >>> ml_1 = 'AA\\n\\nMM' >>> ml_2 = 'BB\\n\\nNN' >>> ml_3 = 'CC\\n\\nZZ' >>> ml = join_multiline([ml_1, ml_2, ml_3], divider = '_') >>> print(ml) #doctest: +NORMALIZE_WHITESPACE AA_BB_CC <BLANKLINE> MM_NN_ZZ >>> ml = join_multiline([ml_1, ml_2, ml_3], '_', True) >>> print(ml) #doctest: +NORMALIZE_WHITESPACE AA_BB_CC __ MM_NN_ZZ """ # for each multiline block, split into individual lines spl = [x.split('\n') for x in iter] # create list of tuples with tuple i containing line i from each multiline block tl = [i for i in it.zip_longest(*spl, fillvalue = fill_value)] if divide_empty_lines: st = [divider.join(t) for t in tl] else: st = [] for t in tl: if all([not x.strip() for x in t]): st.append('') else: st.append(divider.join(t)) # finally, join each string separated by a new line return '\n'.join(st) #################################################################################################### #################################################################################################### # The following functions are helper functions def slice_ndarray(arr: Cube_np, dims: Collection[int], coords: Collection[int]) -> Cube_np: """ slice_ndarray(arr: Cube_np, dims: Collection[int], coords: Collection[int]) -> Cube_np: Returns a slice of a hypercube. Parameters ---------- arr The hypercube to be sliced dims The dims to slice along coords The coordinates corresponding to the dimensions being sliced Returns ------- A view of a slice of `arr`. Raises ------ ValueError If length of `dims` is not equal to length of `coords` Examples -------- >>> import numpy as np >>> arr = np.arange(8).reshape(2, 2, 2) >>> arr array([[[0, 1], [2, 3]], <BLANKLINE> [[4, 5], [6, 7]]]) >>> slice_ndarray(arr, (0,), (0,)) array([[0, 1], [2, 3]]) >>> slice_ndarray(arr, (1, 2), (0, 0)) array([0, 4]) """ # create a list of slice objects, one for each dimension of the array # Note: slice(None) is the same as ":". E.g. arr[:, 4] = arr[slice(None), 4] sl: List[Union[slice, int]] = [slice(None)] * arr.ndim if len(dims) != len(coords): raise ValueError("dims and coords must be of the same length") for dim, coord in zip(dims, coords): sl[dim] = coord return arr[tuple(sl)] def insert_into_tuple(tup: Tuple, pos: Union[int, Collection[int]], val: Any) -> Tuple[int, ...]: """ insert_into_tuple(tup: Tuple, pos: Union[int, Collection[int]], val: Any) -> Tuple[int, ...]: Insert values into a tuple. Parameters ---------- tup The tuple into which values are to be inserted pos The positions into which values are to be inserted val The values corresponding to the positions in `pos` Returns ------- A copy of `tup` with values inserted.
Raises ------ ValueError If length of `pos` is not equal to length of `val` Examples -------- >>> tup = (0, 1, 2, 3) >>> pos = (5, 1) >>> val = (9, 8) >>> insert_into_tuple(tup, pos, val) (0, 8, 1, 2, 3, 9) >>> insert_into_tuple(tup, (), ()) (0, 1, 2, 3) """ tl = list(tup) if isinstance(pos, int): tl.insert(pos, val) else: if len(pos) != len(val): raise ValueError("pos and val must be of the same length") if len(pos) == 0: return tup # sort pos so from low to high; sort val correspondingly stl = list(zip(*sorted(zip(pos, val)))) for p, v in zip(stl[0], stl[1]): tl.insert(p, v) return tuple(tl) def increment_cell_coord(cell: Cell_coord, pos: Sequence[int], incr: Sequence[int], add: bool = True) -> Cell_coord: """ increment_cell_coord(cell: Cell_coord, pos: Sequence[int], incr: Sequence[int], add: bool = True) -> Cell_coord: Increments coordinates of a cell. Parameters ---------- cell The cell which will have coordinates incremented pos The coordinates which are to be incremented incr The increment values at the specified coordinates add If True, the increments are added, else they are subtracted Returns ------- A copy of `cell` with incremented coordinates. Raises ------ ValueError If length of `pos` is not equal to length of `incr` Examples -------- >>> cell = (1, 2, 1) >>> pos = (0, 2) >>> incr = (1, -1) >>> increment_cell_coord(cell, pos, incr) (2, 2, 0) >>> increment_cell_coord(cell, pos, incr, False) (0, 2, 2) """ if len(pos) != len(incr): raise ValueError("pos and incr must be of the same length") if len(pos) == 0: return cell cl = list(cell) for i in range(len(pos)): if add: cl[pos[i]] += incr[i] else: cl[pos[i]] -= incr[i] return tuple(cl) def str_to_tuple(d: int, n: int, cell: str, offset: int = 1) -> Cell_coord: """ str_to_tuple(d: int, n: int, cell: str, offset: int = 1) -> Cell_coord: Returns cell coordinates provided as a string as a tuple of integers. Parameters ---------- d The number of dimensions of the hypercube n The number of cells in any dimension cell Cell coordinates specified as a string (see Notes). Will accept a non-string argument which will be cast to a string. offset Index offset - typically 0 or 1. Raises ------ ValueError 1. If digits are not separated and n is greater than 9 2. An incorrect number of coordinates is provided 3. One or more coordinates are not valid Notes ----- If the string is all digits then assumes that each digit is a coordinate. If non-digit characters are provided then assumes that these split coordinates. Returns ------- A tuple containing the cell coordinates. Examples -------- >>> d = 3 >>> n = 3 >>> str_to_tuple(d, n, '123') (0, 1, 2) >>> str_to_tuple(d, n, '012', offset = 0) (0, 1, 2) >>> str_to_tuple(d, n, '1,2::3') (0, 1, 2) >>> str_to_tuple(d, n, 123) (0, 1, 2) >>> str_to_tuple(d, n, '12') Traceback (most recent call last): ... ValueError: Incorrect number of coordinates provided >>> str_to_tuple(d, n, '125') Traceback (most recent call last): ... ValueError: One or more coordinates are not valid >>> d = 3 >>> n = 10 >>> str_to_tuple(d, n, '123') #doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ...
ValueError: Board is too big for each dimension to be specified by single digit """ cell = str(cell) # check to see if there are any non-digits nd = re.findall(r'\D+', cell) if len(nd) == 0: if n > 9: raise ValueError("Board is too big for each dimension to be specified by single digit") else: tup = tuple(int(coord) - offset for coord in cell) else: # there are non-digits, use these as separators tup = tuple(int(coord) - offset for coord in re.findall(r'\d+', cell)) # check that correct number of coordinates specified if len(tup) != d: raise ValueError("Incorrect number of coordinates provided") # check that each coordinate is valid if all(t in range(n) for t in tup): return tup else: raise ValueError("One or more coordinates are not valid") def remove_invalid_cells_coord(n:int, line: Line_coord) -> Line_coord: """ remove_invalid_cells_coord(n:int, line: Line_coord) -> Line_coord Remove cells that do not have valid coordinates. Parameters ---------- n The number of cells in any dimension line List of tuples representing cell coordinates (possibly invalid) Returns ------- List of tuples representing valid cell coordinates Examples -------- >>> n = 3 >>> line = [(1, 2, 0), (-1, 0, 3), (0, 1, 2), (1, 2, 3)] >>> remove_invalid_cells_coord(n, line) [(1, 2, 0), (0, 1, 2)] """ rl = [] for cell in line: if all(coord in range(n) for coord in cell): rl.append(cell) return rl #################################################################################################### # used in internal testing def _lines_np_coord_check(d: int, n: int) -> bool: """ _lines_np_coord_check(d: int, n: int) -> bool Checks if lines_np and lines_coord give the same lines. Parameters ---------- d The number of dimensions of the hypercube n The number of cells in any dimension Returns ------- True if lines_np and lines_coord give the same lines. False otherwise. See Also -------- get_lines_np get_lines_coord Notes ----- This function is a private function used in testing. """ dtype = np.int64 if n ** d > 2 ** 31 else np.int32 arr = np.arange(n ** d, dtype = dtype).reshape([n] * d) lines_np = get_lines_np(arr) lines_coord = get_lines_coord(d, n) t_np = [tuple(sorted(l.tolist())) for l in lines_np] # type: ignore t_coord = [tuple(sorted([arr[c] for c in l])) for l in lines_coord] return set(t_np) == set(t_coord)
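# ----------------------------------------------------------------------
# Usage sketch (illustrative; assumes get_lines_coord is available as
# documented above): the closed-form count in num_lines should agree
# with a brute-force enumeration of the lines for small hypercubes.
if __name__ == "__main__":
    for _d, _n in [(2, 2), (2, 3), (3, 4)]:
        assert len(list(get_lines_coord(_d, _n))) == num_lines(_d, _n)
    print("num_lines agrees with enumerated lines for the sampled cases")
# ----------------------------------------------------------------------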
[ 37811, 47081, 10345, 6267, 329, 1762, 351, 2685, 276, 8718, 66, 29080, 13, 198, 198, 38197, 66, 29080, 389, 18366, 286, 3951, 11, 24438, 290, 34896, 656, 2440, 220, 198, 27740, 5736, 13, 327, 11978, 8718, 66, 29080, 460, 307, 1807, 35...
2.103772
25,739
# Package version __version__ = "0.16.31"
[ 2, 15717, 2196, 198, 834, 9641, 834, 796, 366, 15, 13, 1433, 13, 3132, 1, 198 ]
2.625
16
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ # SPDX-FileCopyrightText: 2021 Janek Groehl # SPDX-License-Identifier: MIT from simpa.core.device_digital_twins import SlitIlluminationGeometry, LinearArrayDetectionGeometry, PhotoacousticDevice from simpa import perform_k_wave_acoustic_forward_simulation from simpa.core.simulation_modules.reconstruction_module.reconstruction_module_delay_and_sum_adapter import \ reconstruct_delay_and_sum_pytorch from simpa import MCXAdapter, ModelBasedVolumeCreationAdapter, \ GaussianNoise from simpa.utils import Tags, Settings, TISSUE_LIBRARY from simpa.core.simulation import simulate from simpa.io_handling import load_data_field import numpy as np from simpa.utils.path_manager import PathManager from simpa_tests.manual_tests import ManualIntegrationTestClass import matplotlib.pyplot as plt # FIXME temporary workaround for newest Intel architectures import os os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" if __name__ == '__main__': test = KWaveAcousticForwardConvenienceFunction() test.run_test(show_figure_on_screen=False)
[ 2, 30628, 55, 12, 8979, 15269, 8206, 25, 33448, 7458, 286, 49452, 8366, 11998, 11, 32975, 37, 57, 198, 2, 30628, 55, 12, 8979, 15269, 8206, 25, 33448, 2365, 988, 10299, 17231, 75, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, ...
3.215517
348
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2016, Silvio Peroni <essepuntato@gmail.com> # # Permission to use, copy, modify, and/or distribute this software for any purpose # with or without fee is hereby granted, provided that the above copyright notice # and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, # OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, # DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS # SOFTWARE. from __future__ import annotations import os from shutil import copymode, move from tempfile import mkstemp from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import BinaryIO, Tuple, List, Dict from oc_ocdm.counter_handler.counter_handler import CounterHandler
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 1584, 11, 4243, 85, 952, 2448, 14651, 1279, 35270, 79, 2797, 5549, 31, 14816, 13, 785, 29, 198, 2, ...
3.373089
327
from rest_framework import serializers from backend_app import models # RESPONSES SERIALIZERS
[ 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 198, 6738, 30203, 62, 1324, 1330, 4981, 628, 628, 628, 628, 628, 628, 628, 198, 198, 2, 47203, 19213, 1546, 18871, 12576, 14887, 4877, 628, 628, 628, 198 ]
3.222222
36
"""The database connection manager. """ import logging import psycopg2
[ 37811, 464, 6831, 4637, 4706, 13, 198, 37811, 198, 198, 11748, 18931, 198, 11748, 17331, 22163, 70, 17, 628 ]
3.842105
19
""" cvp.py Functions for generating CVP feeds. :copyright: (C) 2014 by github.com/alfg. :license: MIT, see README for more details. """ def cvp_player_to_dict(player): """ Convert a player object from a Tree to a CVP-compliant dict. """ return { "session": player.session, "userid": player.userid, "name": player.name, "deaf": player.deaf, "mute": player.mute, "selfDeaf": player.selfDeaf, "selfMute": player.selfMute, "suppress": player.suppress, "onlinesecs": player.onlinesecs, "idlesecs": player.idlesecs } def cvp_chan_to_dict(channel): """ Convert a channel from a Tree object to a CVP-compliant dict, recursively. """ return { "id": channel.c.id, "parent": channel.c.parent, "name": channel.c.name, "description": channel.c.description, "channels": [cvp_chan_to_dict(c) for c in channel.children], "users": [cvp_player_to_dict(p) for p in channel.users], "position": channel.c.position, "temporary": channel.c.temporary, "links": channel.c.links }
[ 37811, 198, 33967, 79, 13, 9078, 198, 24629, 2733, 329, 15453, 327, 8859, 21318, 13, 198, 198, 25, 22163, 4766, 25, 357, 34, 8, 1946, 416, 33084, 13, 785, 14, 1604, 70, 13, 198, 25, 43085, 25, 220, 220, 17168, 11, 766, 20832, 1168...
2.206501
523
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import base import unittest import tempfile from webkitpy.common.system.executive import Executive, ScriptError from webkitpy.thirdparty.mock import Mock
[ 2, 15069, 357, 34, 8, 3050, 3012, 3457, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, 1231, 198, 2, 17613, 11, 389, 10431, 2810, 326, 262, 1708, 3403, 389, 198, ...
3.605996
467
# -*- coding: utf-8 -*- from skimage import transform import tensorflow as tf import numpy as np import glob import face_recognition as FR import os import shutil
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 1341, 9060, 1330, 6121, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 15095, 198, 11748, 1986, 62, 26243, 653, 355, 8782...
3.173077
52
print("Enter Values of cylindrical part of tent ") h = float(input("Height : ")) r = float(input("radius : ")) csa_cyl = cyl(h, r) l = float(input("Enter slant height ")) csa_con = con(r, l) canvas_area = csa_cyl + csa_con print("Area of canvas = ", canvas_area, " m^2") unit_price = float(input("Enter cost of 1 m^2 ")) total_price = unit_price * canvas_area print("Total cost of canvas before tax ",total_price) print("Inluding tax"+ str(final_price(total_price)))
[ 4798, 7203, 17469, 27068, 286, 17327, 521, 8143, 636, 286, 11105, 366, 8, 198, 71, 796, 12178, 7, 15414, 7203, 23106, 1058, 366, 4008, 198, 81, 796, 12178, 7, 15414, 7203, 42172, 1058, 366, 4008, 198, 6359, 64, 62, 38801, 796, 17327, ...
2.769231
169
# Plotly integration for the Moku:Lab Datalogger # Copyright 2016 Liquid Instruments Pty. Ltd. from pymoku import InvalidOperationException
[ 2, 28114, 306, 11812, 329, 262, 337, 11601, 25, 17822, 360, 10254, 519, 1362, 198, 198, 2, 15069, 1584, 21020, 43953, 350, 774, 13, 12052, 13, 198, 198, 6738, 279, 4948, 11601, 1330, 17665, 32180, 16922 ]
3.916667
36
import publisher test_pdf_filename = "test/test.pdf" test_css_filename = "test/test.css" test_md_filename = "test/test.md" test_html_filename = "test/test.html" test_sender = "cpg@yakko.cs.wmich.edu" test_recipient = "cpgillem@gmail.com" test_md = "# Test heading\n\n- test item 1\n- test item 2" # The test case currently in use from_md_to_html_email()
[ 11748, 9991, 198, 198, 9288, 62, 12315, 62, 34345, 796, 366, 9288, 14, 9288, 13, 12315, 1, 198, 9288, 62, 25471, 62, 34345, 796, 366, 9288, 14, 9288, 13, 25471, 1, 198, 9288, 62, 9132, 62, 34345, 796, 366, 9288, 14, 9288, 13, 9132...
2.531915
141
from .train import Train
[ 6738, 764, 27432, 1330, 16835 ]
4.8
5
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 11, 15720, 602, 628 ]
2.891892
37
#!/usr/bin/env python # -*- coding:utf-8 -*- from .Contract import * from .Receivable import * from .Receipt import * from .Shop import * from .Statement import * from .Application import *
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 220, 198, 2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 198, 6738, 764, 45845, 1330, 1635, 198, 6738, 764, 3041, 48054, 1330, 1635, 198, 6738, 764, 3041, 344, 10257, 1330, 1...
3.080645
62
# -*- coding: utf-8 -*- import csv from stop_words import get_stop_words from nltk.stem.porter import PorterStemmer from gensim import corpora import gensim import os import re from nltk.tokenize import RegexpTokenizer #SET PATH path = r'' inputname="" def remove_html_tags(text): """Remove html tags from a string""" clean = re.compile('<.*?>') return re.sub(clean, '', text) #setup tokenizer = RegexpTokenizer(r'\w+') en_stop = get_stop_words('en') p_stemmer = PorterStemmer() fn = os.path.join(path, inputname) doc_set = [] with open(fn, encoding="utf8" ) as f: csv_f = csv.reader(f) for i, row in enumerate(csv_f): if i > 1 and len(row) > 1 : temp=remove_html_tags(row[1]) temp = re.sub("[^a-zA-Z ]","", temp) doc_set.append(temp) texts = [] for i in doc_set: if i.strip(): raw = i.lower() tokens = tokenizer.tokenize(raw) if len(tokens)>5: stopped_tokens = [i for i in tokens if not i in en_stop] texts.append(stopped_tokens) dictionary = corpora.Dictionary(texts) corpus = [dictionary.doc2bow(text) for text in texts] lsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary, num_topics=5 ) print (lsi.print_topics(num_topics=3, num_words=3)) ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=20, id2word = dictionary, passes=20) print(ldamodel.print_topics(num_topics=20, num_words=5)) K = ldamodel.num_topics topicWordProbMat = ldamodel.print_topics(K)
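# Illustrative follow-on (an assumption, not part of the original script):
# in gensim, print_topics returns (topic_id, word-probability string) pairs,
# so the matrix built above can be inspected with:
#
#     for topic_id, words in topicWordProbMat:
#         print(topic_id, words)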
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 269, 21370, 198, 6738, 2245, 62, 10879, 1330, 651, 62, 11338, 62, 10879, 198, 6738, 299, 2528, 74, 13, 927, 13, 26634, 1330, 20890, 1273, 368, 647, 198, 67...
2.052897
794
#!/usr/bin/env python import sys import os sys.path.append(os.path.join(os.path.dirname('__file__'), '..', 'src')) from random import randint from datetime import datetime, timedelta from logsandra.model.client import CassandraClient client = CassandraClient('test', 'localhost', 9160, 3) keywords = ['foo', 'bar', 'baz'] print "Loading sample data for the following keywords:", ', '.join(keywords) today = datetime.now() for i in range(1000): d = today + timedelta(randint(-7, -1), randint(-3600*24, 3600*24)) client.add_log(d, 'test entry', 'here', [keywords[i % len(keywords)]])
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 25064, 198, 11748, 28686, 198, 198, 17597, 13, 6978, 13, 33295, 7, 418, 13, 6978, 13, 22179, 7, 418, 13, 6978, 13, 15908, 3672, 10786, 834, 7753, 834, 33809, 705, 492, 3256, 7...
2.865385
208
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup, find_packages setup( # name of the lib name='bioshadock_biotools', # version version='1.0.1', packages=find_packages(), author="Francois Moreews", description="Import tool for biotools from Dockerfile", include_package_data=True, classifiers=[ "Programming Language :: Python", "Development Status :: 5 - Production/Stable", "License :: Apache 2.0", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Topic :: Communications", ], scripts = [ 'parseDockerFile.py', 'registryClient.py' ], install_requires = [ 'lxml', 'requests>=2.7.0' ], license="Apache 2.0", )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 198, 40406, 7, 628, 220, 220, 220, 1303, 1438, 28...
2.362398
367
import numpy as np from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from tensorflow.keras.optimizers import RMSprop from tensorflow.lite.python import lite X_train = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]) Y_train = np.array([0.0, 1.0, 1.0, 0.0]) model = Sequential() output_count_layer0 = 2 model.add( Dense( output_count_layer0, input_shape=(2, ), activation='sigmoid')) # Need to specify input shape for input layer output_count_layer1 = 1 model.add(Dense(output_count_layer1, activation='linear')) model.compile( loss='mean_squared_error', optimizer=RMSprop(), metrics=['accuracy']) BATCH_SIZE = 4 history = model.fit( X_train, Y_train, batch_size=BATCH_SIZE, epochs=3600, verbose=1) X_test = X_train Y_test = Y_train score = model.evaluate(X_test, Y_test, verbose=0) model.save('xor_model.h5') converter = lite.TFLiteConverter.from_keras_model_file('xor_model.h5') tflite_model = converter.convert() open('public/xor_model.tflite', 'wb').write(tflite_model)
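# Illustrative inference sketch (an assumption, not part of the original
# script): the converted model can be exercised with the TFLite interpreter.
#
#     interpreter = lite.Interpreter(model_path='public/xor_model.tflite')
#     interpreter.allocate_tensors()
#     inp = interpreter.get_input_details()[0]
#     out = interpreter.get_output_details()[0]
#     interpreter.set_tensor(inp['index'], np.array([[1.0, 0.0]], dtype=np.float32))
#     interpreter.invoke()
#     print(interpreter.get_tensor(out['index']))  # expected to be close to 1.0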
[ 11748, 299, 32152, 355, 45941, 198, 198, 6738, 11192, 273, 11125, 13, 6122, 292, 13, 27530, 1330, 24604, 1843, 198, 6738, 11192, 273, 11125, 13, 6122, 292, 13, 75, 6962, 1330, 360, 1072, 198, 6738, 11192, 273, 11125, 13, 6122, 292, 13...
2.115108
556
from django.db import models from django.contrib.auth.models import AbstractUser from django.utils.translation import ugettext_lazy as _
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 27741, 12982, 198, 6738, 42625, 14208, 13, 26791, 13, 41519, 1330, 334, 1136, 5239, 62, 75, 12582, 355, 4808, 628, 198 ]
3.475
40
import logging from abc import ABC, abstractmethod from os.path import isfile, splitext import pathlib import torch from .waveform import get_waveform logger = logging.getLogger(__name__)
[ 11748, 18931, 198, 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 198, 6738, 28686, 13, 6978, 1330, 318, 7753, 11, 4328, 578, 742, 198, 11748, 3108, 8019, 198, 11748, 28034, 198, 198, 6738, 764, 19204, 687, 1330, 651, 62, 19204, 687, 19...
3.310345
58
import os import sys import serial import time import struct ser = serial.Serial('/dev/ttyACM0',9600) led = sys.argv[1] act = sys.argv[2] l = str(led) """a = str(act)""" time.sleep(5) ser.write(struct.pack(l.encode())) """ ser.write(l.encode()) """
[ 11748, 28686, 198, 11748, 25064, 198, 11748, 11389, 198, 11748, 640, 198, 11748, 2878, 198, 198, 2655, 796, 11389, 13, 32634, 10786, 14, 7959, 14, 42852, 2246, 44, 15, 3256, 4846, 405, 8, 198, 992, 796, 25064, 13, 853, 85, 58, 16, 6...
2.390476
105
""" Primary module for Froggit This module contains the main controller class for the Froggit application. There is no need for any additional classes in this module. If you need more classes, 99% of the time they belong in either the lanes module or the models module. If you are unsure about where a new class should go, post a question on Piazza. Kendra Obika kao78 December 20 2020 """ from consts import * from game2d import * from level import * import introcs from kivy.logger import Logger # PRIMARY RULE: Froggit can only access attributes in level.py via getters/setters # Froggit is NOT allowed to access anything in lanes.py or models.py.
[ 37811, 198, 35170, 8265, 329, 9734, 1130, 270, 198, 198, 1212, 8265, 4909, 262, 1388, 10444, 1398, 329, 262, 9734, 1130, 270, 3586, 13, 1318, 198, 271, 645, 761, 329, 597, 3224, 6097, 287, 428, 8265, 13, 220, 1002, 345, 761, 517, 60...
3.655556
180
import logging from functools import wraps from PIL import Image, ImageFont, ImageDraw from config import LIST_ALLOWED_USERS
[ 11748, 18931, 198, 6738, 1257, 310, 10141, 1330, 27521, 198, 6738, 350, 4146, 1330, 7412, 11, 7412, 23252, 11, 7412, 25302, 198, 198, 6738, 4566, 1330, 39498, 62, 7036, 3913, 1961, 62, 2937, 4877, 628, 198 ]
3.555556
36
from .Camera import * from .GloveBox import * from .Microscope import * from .Stage import * from .UserInterface import * from .NeuralNetwork import *
[ 6738, 764, 35632, 1330, 1635, 198, 6738, 764, 9861, 659, 14253, 1330, 1635, 220, 198, 6738, 764, 13031, 29982, 1330, 1635, 220, 198, 6738, 764, 29391, 1330, 1635, 198, 6738, 764, 12982, 39317, 1330, 1635, 198, 6738, 764, 8199, 1523, 262...
3.534884
43
""" Copyright 2019-2021 Boris Shminke Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import re import sqlite3 from typing import Callable, Optional import torch from tqdm import tqdm from neural_semigroups.semigroups_dataset import SemigroupsDataset from neural_semigroups.utils import connect_to_db
[ 37811, 198, 220, 220, 15069, 13130, 12, 1238, 2481, 25026, 911, 1084, 365, 628, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 220, 220, 345, 743, 407, 779, 428, 2393, 2845, 287, ...
3.574561
228
from __future__ import absolute_import, division, print_function

import cv2
import random
import numpy as np
import colorsys
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import Polygon
from skimage.measure import find_contours


def log(text, array=None):
    """Prints a text message and, optionally, if a Numpy array is provided,
    its shape, min, and max values.
    """
    if array is not None:
        text = text.ljust(25)
        text += ("shape: {:20}  ".format(str(array.shape)))
        if array.size:
            text += ("min: {:10.5f}  max: {:10.5f}".format(array.min(), array.max()))
        else:
            text += ("min: {:10}  max: {:10}".format("", ""))
        text += "  {}".format(array.dtype)
    print(text)


def random_colors(N, bright=True):
    """Generate random colors.
    To get visually distinct colors, generate them in HSV space then
    convert to RGB.
    """
    brightness = 1.0 if bright else 0.7
    hsv = [(i / N, 1, brightness) for i in range(N)]
    colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
    random.shuffle(colors)
    return colors


def apply_mask(image, mask, color, alpha=0.5):
    """Apply the given mask to the image."""
    for c in range(3):
        image[:, :, c] = np.where(mask == 1,
                                  image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
                                  image[:, :, c])
    return image


def display_instances(image, boxes, masks, keypoints, class_id=1,
                      class_name='person', scores=None, title="",
                      figsize=(16, 16), ax=None, show_mask=True,
                      show_bbox=True, show_keypoint=True,
                      colors=None, captions=None):
    """
    boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
    masks: [height, width, num_instances]
    class_ids: 1 for person
    class_name: class name of the dataset
    scores: (optional) confidence scores for each box
    title: (optional) Figure title
    show_mask, show_bbox: To show masks and bounding boxes or not
    figsize: (optional) the size of the image
    colors: (optional) An array or colors to use with each object
    captions: (optional) A list of strings to use as captions for each object
    """
    # Number of instances
    N = boxes.shape[0]
    if not N:
        print("\n*** No instances to display *** \n")
    else:
        assert boxes.shape[0] == masks.shape[0]

    # If no axis is passed, create one and automatically call show()
    auto_show = False
    if not ax:
        _, ax = plt.subplots(1, figsize=figsize)
        auto_show = True

    # Generate random colors
    colors = colors or random_colors(N)

    # Show area outside image boundaries.
    height, width = image.shape[:2]
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(title)

    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        color = colors[i]

        # Bounding box
        if not np.any(boxes[i]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue
        y1, x1, y2, x2 = boxes[i]
        if show_bbox:
            p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                                  alpha=0.7, linestyle="dashed",
                                  edgecolor=color, facecolor='none')
            ax.add_patch(p)

        # Label
        if not captions:
            score = scores[i] if scores is not None else None
            label = class_name
            caption = "{} {:.3f}".format(label, score) if score else label
        else:
            caption = captions[i]
        ax.text(x1, y1 + 8, caption, color='w', size=11, backgroundcolor="none")

        # Mask
        mask = masks[i, :, :]
        keypoint = keypoints[i]
        if show_mask:
            masked_image = apply_mask(masked_image, mask, color)

        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        padded_mask = np.zeros(
            (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = mask
        contours = find_contours(padded_mask, 0.5)
        for verts in contours:
            # Subtract the padding and flip (y, x) to (x, y)
            verts = np.fliplr(verts) - 1
            p = Polygon(verts, facecolor="none", edgecolor=color)
            ax.add_patch(p)
        if show_keypoint:
            # apply_keypoint is assumed to be defined alongside apply_mask
            # in the original module; its definition is not in this cell.
            masked_image = apply_keypoint(masked_image, keypoint)
    ax.imshow(masked_image.astype(np.uint8))
    if auto_show:
        plt.show()


def extract_bboxes(mask):
    """Compute bounding boxes from masks.
    mask: [num_instances, height, width]. Mask pixels are either 1 or 0.

    Returns: bbox array [num_instances, (y1, x1, y2, x2)].
    """
    boxes = np.zeros([mask.shape[0], 4], dtype=np.int32)
    for i in range(mask.shape[0]):
        m = mask[i, :, :]
        # Bounding box.
        horizontal_indicies = np.where(np.any(m, axis=0))[0]
        vertical_indicies = np.where(np.any(m, axis=1))[0]
        if horizontal_indicies.shape[0]:
            x1, x2 = horizontal_indicies[[0, -1]]
            y1, y2 = vertical_indicies[[0, -1]]
            # x2 and y2 should not be part of the box. Increment by 1.
            x2 += 1
            y2 += 1
        else:
            # No mask for this instance. Might happen due to
            # resizing or cropping. Set bbox to zeros
            x1, x2, y1, y2 = 0, 0, 0, 0
        boxes[i] = np.array([y1, x1, y2, x2])
    return boxes.astype(np.int32)
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 198, 198, 11748, 269, 85, 17, 198, 11748, 4738, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 7577, 893, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 3...
2.098956
2,779
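Note on the row above: `random_colors` and `apply_mask` compose into the standard per-instance overlay. A minimal self-contained sketch of that blend (the 64x64 dummy image and square mask are invented for this example, not taken from the row):

import numpy as np
import colorsys

colors = [colorsys.hsv_to_rgb(i / 3, 1, 1.0) for i in range(3)]  # 3 distinct hues
image = np.zeros((64, 64, 3), dtype=np.uint32)                   # dummy black image
mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:48, 16:48] = 1                                           # square instance mask

# blend the first color into the masked region at alpha=0.5, which is
# exactly the per-channel np.where update that apply_mask performs
alpha, color = 0.5, colors[0]
for c in range(3):
    image[:, :, c] = np.where(mask == 1,
                              image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
                              image[:, :, c])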
import pytest
[ 11748, 12972, 9288, 198 ]
3.5
4
"""Particle filters for inference in state space models.""" import abc from typing import Tuple, Dict, Callable, Any, Optional import numpy as np from numpy.random import Generator from scipy.special import logsumexp from scipy.sparse import csr_matrix from dapy.filters.base import AbstractEnsembleFilter from dapy.models.base import AbstractModel import dapy.ot as optimal_transport
[ 37811, 7841, 1548, 16628, 329, 32278, 287, 1181, 2272, 4981, 526, 15931, 198, 198, 11748, 450, 66, 198, 6738, 19720, 1330, 309, 29291, 11, 360, 713, 11, 4889, 540, 11, 4377, 11, 32233, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 29...
3.536364
110
import random
[ 11748, 4738 ]
6.5
2
""" Copyright 2011 Shao-Chuan Wang <shaochuan.wang AT gmail.com> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import operator from itertools import imap, repeat import functools iterable = lambda obj: isinstance(obj, basestring) or hasattr(obj, '__iter__') vector_add = functools.partial(vector_op, operator.add) vector_sub = functools.partial(vector_op, operator.sub) vector_mul = functools.partial(vector_op, operator.mul) vector_div = functools.partial(vector_op, operator.div) vector_and = functools.partial(vector_op, operator.and_) vector_or = functools.partial(vector_op, operator.or_) if __name__ == '__main__': positions = [(1,2,1), (3,4,3), (5,6,3)] print vector_sum(positions) print vector_mean(positions)
[ 37811, 198, 220, 220, 15069, 2813, 911, 5488, 12, 1925, 7258, 15233, 1279, 26270, 5374, 7258, 13, 47562, 5161, 308, 4529, 13, 785, 29, 628, 220, 220, 220, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, ...
3.218807
553
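Note on the row above: the `functools.partial(vector_op, operator.add)` lines presuppose a `vector_op` helper whose definition is not captured in this cell. A plausible minimal sketch of the pattern in Python 3 (the `vector_op` body here is an assumption written only to illustrate how the partials would be used):

import operator
import functools

def vector_op(op, v1, v2):
    # elementwise binary op over two same-length vectors (assumed signature)
    return tuple(op(a, b) for a, b in zip(v1, v2))

vector_add = functools.partial(vector_op, operator.add)
vector_sub = functools.partial(vector_op, operator.sub)

print(vector_add((1, 2, 1), (3, 4, 3)))  # (4, 6, 4)
print(vector_sub((5, 6, 3), (1, 2, 1)))  # (4, 4, 2)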
# coding: utf-8

"""
    App Center Client

    Microsoft Visual Studio App Center API  # noqa: E501

    OpenAPI spec version: preview
    Contact: benedetto.abbenanti@gmail.com
    Project Repository: https://github.com/b3nab/appcenter-sdks
"""

import pprint
import re  # noqa: F401

import six


    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BranchConfigurationToolsets):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 37811, 198, 220, 220, 220, 2034, 3337, 20985, 628, 220, 220, 220, 5413, 15612, 11733, 2034, 3337, 7824, 220, 1303, 645, 20402, 25, 412, 33548, 628, 220, 220, 220, 4946, 17614, 1020, 2196, 25...
2.593373
332
from click.testing import CliRunner
import unittest
from mock import patch, Mock, PropertyMock

from floyd.cli.version import upgrade
[ 6738, 3904, 13, 33407, 1330, 1012, 72, 49493, 198, 11748, 555, 715, 395, 198, 6738, 15290, 1330, 8529, 11, 44123, 11, 14161, 44, 735, 198, 198, 6738, 781, 12192, 13, 44506, 13, 9641, 1330, 8515, 198 ]
3.722222
36
"""Contains HelpCommand class.""" import discord from discord.ext import commands from offthedialbot import utils def short(self, command, doc=True): """List the command as a one-liner.""" sig = self.get_command_signature(command) if not doc else f'{self.clean_prefix}{command}' return f'`{sig[:-1] if sig.endswith(" ") else sig}` {(command.short_doc if doc else "")}' help_command = HelpCommand()
[ 37811, 4264, 1299, 10478, 21575, 1398, 526, 15931, 198, 198, 11748, 36446, 198, 6738, 36446, 13, 2302, 1330, 9729, 198, 198, 6738, 572, 83, 704, 498, 13645, 1330, 3384, 4487, 628, 198, 220, 220, 220, 825, 1790, 7, 944, 11, 3141, 11, ...
2.734177
158
def is_anagram(word1, word2):
    '''
    Returns True if word1 is an anagram of word2, or False otherwise.
    word1: str
    word2: str
    '''
    return sorted(word1) == sorted(word2)

print(is_anagram("silence", "listen"))
[ 4299, 318, 62, 272, 6713, 357, 1573, 16, 11, 1573, 17, 15179, 198, 220, 220, 220, 705, 7061, 198, 220, 220, 220, 16409, 6407, 611, 1573, 16, 318, 705, 272, 6713, 6, 286, 1573, 17, 393, 10352, 611, 4306, 13, 198, 220, 220, 220, 2...
2.161017
118
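Note on the row above: the sorted-comparison is the classic O(n log n) anagram test; also note that "silence" and "listen" are not anagrams, so the sample call prints False. An equivalent linear-time multiset check (a small illustrative sketch, not part of the dataset row):

from collections import Counter

def is_anagram_counter(word1: str, word2: str) -> bool:
    # two words are anagrams iff they contain the same letters
    # with the same multiplicities
    return Counter(word1) == Counter(word2)

assert is_anagram_counter("silent", "listen")        # True: same letters
assert not is_anagram_counter("silence", "listen")   # "silence" has c/extra e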
import torch
from torch import nn as nn

from learning.modules.blocks import ResBlock, ResBlockConditional
[ 11748, 28034, 198, 6738, 28034, 1330, 299, 77, 355, 299, 77, 198, 198, 6738, 4673, 13, 18170, 13, 27372, 1330, 1874, 12235, 11, 1874, 12235, 25559, 1859 ]
3.925926
27
# Used for deploying on Apache with mod_wsgi
from cryptovote.app import create_app

application = create_app()
[ 2, 16718, 329, 29682, 319, 24843, 351, 953, 62, 18504, 12397, 198, 6738, 8194, 709, 1258, 13, 1324, 1330, 2251, 62, 1324, 198, 31438, 796, 2251, 62, 1324, 3419, 198 ]
3.666667
30
#
# @lc app=leetcode id=203 lang=python3
#
# [203] Remove Linked List Elements
#

# @lc code=start
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
# @lc code=end
[ 2, 198, 2, 2488, 44601, 598, 28, 293, 316, 8189, 4686, 28, 22416, 42392, 28, 29412, 18, 198, 2, 198, 2, 685, 22416, 60, 17220, 7502, 276, 7343, 26632, 198, 2, 198, 198, 2, 2488, 44601, 2438, 28, 9688, 198, 2, 30396, 329, 1702, 3...
2.333333
114
""" A module that defines the QLearning Agent for the pricing game as a class. Note that we have a numba version (for speed) which inherits everything from QLearningAgentBase. """ import numpy as np from numba import float64 from numba import int32 from numba import njit from numba.experimental import jitclass from .utils_q_learning import numba_argmax from .utils_q_learning import numba_max spec = [ ("n_actions", int32), ("n_states", int32), ("_qvalues", float64[:, :]), ("alpha", float64), ("epsilon", float64), ("discount", float64), ] def jitclass_to_baseclass(agent_jit): """ A helper function to create a new QLearningAgentBase object from the jitclass equivalent. This is needed as we cannot serialize jitclasses in the current numba version. The function takes all parameters from the QLearningAgent *agent_jit* and rewrites it to a new QLearningAgentBase object. Args: agent_jit (QLearningAgent): jitclass instance of agent Returns: QLearningAgentBase: Serializable version of the agent """ agent_nojit = QLearningAgentBase( alpha=agent_jit.alpha, epsilon=agent_jit.epsilon, discount=agent_jit.discount, n_actions=agent_jit.n_actions, n_states=agent_jit.n_states, ) agent_nojit.set_qmatrix(new_matrix=agent_jit.get_qmatrix()) return agent_nojit
[ 37811, 198, 198, 32, 8265, 326, 15738, 262, 1195, 41730, 15906, 329, 262, 13045, 983, 355, 257, 1398, 13, 198, 6425, 326, 356, 423, 257, 997, 7012, 2196, 357, 1640, 2866, 8, 543, 10639, 896, 2279, 422, 198, 9711, 451, 768, 36772, 14...
2.707692
520
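Note on the row above: the helper works around the fact that numba jitclass instances cannot be pickled, by copying their fields into a plain Python object. A minimal self-contained sketch of the same copy-out pattern (the `Jitted`/`Plain` classes are invented for illustration, not the module's real agents):

import pickle
from numba import float64
from numba.experimental import jitclass

@jitclass([("value", float64)])
class Jitted:
    def __init__(self, value):
        self.value = value

class Plain:
    def __init__(self, value):
        self.value = value

jit_obj = Jitted(3.0)
plain_obj = Plain(jit_obj.value)   # copy fields out of the jitclass
blob = pickle.dumps(plain_obj)     # the plain object pickles fine
assert pickle.loads(blob).value == 3.0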
#!/usr/bin/python

# with help from teleop_keyboard.py,
#   https://github.com/ros-teleop/teleop_twist_keyboard/blob/master/teleop_twist_keyboard.py
# Graylin Trevor Jay and Austin Hendrix, BSD licensed

import roslib; #roslib.load_manifest('teleop_move')
import rospy
from geometry_msgs.msg import Twist
import sys, select, termios, tty

starting_msg = """Move with:
   i
j  k  l
(or wasd, space to stop)
CTRL-C to quit
"""

movement={
        'i':(1,0,0,0),
        'j':(0,0,0,1),
        'k':(0,0,0,-1),
        'l':(-1,0,0,0),
        'w':(1,0,0,0),
        'a':(0,0,0,1),
        's':(0,0,0,-1),
        'd':(-1,0,0,0),
        ' ':(0,0,0,0),
        }

if __name__=="__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 2, 351, 1037, 422, 5735, 404, 62, 2539, 3526, 13, 9078, 11, 220, 198, 2, 220, 220, 3740, 1378, 12567, 13, 785, 14, 4951, 12, 46813, 404, 14, 46813, 404, 62, 4246, 396, 62, 2539, 35...
1.961538
338
""" File: shrink.py ame: Wilson Wang 2020/08/05 ------------------------------- Create a new "out" image half the width and height of the original. Set pixels at x=0 1 2 3 in out , from x=0 2 4 6 in original, and likewise in the y direction. """ from simpleimage import SimpleImage def shrink(filename): """ This function should shrink the 'filename' image into a 1/2 size new image. :param filename: img, the image of origin size :return img: new_img, the image of half size of the origin photo """ img = SimpleImage(filename) # This step should makes a blank photo, which has half size of the origin photo new_img = SimpleImage.blank(img.width//2,img.height//2) for y in range(new_img.height): for x in range(new_img.width): # This step catch pixel in origin photo in every two pixel. x=0,2,4,6 img_pixel = img.get_pixel(x*2,y*2) new_img_pixel = new_img.get_pixel(x,y) # These three steps are filling pixels from the origin photo into 'new_pixel' new_img_pixel.red = img_pixel.red new_img_pixel.green = img_pixel.green new_img_pixel.blue = img_pixel.blue return new_img def main(): """ This program should shrink any image into a half size photo. 'without code:make_as_big_as' """ original = SimpleImage("images/poppy.png") original.show() after_shrink = shrink("images/poppy.png") after_shrink.show() if __name__ == '__main__': main()
[ 37811, 201, 198, 8979, 25, 22085, 13, 9078, 201, 198, 480, 25, 8127, 15233, 12131, 14, 2919, 14, 2713, 201, 198, 1783, 24305, 201, 198, 16447, 257, 649, 366, 448, 1, 2939, 2063, 262, 9647, 290, 6001, 286, 262, 2656, 13, 201, 198, ...
2.520194
619
""" show ip dhcp database show ip dhcp snooping database show ip dhcp snooping database detail """ # Python import re # Metaparser from genie.metaparser import MetaParser from genie.metaparser.util.schemaengine import (Schema, Any, Optional, Or, And, Default, Use) # Parser Utils from genie.libs.parser.utils.common import Common # ======================================= # Schema for 'show ip dhcp database' # ======================================= # ======================================= # Parser for 'show ip dhcp database' # ======================================= # =================================================== # Schema for 'show ip dhcp snooping database' # 'show ip dhcp snooping database detail' # =================================================== # =================================================== # Parser for 'show ip dhcp snooping database' # =================================================== # =================================================== # Parser for 'show ip dhcp snooping database detail' # ===================================================
[ 37811, 198, 12860, 20966, 34590, 13155, 6831, 198, 12860, 20966, 34590, 13155, 3013, 11224, 278, 6831, 198, 12860, 20966, 34590, 13155, 3013, 11224, 278, 6831, 3703, 198, 37811, 198, 198, 2, 11361, 198, 11748, 302, 198, 198, 2, 3395, 499,...
3.821192
302
import os
import sys
import json
import ipaddress
import paramiko

br=sys.argv[1]
r=sys.argv[2]
IP=sys.argv[3]

func_createcont(br,r,IP)
[ 11748, 28686, 198, 11748, 25064, 198, 11748, 33918, 198, 11748, 20966, 21975, 198, 11748, 5772, 12125, 198, 198, 1671, 28, 17597, 13, 853, 85, 58, 16, 60, 198, 81, 28, 17597, 13, 853, 85, 58, 17, 60, 198, 4061, 28, 17597, 13, 853, ...
2.266667
60
### flaskr/__init__.py

import os

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()
[ 21017, 42903, 81, 14, 834, 15003, 834, 13, 9078, 198, 198, 11748, 28686, 198, 6738, 42903, 1330, 46947, 198, 6738, 42903, 62, 25410, 282, 26599, 1330, 16363, 2348, 26599, 198, 198, 9945, 796, 16363, 2348, 26599, 3419 ]
3.135135
37
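Note on the row above: the module-level `db = SQLAlchemy()` is the first half of the Flask application-factory pattern; the factory itself is not in this cell. A minimal sketch of how such a `create_app` typically binds the extension (the config values are placeholders, not from the original file):

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

def create_app():
    app = Flask(__name__)
    # placeholder URI; real projects read this from config/env
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///flaskr.sqlite"
    db.init_app(app)  # attach the shared extension to this app instance
    return app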
import boto3
import time


if __name__ == "__main__":
    ec2 = AwsEc2("", "")
    res = ec2.get_instance_by_resource('xxxxxx')
    for i in res:
        print(i.placement)
[ 11748, 275, 2069, 18, 198, 11748, 640, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 9940, 17, 796, 5851, 82, 49136, 17, 7203, 1600, 366, 4943, 198, 220, 220, 220, 581, 796, 9940, 17, ...
2.185185
81
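Note on the row above: `AwsEc2` is referenced but its definition was not captured in this cell. A hedged sketch of a wrapper with that shape, using only documented boto3 calls (the constructor arguments and the tag-based lookup are assumptions, not the original implementation):

import boto3

class AwsEc2:
    def __init__(self, access_key, secret_key):
        # empty strings fall back to the default credential chain
        session = boto3.session.Session(
            aws_access_key_id=access_key or None,
            aws_secret_access_key=secret_key or None,
        )
        self.ec2 = session.resource("ec2")

    def get_instance_by_resource(self, name):
        # assumption: "resource" means a Name tag on the instance
        return self.ec2.instances.filter(
            Filters=[{"Name": "tag:Name", "Values": [name]}]
        )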
import control
import speech_recognition as sr

def recognize_speech_from_mic(recognizer, microphone):
    """Transcribe speech recorded from `microphone`.

    Returns a dictionary with three keys:
    "success": a boolean indicating whether or not the API request was successful
    "error": `None` if no error occurred, otherwise a string containing
             an error message if the API could not be reached or
             speech was unrecognizable
    "transcription": `None` if speech could not be transcribed,
                     otherwise a string containing the transcribed text
    """
    # check that recognizer and microphone arguments are appropriate type
    if not isinstance(recognizer, sr.Recognizer):
        raise TypeError("`recognizer` must be `Recognizer` instance")

    if not isinstance(microphone, sr.Microphone):
        raise TypeError("`microphone` must be `Microphone` instance")

    # adjust the recognizer sensitivity to ambient noise and record audio
    # from the microphone
    with microphone as source:
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)

    # set up the response object
    response = {
        "success" : True,
        "error" : None,
        "transcription" : None
    }

    # try recognizing the speech in the recording
    # if a RequestError or UnknownValueError exception is caught,
    # update the response object accordingly
    try:
        response["transcription"] = recognizer.recognize_google(audio)
    except sr.RequestError:
        # API was unreachable or unresponsive
        response["success"] = False
        response["error"] = "API unavailable"
    except sr.UnknownValueError:
        # speech was unintelligible
        response["error"] = "Unable to recognize speech"

    return response

r = sr.Recognizer()
m = sr.Microphone()

while(True):
    while(True):
        print('Listening... ')
        arg = recognize_speech_from_mic(r, m)
        if arg["transcription"]:
            break
        if not arg["success"]:
            break
        if arg["error"]:
            print('Error! {}'.format(arg["error"]))
            pass

    print('Heard: {}'.format(arg["transcription"]))
    control.doAction(str.lower(arg["transcription"]))
[ 11748, 1630, 198, 11748, 4046, 62, 26243, 653, 355, 19677, 198, 198, 4299, 7564, 62, 45862, 62, 6738, 62, 9383, 7, 26243, 7509, 11, 21822, 2599, 198, 220, 220, 220, 37227, 8291, 66, 4892, 4046, 422, 6264, 422, 4600, 24055, 4862, 44646...
2.756595
834
from .consts import * # Object matching by classid OBJECTS_CLSID_RULES = [ {'type' : RULETYPE_EQUAL, 'text' : 'clsid:D27CDB6E-AE6D-11cf-96B8-444553540000', 'entities' : [ {'name' : 'web:tech/flash'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'clsid:d27cdb6e-ae6d-11cf-96b8-444553540000', 'entities' : [ {'name' : 'web:tech/flash'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'clsid:-D27CDB6E-AE6D-11cf-96B8-444553540000', 'entities' : [ {'name' : 'web:tech/flash'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95', 'entities' : [ {'name' : 'web:tech:activex/wmplayer'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95', 'entities' : [ {'name' : 'web:tech:activex/wmplayer'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'clsid:22D6F312-B0F6-11D0-94AB-0080C74C7E95', 'entities' : [ {'name' : 'web:tech:activex/wmplayer'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'clsid:6BF52A52-394A-11D3-B153-00C04F79FAA6', 'entities' : [ {'name' : 'web:tech:activex/wmplayer'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'CLSID:CFCDAA03-8BE4-11cf-B84B-0020AFBBCCFA', 'entities' : [ {'name' : 'web:tech:activex/realplayer'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'clsid:CFCDAA03-8BE4-11cf-B84B-0020AFBBCCFA', 'entities' : [ {'name' : 'web:tech:activex/realplayer'} ] }, ] # match object tags by type OBJECTS_TYPE_RULES = [ {'type' : RULETYPE_EQUAL, 'text' : 'application/x-silverlight-2', 'entities' : [ {'name' : 'web:tech/silverlight'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'application/x-shockwave-flash', 'entities' : [ {'name' : 'web:tech/flash'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'application/x-oleobject', 'entities' : [ {'name' : 'web:tech/activex'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'image/svg+xml', 'entities' : [ {'name' : 'web:tech/svg'} ] }, ] # match object tags by data OBJECTS_DATA_RULES = [ {'type' : RULETYPE_REGEXP, 'text' : '^http://img\.yandex\.net/i/time/clock\.swf', 'entities' : [ {'name' : 'web:widgets:clock/yandexclock'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://vimeo\.com', 'entities' : [ {'name' : 'web:media:video/vimeo'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://www\.youtube\.com', 'entities' : [ {'name' : 'web:media:video/youtube'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://cdn\.last\.fm/widgets/chart', 'entities' : [ {'name' : 'web:widgets:audio/lastfm'} ] }, ] # match object tags by embed src EMBED_SRC_RULES = [ {'type' : RULETYPE_REGEXP, 'text' : '^http://img\.mail\.ru/r/video2/player_v2\.swf', 'entities' : [ {'name' : 'web:media:video/mailru'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://flv\.video\.yandex\.ru', 'entities' : [ {'name' : 'web:media:video/yandex'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://img\.gismeteo\.ru/flash', 'entities' : [ {'name' : 'web:widgets:meteo/gismeteo'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://www\.clocklink\.com/clocks/', 'entities' : [ {'name' : 'web:widgets:time/clocklink'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'http://iii.ru/static/Vishnu.swf', 'entities' : [ {'name' : 'web:widgets:chat/iiiru'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://[a-z0-9]{1,3}\.videos\.sapo\.pt/play', 'entities' : [ {'name' : 'web:media:video/sapovideos'} ] }, {'type' : RULETYPE_EQUAL, 'text' : 'http://pub.tvigle.ru/swf/tvigle_single_v2.swf', 'entities' : [ {'name' : 'web:media:video/twigle'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://rpod\.ru/i/b/listen_240x400_01/core\.swf', 'entities' : [ {'name' : 'web:media:audio/rpodru'} ] }, {'type' : RULETYPE_REGEXP, 'text' : 
'^http://vision\.rambler\.ru/i/e\.swf', 'entities' : [ {'name' : 'web:media:video/ramblervision'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://pics\.smotri\.com/scrubber_custom8\.swf', 'entities' : [ {'name' : 'web:media:video/smotricom'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://www\.russia\.ru/player/main\.swf', 'entities' : [ {'name' : 'web:media:video/russiaru'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://video\.google\.(com|ru|ca|de)/googleplayer.swf', 'entities' : [ {'name' : 'web:media:video/googlevideo'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://www\.youtube\.com/v/', 'entities' : [ {'name' : 'web:media:video/youtube'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^/bitrix/templates/', 'entities' : [ {'name' : 'web:cms/bitrix'}, {'name' : 'web:tech:lang/php'}, ] }, {'type' : RULETYPE_REGEXP, 'text' : '^/bitrix/components/', 'entities' : [ {'name' : 'web:cms/bitrix'}, {'name' : 'web:tech:lang/php'}, ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://developer\.truveo\.com/apps/listWidget', 'entities' : [ {'name' : 'web:media:video/truveo'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://pics\.rbc\.ru/informer', 'entities' : [ {'name' : 'web:widgets:fin/rbcinformer'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://video\.rutube\.ru', 'entities' : [ {'name' : 'web:media:video/rutube'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://static\.twitter\.com/flash/widgets/profile/TwitterWidget\.swf', 'entities' : [ {'name' : 'web:widgets:blog/twitter'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://vimeo\.com/moogaloop.swf', 'entities' : [ {'name' : 'web:media:video/vimeo'} ] }, {'type' : RULETYPE_REGEXP, 'text' : '^http://www.1tv.ru/(n|p)video', 'entities' : [ {'name' : 'web:media:video/1tvru'} ] }, ]
[ 6738, 764, 1102, 6448, 1330, 1635, 628, 198, 2, 9515, 12336, 416, 1398, 312, 198, 9864, 41, 2943, 4694, 62, 5097, 50, 2389, 62, 49, 6239, 1546, 796, 685, 198, 220, 220, 220, 1391, 6, 4906, 6, 1058, 371, 24212, 25216, 62, 36, 10917...
1.764968
3,808
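Note on the rows above: each rule table pairs a match `type` (equality or regexp) with a `text` pattern and the entities to emit on a hit. A minimal sketch of the matcher such tables imply (the function name and the RULETYPE constant values are assumptions; only the table shape comes from the row above):

import re

RULETYPE_EQUAL, RULETYPE_REGEXP = 0, 1  # assumed constant values

def match_entities(rules, value):
    """Return entity names from every rule whose pattern matches `value`."""
    names = []
    for rule in rules:
        if rule["type"] == RULETYPE_EQUAL:
            hit = rule["text"] == value
        else:  # RULETYPE_REGEXP
            hit = re.search(rule["text"], value) is not None
        if hit:
            names.extend(e["name"] for e in rule["entities"])
    return names

rules = [{"type": RULETYPE_REGEXP,
          "text": r"^http://www\.youtube\.com",
          "entities": [{"name": "web:media:video/youtube"}]}]
print(match_entities(rules, "http://www.youtube.com/v/abc"))  # ['web:media:video/youtube']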
# Why does this file exist, and why not put this in `__main__`? # # You might be tempted to import things from `__main__` later, # but that will cause problems: the code will get executed twice: # # - When you run `python -m failprint` python will execute # `__main__.py` as a script. That means there won't be any # `failprint.__main__` in `sys.modules`. # - When you import `__main__` it will get executed again (as a module) because # there's no `failprint.__main__` in `sys.modules`. """Module that contains the command line application.""" import argparse from typing import List, Optional, Sequence from failprint.capture import Capture from failprint.formats import accept_custom_format, formats from failprint.runners import run def add_flags(parser, set_defaults=True) -> ArgParser: """ Add some boolean flags to the parser. We made this method separate and public for its use in [duty](https://github.com/pawamoy/duty). Arguments: parser: The parser to add flags to. set_defaults: Whether to set default values on arguments. Returns: The augmented parser. """ # IMPORTANT: the arguments destinations should match # the parameters names of the failprint.runners.run function. # As long as names are consistent between the two, # it's very easy to pass CLI args to the function, # and it also allows to avoid duplicating the parser arguments # in dependent projects like duty (https://github.com/pawamoy/duty) :) parser.add_argument( "-c", "--capture", choices=list(Capture), type=Capture, help="Which output to capture. Colors are supported with 'both' only, unless the command has a 'force color' option.", ) parser.add_argument( "-f", "--fmt", "--format", dest="fmt", choices=formats.keys(), type=accept_custom_format, default=None, help="Output format. Pass your own Jinja2 template as a string with '-f custom=TEMPLATE'. " "Available variables: command, title (command or title passed with -t), code (exit status), " "success (boolean), failure (boolean), number (command number passed with -n), " "output (command output), nofail (boolean), quiet (boolean), silent (boolean). " "Available filters: indent (textwrap.indent).", ) parser.add_bool_argument( ["-y", "--pty"], ["-Y", "--no-pty"], dest="pty", default=True if set_defaults else None, truthy_help="Enable the use of a pseudo-terminal. PTY doesn't allow programs to use standard input.", falsy_help="Disable the use of a pseudo-terminal. PTY doesn't allow programs to use standard input.", ) parser.add_bool_argument( ["-p", "--progress"], ["-P", "--no-progress"], dest="progress", default=True if set_defaults else None, truthy_help="Print progress while running a command.", falsy_help="Don't print progress while running a command.", ) parser.add_bool_argument( ["-q", "--quiet"], ["-Q", "--no-quiet"], dest="quiet", default=False if set_defaults else None, truthy_help="Don't print the command output, even if it failed.", falsy_help="Print the command output when it fails.", ) parser.add_bool_argument( ["-s", "--silent"], ["-S", "--no-silent"], dest="silent", default=False if set_defaults else None, truthy_help="Don't print anything.", falsy_help="Print output as usual.", ) parser.add_bool_argument( ["-z", "--zero", "--nofail"], ["-Z", "--no-zero", "--strict"], dest="nofail", default=False if set_defaults else None, truthy_help="Don't fail. Always return a success (0) exit code.", falsy_help="Return the original exit code.", ) return parser def get_parser() -> ArgParser: """ Return the CLI argument parser. Returns: An argparse parser. 
""" parser = add_flags(ArgParser(prog="failprint")) parser.add_argument("-n", "--number", type=int, default=1, help="Command number. Useful for the 'tap' format.") parser.add_argument("-t", "--title", help="Command title. Default is the command itself.") parser.add_argument("cmd", metavar="COMMAND", nargs="+") return parser def main(args: Optional[List[str]] = None) -> int: """ Run the main program. This function is executed when you type `failprint` or `python -m failprint`. Arguments: args: Arguments passed from the command line. Returns: An exit code. """ parser = get_parser() opts = parser.parse_args(args).__dict__.items() # noqa: WPS609 return run(**{_: value for _, value in opts if value is not None}).code
[ 2, 4162, 857, 428, 2393, 2152, 11, 290, 1521, 407, 1234, 428, 287, 4600, 834, 12417, 834, 63, 30, 198, 2, 198, 2, 921, 1244, 307, 26194, 284, 1330, 1243, 422, 4600, 834, 12417, 834, 63, 1568, 11, 198, 2, 475, 326, 481, 2728, 276...
2.681143
1,819
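Note on the row above: the comment in `main` explains that argument destinations deliberately mirror the parameters of `failprint.runners.run`, so the parsed namespace can be splatted straight into the function after dropping `None` values. A small sketch of that dict-filtering idiom in isolation (generic argparse and a stand-in `run`, not failprint's own ArgParser):

import argparse

def run(cmd, title=None, number=1):
    # stand-in for failprint.runners.run; just shows what it receives
    print(cmd, title, number)
    return 0

parser = argparse.ArgumentParser()
parser.add_argument("-t", "--title")
parser.add_argument("-n", "--number", type=int, default=1)
parser.add_argument("cmd", nargs="+")

opts = parser.parse_args(["-n", "2", "echo", "hi"]).__dict__.items()
# drop None values so the callee's own defaults apply, then splat
run(**{k: v for k, v in opts if v is not None})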
# -*- coding: utf-8 -*-
# Copyright (c) 2020, RC and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 12131, 11, 13987, 290, 20420, 198, 2, 1114, 5964, 1321, 11, 3387, 766, 5964, 13, 14116, 198, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62,...
3.451613
62
# coding:utf-8
# author:MurphyWan
# www.py

"""
Blueprint usage (controller.py/index.py):
from flask import Blueprint
route_index = Blueprint('index_page', __name__)
@route_index.route("/")
def index():
    return "Hello World"
"""

from application import app

# interceptors
from web.interceptors.Authinterceptor import *

# url routes
from web.controllers.index import route_index
from web.controllers.user.User import route_user
from web.controllers.static import route_static
from web.controllers.account.Account import route_account
# from web.controllers.food.Food import route_food
# from web.controllers.member.Member import route_member
# from web.controllers.finance.Finance import route_finance
# from web.controllers.stat.Stat import route_stat

# app.register_blueprint(route_index, url_prefix='/')
app.register_blueprint(route_user, url_prefix='/user')
app.register_blueprint(route_static, url_prefix='/static')
app.register_blueprint(route_account, url_prefix='/account')
# the four blueprints below stay disabled until their imports are enabled
# app.register_blueprint(route_food, url_prefix='/food')
# app.register_blueprint(route_member, url_prefix='/member')
# app.register_blueprint(route_finance, url_prefix='/finance')
# app.register_blueprint(route_stat, url_prefix='/stat')
[ 2, 19617, 25, 40477, 12, 23, 198, 2, 1772, 25, 23830, 6883, 45681, 198, 2, 7324, 13, 9078, 198, 198, 37811, 10444, 13, 9078, 14, 9630, 13, 9078, 198, 6738, 42903, 1330, 39932, 198, 38629, 62, 9630, 796, 39932, 10786, 9630, 62, 7700,...
3.039506
405
# --------------
# Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# path of the data file - path
data = pd.read_csv(path)
data["Gender"].replace("-", "Agender", inplace=True)
gender_count = data.Gender.value_counts()
gender_count.plot(kind="bar")
# Code starts here


# --------------
# Code starts here
alignment = data.Alignment.value_counts()
plt.pie(alignment, labels=["good", "bad", "neutral"])


# --------------
# Code starts here
sc_df = data[["Strength", "Combat"]]
sc_covariance = sc_df.cov().iloc[0, 1]
sc_strength = sc_df.Strength.std()
sc_combat = sc_df.Combat.std()
sc_pearson = sc_covariance / (sc_strength * sc_combat)
print(sc_pearson)

ic_df = data[["Intelligence", "Combat"]]
ic_covariance = ic_df.cov().iloc[0, 1]
ic_intelligence = ic_df.Intelligence.std()
ic_combat = ic_df.Combat.std()
ic_pearson = ic_covariance / (ic_intelligence * ic_combat)
print(ic_pearson)


# --------------
# Code starts here
total_high = data.Total.quantile(0.99)
super_best = data[data.Total > total_high]
super_best_names = list(super_best.Name)
print(super_best_names)


# --------------
# Code starts here
Intelligence, ax_1 = plt.subplots()
ax_1.boxplot(data.Intelligence)
ax_1.set_title('Intelligence')

Speed, ax_2 = plt.subplots()
ax_2.boxplot(data.Speed)
ax_2.set_title('Speed')

Power, ax_3 = plt.subplots()
ax_3.boxplot(data.Power)
ax_3.set_title('Power')
[ 2, 220, 26171, 198, 2, 39681, 3696, 201, 198, 11748, 19798, 292, 355, 279, 67, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 201, 198, 201, 198, 201, 198, 2, 6978, 286, ...
2.373737
594
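Note on the row above: the Strength/Combat block computes Pearson's r by hand as cov(X, Y) / (sigma_X * sigma_Y). The same number can be cross-checked against NumPy; a small self-contained sketch on made-up data (the arrays are invented, not from the exercise's CSV):

import numpy as np

strength = np.array([10., 20., 30., 40., 50.])
combat = np.array([12., 18., 33., 41., 48.])

# manual Pearson: sample covariance over the product of sample std devs
cov = np.cov(strength, combat)[0, 1]
r_manual = cov / (strength.std(ddof=1) * combat.std(ddof=1))

# library cross-check
r_numpy = np.corrcoef(strength, combat)[0, 1]
assert abs(r_manual - r_numpy) < 1e-12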
#!/usr/bin/env python3

#MIT License

#Copyright (c) 2018 The University of Michigan

#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:

#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.

#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.

import shutil
import os
import json  # json parsing
import zipfile
import sys

from modifyDBFiles import modifyDBFiles
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 36393, 13789, 198, 198, 2, 15269, 357, 66, 8, 2864, 383, 2059, 286, 7055, 198, 198, 2, 5990, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 486...
3.737952
332
# coding: utf-8

# In[35]:

import matplotlib.pyplot as plt
from pylab import *
import numpy as np

main_image = plt.figure(figsize=(10, 10))
subplots_adjust(hspace=0.3, wspace=0.3)

# 1 - sine wave
x_0 = np.linspace(0, 2*np.pi, 20)  # X values
sub_image_1 = plt.subplot(2, 2, 1)
plt.xlabel('X value')
plt.ylabel('Sin value')
plt.grid(True)
sub_image_1.plot(x_0, np.sin(x_0), 'r--o', label='Sin(x)')
sub_image_1.legend()
sub_image_1.annotate('sin wave', xy=(3, 0.25), xytext=(4, 0.5),
                     arrowprops=dict(facecolor='black', shrink=0.05))
sub_image_1.set_title('Sin Waves')

# 2 - cosine and sine waves
x_1 = np.linspace(0, 2*np.pi, 20)
sub_image_2 = plt.subplot(2, 2, 2)
plt.xlabel('X value')
plt.ylabel('Cos and Sin value')
plt.grid(True)
sub_image_2.plot(x_1, np.cos(x_1), color='blue', linestyle='--', linewidth=1,
                 marker='o', markerfacecolor='red', markersize=6, label='Cos(x)')
sub_image_2.plot(x_1, np.sin(x_1), color='green', linestyle='-.', linewidth=3,
                 marker='^', markerfacecolor='yellow', markersize=8, label='Sin(x)')
sub_image_2.legend()
sub_image_2.set_title('Cos and Sin Waves')

# 3 - frequency histogram
bins_count = 10
mu, sigma = 100, 20
x_hist = mu + sigma*np.random.randn(1000, 1)  # 1000 draws from N(mu, sigma)
sub_image_3 = plt.subplot(2, 2, 3)
plt.xlabel('value')
plt.ylabel('count')
plt.grid(False)
tuple_return = sub_image_3.hist(x_hist, bins=bins_count, facecolor='red',
                                alpha=0.8, edgecolor='black',
                                density=False)  # raw counts (older matplotlib used normed=0/1)
sub_image_3.set_title('Frequency Histogram')
plt.xlim((floor(x_hist.min()), ceil(x_hist.max())))
bar_width = (x_hist.max() - x_hist.min()) / bins_count
plt.xticks(np.arange(floor(x_hist.min()), ceil(x_hist.max()), round(bar_width)))
for i in range(bins_count):
    sub_image_3.text(x_hist.min() + (bar_width*i) + (bar_width/2),
                     tuple_return[0][i], str(tuple_return[0][i]),
                     horizontalalignment='center', verticalalignment='bottom')

# 4 - piecewise function
x_part_1 = np.linspace(-10, -1, 10)
x_part_2 = np.linspace(0, 10, 11)
sub_image_4 = plt.subplot(2, 2, 4)
plt.xlabel('X value')
plt.ylabel('Y value')
plt.grid(False)
sub_image_4.plot(x_part_1, x_part_1*2 + 1, 'b--o', label='y=2x+1')
sub_image_4.plot(x_part_2, x_part_2**2, 'r--o', label='y=x^2')
sub_image_4.legend()
sub_image_4.set_title('PieceWise Function')

plt.show()
[ 198, 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 2, 554, 58, 2327, 5974, 628, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 279, 2645, 397, 1330, 1635, 198, 11748, 299, 32152, 355, 45941, 198, 198, 12417, ...
2.092702
1,014
"""Generated class for event_audit.json""" from .common import Entry
[ 37811, 8645, 515, 1398, 329, 1785, 62, 3885, 270, 13, 17752, 37811, 198, 198, 6738, 764, 11321, 1330, 21617, 628 ]
3.55
20
#!/usr/bin/env python # -*- coding: utf-8 -*- """ RGB Colourspace Transformations =============================== Defines the *RGB* colourspace transformations: - :func:`XYZ_to_RGB` - :func:`RGB_to_XYZ` - :func:`RGB_to_RGB` See Also -------- `RGB Colourspaces IPython Notebook <http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/rgb.ipynb>`_ # noqa """ from __future__ import division, unicode_literals import numpy as np from colour.models import xy_to_XYZ from colour.adaptation import chromatic_adaptation_matrix __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers' __license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = 'colour-science@googlegroups.com' __status__ = 'Production' __all__ = ['XYZ_to_RGB', 'RGB_to_XYZ', 'RGB_to_RGB'] def XYZ_to_RGB(XYZ, illuminant_XYZ, illuminant_RGB, to_RGB, chromatic_adaptation_method='CAT02', transfer_function=None): """ Converts from *CIE XYZ* colourspace to *RGB* colourspace using given *CIE XYZ* colourspace matrix, *illuminants*, *chromatic adaptation* method, *normalised primary matrix* and *transfer function*. Parameters ---------- XYZ : array_like, (3,) *CIE XYZ* colourspace matrix. illuminant_XYZ : array_like *CIE XYZ* colourspace *illuminant* *xy* chromaticity coordinates. illuminant_RGB : array_like *RGB* colourspace *illuminant* *xy* chromaticity coordinates. to_RGB : array_like, (3, 3) *Normalised primary matrix*. chromatic_adaptation_method : unicode, optional ('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02') *Chromatic adaptation* method. transfer_function : object, optional *Transfer function*. Returns ------- ndarray, (3,) *RGB* colourspace matrix. Notes ----- - Input *CIE XYZ* colourspace matrix is in domain [0, 1]. - Input *illuminant_XYZ* *xy* chromaticity coordinates are in domain [0, 1]. - Input *illuminant_RGB* *xy* chromaticity coordinates are in domain [0, 1]. - Output *RGB* colourspace matrix is in domain [0, 1]. Examples -------- >>> XYZ = np.array([0.1151847498, 0.1008, 0.0508937252]) >>> illuminant_XYZ = (0.34567, 0.35850) >>> illuminant_RGB = (0.31271, 0.32902) >>> chromatic_adaptation_method = 'Bradford' >>> to_RGB = np.array([ ... [3.24100326, -1.53739899, -0.49861587], ... [-0.96922426, 1.87592999, 0.04155422], ... [0.05563942, -0.2040112, 1.05714897]]) >>> XYZ_to_RGB( ... XYZ, ... illuminant_XYZ, ... illuminant_RGB, ... to_RGB, ... chromatic_adaptation_method) # doctest: +ELLIPSIS array([ 0.1730350..., 0.0821103..., 0.0567249...]) """ np.array([ [3.24100326, -1.53739899, -0.49861587], [-0.96922426, 1.87592999, 0.04155422], [0.05563942, -0.2040112, 1.05714897]]) cat = chromatic_adaptation_matrix(xy_to_XYZ(illuminant_XYZ), xy_to_XYZ(illuminant_RGB), method=chromatic_adaptation_method) adapted_XYZ = np.dot(cat, XYZ) RGB = np.dot(to_RGB.reshape((3, 3)), adapted_XYZ.reshape((3, 1))) if transfer_function is not None: RGB = np.array([transfer_function(x) for x in np.ravel(RGB)]) return np.ravel(RGB) def RGB_to_XYZ(RGB, illuminant_RGB, illuminant_XYZ, to_XYZ, chromatic_adaptation_method='CAT02', inverse_transfer_function=None): """ Converts from *RGB* colourspace to *CIE XYZ* colourspace using given *RGB* colourspace matrix, *illuminants*, *chromatic adaptation* method, *normalised primary matrix* and *transfer function*. Parameters ---------- RGB : array_like, (3,) *RGB* colourspace matrix. 
illuminant_RGB : array_like *RGB* colourspace *illuminant* chromaticity coordinates. illuminant_XYZ : array_like *CIE XYZ* colourspace *illuminant* chromaticity coordinates. to_XYZ : array_like, (3, 3) *Normalised primary matrix*. chromatic_adaptation_method : unicode, optional ('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02') *Chromatic adaptation* method. inverse_transfer_function : object, optional *Inverse transfer function*. Returns ------- ndarray, (3,) *CIE XYZ* colourspace matrix. Notes ----- - Input *RGB* colourspace matrix is in domain [0, 1]. - Input *illuminant_RGB* *xy* chromaticity coordinates are in domain [0, 1]. - Input *illuminant_XYZ* *xy* chromaticity coordinates are in domain [0, 1]. - Output *CIE XYZ* colourspace matrix is in domain [0, 1]. Examples -------- >>> RGB = np.array([0.17303501, 0.08211033, 0.05672498]) >>> illuminant_RGB = (0.31271, 0.32902) >>> illuminant_XYZ = (0.34567, 0.35850) >>> chromatic_adaptation_method = 'Bradford' >>> to_XYZ = np.array([ ... [0.41238656, 0.35759149, 0.18045049], ... [0.21263682, 0.71518298, 0.0721802], ... [0.01933062, 0.11919716, 0.95037259]]) >>> RGB_to_XYZ( ... RGB, ... illuminant_RGB, ... illuminant_XYZ, ... to_XYZ, ... chromatic_adaptation_method) # doctest: +ELLIPSIS array([ 0.1151847..., 0.1008 , 0.0508937...]) """ if inverse_transfer_function is not None: RGB = np.array([inverse_transfer_function(x) for x in np.ravel(RGB)]) XYZ = np.dot(to_XYZ.reshape((3, 3)), RGB.reshape((3, 1))) cat = chromatic_adaptation_matrix( xy_to_XYZ(illuminant_RGB), xy_to_XYZ(illuminant_XYZ), method=chromatic_adaptation_method) adapted_XYZ = np.dot(cat, XYZ.reshape((3, 1))) return np.ravel(adapted_XYZ) def RGB_to_RGB(RGB, input_colourspace, output_colourspace, chromatic_adaptation_method='CAT02'): """ Converts from given input *RGB* colourspace to output *RGB* colourspace using given *chromatic adaptation* method. Parameters ---------- RGB : array_like, (3,) *RGB* colourspace matrix. input_colourspace : RGB_Colourspace *RGB* input colourspace. output_colourspace : RGB_Colourspace *RGB* output colourspace. chromatic_adaptation_method : unicode, optional ('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02') *Chromatic adaptation* method. ndarray, (3,) *RGB* colourspace matrix. Notes ----- - *RGB* colourspace matrices are in domain [0, 1]. Examples -------- >>> from colour import sRGB_COLOURSPACE, PROPHOTO_RGB_COLOURSPACE >>> RGB = np.array([0.35521588, 0.41, 0.24177934]) >>> RGB_to_RGB( ... RGB, ... sRGB_COLOURSPACE, ... PROPHOTO_RGB_COLOURSPACE) # doctest: +ELLIPSIS array([ 0.3579334..., 0.4007138..., 0.2615704...]) """ cat = chromatic_adaptation_matrix( xy_to_XYZ(input_colourspace.whitepoint), xy_to_XYZ(output_colourspace.whitepoint), chromatic_adaptation_method) trs_matrix = np.dot(output_colourspace.to_RGB, np.dot(cat, input_colourspace.to_XYZ)) return np.dot(trs_matrix, RGB)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 36982, 38773, 13200, 26981, 602, 198, 4770, 25609, 18604, 198, 198, 7469, 1127, 262, 1635, 36982, 9, 9...
2.204743
3,458
from typing import List

import pandas as pd
import pytest

from preprocessing.assign_folds import assign_folds

testdata = [
    [
        [
            "patient1",
            "patient2",
            "patient3",
            "patient4",
            "patient5",
            "patient6",
            "patient7",
            "patient8",
            "patient9",
            "patient1",  # second 1
            "patient3",  # second 3
            "patient10",
        ],
        [
            "image1.dcm",
            "image2.dcm",
            "image3.dcm",
            "image4.dcm",
            "image5.dcm",
            "image6.dcm",
            "image7.dcm",
            "image8.dcm",
            "image9.dcm",
            "image10.dcm",
            "image11.dcm",
            "image12.dcm",
        ],
        [1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1],
        3,
    ]
]
[ 6738, 19720, 1330, 7343, 198, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 12972, 9288, 198, 198, 6738, 662, 36948, 13, 562, 570, 62, 69, 10119, 1330, 8333, 62, 69, 10119, 198, 198, 9288, 7890, 796, 685, 198, 220, 220, 220, 685...
1.555556
558
from django.contrib.auth.models import User
from django.db import models

from apps.tags.models import Tag
[ 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 198, 6738, 6725, 13, 31499, 13, 27530, 1330, 17467, 628 ]
3.483871
31
"""Tests for __main__.py.""" # import logging from unittest.mock import MagicMock, patch import pytest import viseron.__main__ def test_init(simple_config, mocked_viseron): """Test init.""" viseron.__main__.main() # viseron.__main__.LOGGER.info("testing") with patch.object(viseron.__main__, "main", MagicMock()) as mock_main: with patch.object(viseron.__main__, "__name__", "__main__"): viseron.__main__.init() mock_main.assert_called_once() # class TestMyFormatter: # """Tests for class MyFormatter.""" # def test_format(self): # """Test formatter.""" # formatter = viseron.__main__.MyFormatter() # record = logging.makeLogRecord( # { # "name": "test_logger", # "level": 10, # "pathname": "test_main.py", # "msg": "Testing, message repeated 2 times", # } # ) # formatter.format(record)
[ 37811, 51, 3558, 329, 11593, 12417, 834, 13, 9078, 526, 15931, 198, 2, 1330, 18931, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 6139, 44, 735, 11, 8529, 198, 198, 11748, 12972, 9288, 198, 198, 11748, 1490, 263, 261, 13, 834, 12417,...
2.08137
467
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import testtools from ironicclient import exc from ironicclient.tests.unit import utils import ironicclient.v1.allocation ALLOCATION = {'uuid': '11111111-2222-3333-4444-555555555555', 'name': 'Allocation-name', 'owner': None, 'state': 'active', 'node_uuid': '66666666-7777-8888-9999-000000000000', 'last_error': None, 'resource_class': 'baremetal', 'traits': [], 'candidate_nodes': [], 'extra': {}} ALLOCATION2 = {'uuid': '55555555-4444-3333-2222-111111111111', 'name': 'Allocation2-name', 'owner': 'fake-owner', 'state': 'allocating', 'node_uuid': None, 'last_error': None, 'resource_class': 'baremetal', 'traits': [], 'candidate_nodes': [], 'extra': {}} CREATE_ALLOCATION = copy.deepcopy(ALLOCATION) for field in ('state', 'node_uuid', 'last_error'): del CREATE_ALLOCATION[field] fake_responses = { '/v1/allocations': { 'GET': ( {}, {"allocations": [ALLOCATION, ALLOCATION2]}, ), 'POST': ( {}, CREATE_ALLOCATION, ), }, '/v1/allocations/%s' % ALLOCATION['uuid']: { 'GET': ( {}, ALLOCATION, ), 'DELETE': ( {}, None, ), }, '/v1/allocations/?node=%s' % ALLOCATION['node_uuid']: { 'GET': ( {}, {"allocations": [ALLOCATION]}, ), }, '/v1/allocations/?owner=%s' % ALLOCATION2['owner']: { 'GET': ( {}, {"allocations": [ALLOCATION2]}, ), }, } fake_responses_pagination = { '/v1/allocations': { 'GET': ( {}, {"allocations": [ALLOCATION], "next": "http://127.0.0.1:6385/v1/allocations/?limit=1"} ), }, '/v1/allocations/?limit=1': { 'GET': ( {}, {"allocations": [ALLOCATION2]} ), }, '/v1/allocations/?marker=%s' % ALLOCATION['uuid']: { 'GET': ( {}, {"allocations": [ALLOCATION2]} ), }, } fake_responses_sorting = { '/v1/allocations/?sort_key=updated_at': { 'GET': ( {}, {"allocations": [ALLOCATION2, ALLOCATION]} ), }, '/v1/allocations/?sort_dir=desc': { 'GET': ( {}, {"allocations": [ALLOCATION2, ALLOCATION]} ), }, }
[ 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 743, 7330, 198, 2, 220, 220, ...
1.919269
1,697
from flask import session, redirect, url_for
from flask.json import jsonify

from api import app, oauth
from api import models
[ 6738, 42903, 1330, 6246, 11, 18941, 11, 19016, 62, 1640, 198, 6738, 42903, 13, 17752, 1330, 33918, 1958, 198, 6738, 40391, 1330, 598, 11, 267, 18439, 198, 6738, 40391, 1330, 4981, 628, 628, 198 ]
3.823529
34
if __name__ == '__main__':
    column_generator()
[ 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 5721, 62, 8612, 1352, 3419, 198 ]
2.363636
22
# -*- coding: utf-8 -*-
""" This file contains language-specific implementation for an
    Afrikaans voice.

    The idea is that this file contains subclassed Voice and Phoneset
    implementations. This package ttslab/voices may then also contain
    speaker specific implementations e.g. "afrikaans_SPEAKER.py"
"""
from __future__ import unicode_literals, division, print_function #Py2

__author__ = "Daniel van Niekerk"
__email__ = "dvn.demitasse@gmail.com"

import re
from collections import OrderedDict

from .. phoneset import Phoneset
from .. defaultvoice import LwaziHTSVoice, LwaziPromHTSVoice
from .. synthesizer_htsme import SynthesizerHTSME
import ttslab.hts_labels_prom as hts_labels_prom
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 770, 2393, 4909, 3303, 12, 11423, 7822, 329, 281, 198, 220, 220, 220, 2483, 28716, 504, 3809, 13, 628, 220, 220, 220, 383, 2126, 318, 326, 428, 2393, 4909, 4761...
3.115044
226
#!/usr/bin/python # -*- coding: utf-8 -*- """ This module contains analysis done for the Ocean iodide (Oi!) project This includes presentation at conferences etc... """ import numpy as np import pandas as pd import sparse2spatial as s2s import sparse2spatial.utils as utils import matplotlib import matplotlib.pyplot as plt # import AC_tools (https://github.com/tsherwen/AC_tools.git) import AC_tools as AC # Get iodide specific functions import observations as obs def main(): """ Run various misc. scripted tasks linked to the "iodide in the ocean" project """ pass # ---- ----- ----- ----- ----- ----- ----- ----- ----- # ----- ----- Misc (associated iodide project tasks) # These include getting CTM (GEOS-Chem) output for Anoop/Sawalha/TropMet # --- Make planeflight files for cruise # mk_pf_files4Iodide_cruise() # mk_pf_files4Iodide_cruise(mk_column_output_files=True) # Test the input files for these cruises? # test_input_files4Iodide_cruise_with_plots() # Test output files for cruises # TEST_iodide_cruise_output() # TEST_AND_PROCESS_iodide_cruise_output() # TEST_AND_PROCESS_iodide_cruise_output(just_process_surface_data=False) # Get numbers for data paper (data descriptor paper) # get_numbers_for_data_paper() # Get Longhurst province labelled NetCDF for res # add_LonghurstProvince2NetCDF(res='4x5', ExStr='TEST_VI' ) # add_LonghurstProvince2NetCDF(res='2x2.5', ExStr='TEST_V' ) # add_LonghurstProvince2NetCDF(res='0.125x0.125', ExStr='TEST_VIII' ) # Add Longhurst Province to a lower res NetCDF file # folder = './' # filename = 'Oi_prj_output_iodide_field_1x1_deg_0_5_centre.nc' # filename = 'Oi_prj_output_iodide_field_0_5x0_5_deg_centre.nc' # ds = xr.open_dataset(folder+filename) # add_LonghurstProvince2NetCDF(ds=ds, res='0.5x0.5', ExStr='TEST_VIII') # process this to csv files for Indian' sea-surface paper # --------------------------------------------------------------------------- # ---------- Functions to produce output for Iodide obs. paper ------------- # --------------------------------------------------------------------------- def get_PDF_of_iodide_exploring_data_rootset(show_plot=False, ext_str=None): """ Get PDF of plots exploring the iodide dataset """ import seaborn as sns sns.set(color_codes=True) # Get the data df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM # if ext_str == 'Open_ocean': # Kludge data # Kludge_tinel_data=True # if Kludge_tinel_data: # new_Data = [ 'He_2014', 'He_2013'] # new_Data += ['Chance_2018_'+i for i in 'I', 'II', 'III'] # df.loc[ df['Data_Key'].isin(new_Data), 'Coastal'] = False # only take data flagged open ocean df = df.loc[df[u'Coastal'] == 0.0, :] elif ext_str == 'Coastal': df = df.loc[df[u'Coastal'] == 1.0, :] elif ext_str == 'all': print('Using entire dataset') else: print('Need to set region of data to explore - currently', ext_str) sys.exit() # setup PDF savetitle = 'Oi_prj_data_root_exploration_{}'.format(ext_str) dpi = 320 pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi) # colours to use? 
# current_palette = sns.color_palette() current_palette = sns.color_palette("colorblind") # --- --- --- --- --- --- --- --- # ---- Add in extra varibles # iodide / iodate I_div_IO3_var = 'I$^{-}$/IO$_{3}^{-}$ (ratio)' df[I_div_IO3_var] = df['Iodide'] / df['Iodate'] # total iodide I_plus_IO3 = 'I$^{-}$+IO$_{3}^{-}$' df[I_plus_IO3] = df['Iodide'] + df['Iodate'] # --- Add ocean basin to dataframe area_var = 'Region' df[area_var] = None # setup a dummy column # --- --- --- --- --- --- --- --- # --- Plot dataset locations sns.reset_orig() # Get lats, lons and size of dataset lats = df['Latitude'].values lons = df['Longitude'].values N_size = df.shape[0] if ext_str == 'Open_ocean': title = 'Iodide data (Open Ocean) explored in PDF (N={})' else: title = 'Iodide data (all) explored in this PDF (N={})' # plot up AC.plot_lons_lats_spatial_on_map(lats=lats, lons=lons, title=title.format(N_size), split_title_if_too_long=False, f_size=10) # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show_plot: plt.show() plt.close() # --- --- --- --- --- --- --- --- # --- iodide to iodide ratio import seaborn as sns sns.set(color_codes=True) current_palette = sns.color_palette("colorblind") # plot up with no limits df.plot(kind='scatter', y=I_div_IO3_var, x='Latitude') # beautify plt.title(I_div_IO3_var + ' ({}, y axis unlimited)'.format(ext_str)) # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show_plot: plt.show() plt.close() # plot up with limits at 3 ylimits = 1.5, 0.75, 0.5, for ylimit in ylimits: df.plot(kind='scatter', y=I_div_IO3_var, x='Latitude') # beautify title = ' ({}, y axis limit: {})'.format(ext_str, ylimit) plt.title(I_div_IO3_var + title) plt.ylim(-0.05, ylimit) # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show_plot: plt.show() plt.close() # --- --- --- --- --- --- --- --- # TODO - update to use proper definitions # for southern ocean use the files below # for rest https://www.nodc.noaa.gov/woce/woce_v3/wocedata_1/woce-uot/summary/bound.htm # # --- iodide to iodide ratio ( split by region ) # Between 120E and -80E its Pacific upper_val = 120 lower_val = -80 unit = '$^{o}$E' bool_1 = df[u'Longitude'] >= upper_val bool_2 = df[u'Longitude'] < lower_val bool = (np.column_stack((bool_2, bool_1)).any(axis=1)) varname = 'Pacific Ocean ({} to {}{})'.format(upper_val, lower_val, unit) df.loc[bool, area_var] = varname # Between -80E and 30E its Atlantic upper_val = -80 lower_val = 30 unit = '$^{o}$E' bool_1 = df[u'Longitude'] >= upper_val bool_2 = df[u'Longitude'] < lower_val bool = (np.column_stack((bool_2, bool_1)).all(axis=1)) varname = 'Atlantic Ocean ({} to {}{})'.format(lower_val, upper_val, unit) df.loc[bool, area_var] = varname # Between 30E and 120E its Indian upper_val = 30 lower_val = 120 unit = '$^{o}$E' bool_1 = df[u'Longitude'] >= upper_val bool_2 = df[u'Longitude'] < lower_val bool = (np.column_stack((bool_2, bool_1)).all(axis=1)) varname = 'Indian Ocean ({} to {}{})'.format(lower_val, upper_val, unit) df.loc[bool, area_var] = varname # if latitude below 60S, overwrite to be Southern ocean varname = 'Southern Ocean' df.loc[df['Latitude'] < -60, area_var] = varname # --- --- --- --- --- --- --- --- # --- locations of data sns.reset_orig() # loop regions for var_ in list(set(df[area_var].tolist())): # select data for area df_tmp = df[df[area_var] == var_] # locations ? 
lons = df_tmp[u'Longitude'].tolist() lats = df_tmp[u'Latitude'].tolist() #Now plot AC.plot_lons_lats_spatial_on_map(lons=lons, lats=lats) # fig=fig, ax=ax , color='blue', label=label, alpha=alpha, # window=window, axis_titles=axis_titles, return_axis=True, # p_size=p_size) plt.title('{} ({})'.format(var_, ext_str)) if show_plot: plt.show() # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # --- --- --- --- --- --- --- --- # --- iodide to iodide ratio import seaborn as sns sns.set(color_codes=True) current_palette = sns.color_palette("colorblind") # loop regions for var_ in list(set(df[area_var].tolist())): # select data for area df_tmp = df[df[area_var] == var_] # plot up with no limits df_tmp.plot(kind='scatter', y=I_div_IO3_var, x='Latitude') # beautify plt.title(I_div_IO3_var + ' ({}, y axis unlimited)'.format(var_)) # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show_plot: plt.show() plt.close() # plot up with limits at 3 ylimits = 1.5, 0.75, 0.5 for ylimit in ylimits: df_tmp.plot(kind='scatter', y=I_div_IO3_var, x='Latitude') # beautify title = ' ({}, y axis limit: {})'.format(var_, ylimit) plt.title(I_div_IO3_var + title) plt.ylim(-0.05, ylimit) # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show_plot: plt.show() plt.close() # --- --- --- --- --- --- --- --- # --- iodide + iodide import seaborn as sns sns.set(color_codes=True) current_palette = sns.color_palette("colorblind") # loop regions for var_ in list(set(df[area_var].tolist())): # select data for area df_tmp = df[df[area_var] == var_] # plot up with no limits df_tmp.plot(kind='scatter', y=I_plus_IO3, x='Latitude') # beautify plt.title(I_plus_IO3 + ' ({}, y axis unlimited)'.format(var_)) # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show_plot: plt.show() plt.close() # plot up with limits at 3 # ylimits = 1.5, 0.75, 0.5 # for ylimit in ylimits: # df.plot(kind='scatter', y=I_plus_IO3, x='Latitude' ) # # beautify # title= ' ({}, y axis limited to {})'.format(var_, ylimit) # plt.title( I_plus_IO3 + title ) # plt.ylim(-0.05, ylimit ) # # Save to PDF and close plot # AC.plot2pdfmulti( pdff, savetitle, dpi=dpi ) # if show_plot: plt.show() # plt.close() # plot up with limits on y ylimits = [100, 600] # for ylimit in ylimits: df_tmp.plot(kind='scatter', y=I_plus_IO3, x='Latitude') # beautify title = ' ({}, y axis={}-{})'.format(var_, ylimits[0], ylimits[1]) plt.title(I_plus_IO3 + title) plt.ylim(ylimits[0], ylimits[1]) # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show_plot: plt.show() plt.close() # -- Save entire pdf AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi) # --------------------------------------------------------------------------- # ---------- Funcs. to process iodine obs/external data -------------------- # --------------------------------------------------------------------------- def check_points_for_cruises(target='Iodide', verbose=False, debug=False): """ Check the cruise points for the new data (Tinel, He, etc...) 
""" # Get the observational data df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM # And the metadata metadata_df = obs.get_iodide_obs_metadata() # Only consider new datasets new_cruises = metadata_df[metadata_df['In Chance2014?'] == 'N'] df = df[df['Data_Key'].isin(new_cruises['Data_Key'].tolist())] # Strings to format printing ptr_str_I = '- '*5 + 'Cruise: {:<20}' ptr_str_II = '(Source: {:<20}, Location: {:<15}, N: {}, N(Iodide): {})' # Print by cruise for data_key in set(df['Data_Key']): df_m_tmp = metadata_df[metadata_df['Data_Key'] == data_key] df_tmp = df[df['Data_Key'] == data_key] # Extract metadata Cruise = df_m_tmp['Cruise'].values[0] Source = df_m_tmp['Source'].values[0] Location = df_m_tmp['Location'].values[0] # N = df_tmp.shape[0] N_I = df_tmp[target].dropna().shape[0] print(ptr_str_I.format(Cruise)) print(ptr_str_II.format(Source, Location, N, N_I)) # Points for all cruises N = df.shape[0] N_I = df[target].dropna().shape[0] print(ptr_str_I.format('ALL new data')) print(ptr_str_II.format('', '', N, N_I)) def plot_threshold_plus_SD_spatially(var=None, value=None, std=None, res='4x5', fillcontinents=True, show_plot=False, dpi=320, save2png=True, verbose=True, debug=False): """ Plot up the spatial extent of a input variable value + Std. Dev. """ # - Local variables # Get the core input variables data_root = utils.get_file_locations('data_root') filename = 'Oi_prj_feature_variables_{}.nc'.format(res) ds = xr.open_dataset(data_root + filename) # make sure the dataset has units ds = add_units2ds(ds) # Use appropriate plotting settings for resolution if res == '0.125x0.125': centre = True else: centre = False # Get data arr = ds[var].mean(dim='time').values # colour in values above and below threshold (works) arr[arr >= value] = 1 arr[arr >= value-std] = 0.5 arr[(arr != 1) & (arr != 0.5)] = 0.01 # Get units from dataset units = ds[var].units # Plot up title_str = "'{}' ({}) threshold Value ({}) + \n Standard deviation ({})" title = title_str.format(var, units, value, std) if var == 'WOA_TEMP_K': title += ' (in degC={}, std={})'.format(value-273.15, std) # Plot using AC_tools AC.plot_spatial_figure(arr, # extend=extend, # fixcb=fixcb, nticks=nticks, \ res=res, show=False, title=title, \ fillcontinents=fillcontinents, centre=centre, units=units, # f_size=f_size, no_cb=False) # Use a tight layout plt.tight_layout() # Now save or show if show_plot: plt.show() savetitle = 'Oi_prj_threshold_std_4_var_{}_{}'.format(var, res) if save2png: plt.savefig(savetitle+'.png', dpi=dpi) plt.close() # --------------------------------------------------------------------------- # -------------- Reproduction of Chance et al (2014) figures ---------------- # --------------------------------------------------------------------------- def plot_up_iodide_vs_latitude(show_plot=True): """ Reproduce Fig. 3 in Chance et al (2014) Notes ---- - figure captions: Variation of sea-surface iodide concentration with latitude for entire data set (open diamonds) and open ocean data only (filled diamonds). For clarity, one exceptionally high coastal iodide value (700 nM, 58.25N) has been omitted. """ # - Get data df = get_core_Chance2014_obs() # Select data of interest # ( later add a color selection based on coastal values here? ) vars = ['Iodide', 'Latitude'] print(df) # and select coastal/open ocean df_coastal = df[df['Coastal'] == True][vars] df_open_ocean = df[~(df['Coastal'] == True)][vars] # - Now plot Obs. 
    # plot coastal
    ax = df_coastal.plot(kind='scatter', x='Latitude', y='Iodide',
                         marker='D', color='blue', alpha=0.1,
                         # markerfacecolor="None", **kwds )
                         )
    # plot open ocean
    ax = df_open_ocean.plot(kind='scatter', x='Latitude', y='Iodide',
                            marker='D', color='blue', alpha=0.5, ax=ax,
                            # markerfacecolor="None", **kwds )
                            )
    # Update aesthetics of plot
    plt.ylabel('[Iodide], nM')
    plt.xlabel('Latitude, $^{o}$N')
    plt.ylim(-5, 500)
    plt.xlim(-80, 80)
    # save or show?
    if show_plot: plt.show()
    plt.close()


def plot_up_ln_iodide_vs_Nitrate(show_plot=True):
    """
    Reproduce Fig. 11 in Chance et al (2014)

    Original caption:
    Ln[iodide] concentration plotted against observed ( ) and climatological
    ( ) nitrate concentration obtained from the World Ocean Atlas as described
    in the text for all data (A) and nitrate concentrations below 2 mM (B) and
    above 2 mM (C). Dashed lines in B and C show the relationships between
    iodide and nitrate adapted from Campos et al. [41] by Ganzeveld et al. [27]
    """
    # - location of data to plot
    df = obs.get_processed_df_obs_mod()
    # take log of iodide
    df['Iodide'] = np.log(df['Iodide'].values)
    # - Plot up all nitrate concentrations
    df.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D', color='k')
    plt.ylabel('LN[Iodide], nM')
    plt.xlabel('LN[Nitrate], mM')
    if show_plot: plt.show()
    plt.close()
    # - Plot up all nitrate concentrations below 2 mM
    df_tmp = df[df['Nitrate'] < 2]
    df_tmp.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D', color='k')
    plt.ylabel('LN[Iodide], nM')
    plt.xlabel('LN[Nitrate], mM')
    if show_plot: plt.show()
    plt.close()
    # - Plot up all nitrate concentrations above 2 mM
    df_tmp = df[df['Nitrate'] > 2]
    df_tmp.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D', color='k')
    plt.ylabel('LN[Iodide], nM')
    plt.xlabel('LN[Nitrate], mM')
    if show_plot: plt.show()
    plt.close()


def plot_up_ln_iodide_vs_SST(show_plot=True):
    """
    Reproduce Fig. 8 in Chance et al (2014)

    Original caption:
    Ln[iodide] concentration plotted against observed sea surface temperature
    ( ) and climatological sea surface temperature ( ) values obtained from
    the World Ocean Atlas as described in the text.
    """
    # - location of data to plot
    folder = utils.get_file_locations('data_root')
    f = 'Iodine_obs_WOA.csv'
    df = pd.read_csv(folder+f, encoding='utf-8')
    # take log of iodide
    df['Iodide'] = np.log(df['Iodide'].values)
    # - Plot up all SST data
    df.plot(kind='scatter', x='Temperature', y='Iodide', marker='D', color='k')
    plt.ylabel('LN[Iodide], nM')
    plt.xlabel('Sea surface temperature (SST), $^{o}$C')
    if show_plot: plt.show()
    plt.close()


def plot_up_ln_iodide_vs_salinity(show_plot=True):
    """
    Reproduce Fig. 8 in Chance et al (2014)

    Original caption:
    Ln[iodide] concentration plotted against observed salinity ( , ) and
    climatological salinity ( ) values obtained from the World Ocean Atlas as
    described in the text for: (A) all data; (B) samples with salinity greater
    than 30, shown in shaded area in (A). Note samples with salinity less than
    30 have been excluded from further analysis and are not shown in Fig. 8.
""" # - location of data to plot folder = utils.get_file_locations('data_root') f = 'Iodine_obs_WOA.csv' df = pd.read_csv(folder+f, encoding='utf-8') # Just select non-coastal data # df = df[ ~(df['Coastal']==True) ] # take log of iodide df['Iodide'] = np.log(df['Iodide'].values) # - Plot up all nitrate concentrations df.plot(kind='scatter', x='Salinity', y='Iodide', marker='D', color='k') plt.ylabel('LN[Iodide], nM') plt.xlabel('Salinity') plt.xlim(-2, AC.myround(max(df['Salinity']), 10, round_up=True)) if show_plot: plt.show() plt.close() # - Plot up all nitrate concentrations df_tmp = df[df['Salinity'] < 30] df_tmp.plot(kind='scatter', x='Salinity', y='Iodide', marker='D', color='k') plt.ylabel('LN[Iodide], nM') plt.xlabel('Salinity') plt.xlim(-2, AC.myround(max(df['Salinity']), 10, round_up=True)) if show_plot: plt.show() plt.close() # - Plot up all nitrate concentrations df_tmp = df[df['Salinity'] > 30] df_tmp.plot(kind='scatter', x='Salinity', y='Iodide', marker='D', color='k') plt.ylabel('LN[Iodide], nM') plt.xlabel('Salinity') plt.xlim(29, AC.myround(max(df['Salinity']), 10, round_up=True)) if show_plot: plt.show() plt.close() def plot_pair_grid(df=None, vars_list=None): """ Make a basic pair plot to test the data """ import seaborn as sns import matplotlib.pyplot as plt import pandas as pd import numpy as np from itertools import cycle # make a kde plot # define colormap to cycle make_kde.cmap_cycle = cycle(('Blues_r', 'Greens_r', 'Reds_r', 'Purples_r')) # Plot a pair plot pg = sns.PairGrid(data, vars=vars_list) # --------------------------------------------------------------------------- # ---------------- New plotting of iodine obs/external data ----------------- # --------------------------------------------------------------------------- def explore_extracted_data_in_Oi_prj_explore_Arctic_Antarctic_obs(dsA=None, res='0.125x0.125', dpi=320): """ Analyse the gridded data for the Arctic and Antarctic """ import matplotlib import matplotlib.pyplot as plt matplotlib.style.use('ggplot') import seaborn as sns sns.set() # - local variables # Get input variables if isinstance(dsA, type(None)): filename = 'Oi_prj_predicted_iodide_{}.nc'.format(res) # folder = '/shared/earth_home/ts551/labbook/Python_progs/' folder = '/shared/earth_home/ts551/data/iodide/' filename = 'Oi_prj_feature_variables_{}.nc'.format(res) dsA = xr.open_dataset(folder + filename) # ds = xr.open_dataset( filename ) # variables to consider vars2analyse = list(dsA.data_vars) # Add LWI to array - NOTE: 1 = water in Nature run LWI files ! # ( The above comment is not correct! why is this written here? 
) folderLWI = utils.get_file_locations( 'AC_tools')+'/data/LM/TEMP_NASA_Nature_run/' filenameLWI = 'ctm.nc' LWI = xr.open_dataset(folderLWI+filenameLWI) # updates dates (to be Jan=>Dec) new_dates = [datetime.datetime(1970, i, 1) for i in LWI['time.month']] LWI.time.values = new_dates # Sort by new dates LWI = LWI.loc[{'time': sorted(LWI.coords['time'].values)}] # LWI = AC.get_LWI_map(res=res)[...,0] dsA['IS_WATER'] = dsA['WOA_TEMP'].copy() dsA['IS_WATER'].values = (LWI['LWI'] == 0) # add is land dsA['IS_LAND'] = dsA['IS_WATER'].copy() dsA['IS_LAND'].values = (LWI['LWI'] == 1) # get surface area s_area = AC.calc_surface_area_in_grid(res=res) # m2 land map dsA['AREA'] = dsA['WOA_TEMP'].mean(dim='time') dsA['AREA'].values = s_area.T # - Select data of interest by variable for locations # setup dicts to store the extracted values df65N, df65S, dfALL = {}, {}, {} # - setup booleans for the data # now loop and extract variablesl vars2use = [ 'WOA_Nitrate', # 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO', ] # setup PDF savetitle = 'Oi_prj_explore_Arctic_Antarctic_ancillaries_space_PERTURBED' pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi) # Loop by dataset (region) and plots for var_ in vars2use: # select the boolean for if water IS_WATER = dsA['IS_WATER'].values if IS_WATER.shape != dsA[var_].shape: # special case for depth # get value for >= 65 ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65)) arr = np.ma.array(12*[ds_tmp[var_].values]) arr = arr[ds_tmp['IS_WATER'].values] # add to saved arrays df65N[var_] = arr del ds_tmp # get value for >= 65 ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65)) arr = np.ma.array(12*[ds_tmp[var_].values]) arr = arr[ds_tmp['IS_WATER'].values] # add to saved arrays df65S[var_] = arr del ds_tmp # get value for all ds_tmp = dsA.copy() arr = np.ma.array(12*[ds_tmp[var_].values]) arr = arr[ds_tmp['IS_WATER'].values] # add to saved arrays dfALL[var_] = arr del ds_tmp else: # get value for >= 65 ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65)) arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values] # add to saved arrays df65N[var_] = arr del ds_tmp # get value for >= 65 ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65)) arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values] # add to saved arrays df65S[var_] = arr del ds_tmp # get value for >= 65 ds_tmp = dsA.copy() arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values] # add to saved arrays dfALL[var_] = arr del ds_tmp # setup a dictionary of regions to plot from dfs = { '>=65N': pd.DataFrame(df65N), '>=65S': pd.DataFrame(df65S), 'Global': pd.DataFrame(dfALL), } # - plot up the PDF distribution of each of the variables. for var2use in vars2use: print(var2use) # set a single axis to use. 
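# NOTE: the helper below is a minimal, illustrative sketch of the masking
# pattern used above: a time-invariant 2D field (e.g. depth) is tiled to a
# monthly (12, lat, lon) stack, then only ocean cells are kept via the
# boolean water mask. The `_sketch_*` name is an assumption and the helper
# is not called anywhere in this module.
def _sketch_ocean_only_values(field2d, water_mask2d, n_months=12):
    """
    Minimal sketch: extract ocean-only values from a tiled monthly stack
    """
    import numpy as np
    # tile the 2D field to a (n_months, lat, lon) stack
    stacked = np.ma.array(n_months * [field2d])
    # tile the 2D boolean water mask to the same shape
    mask3d = np.array(n_months * [water_mask2d])
    # boolean indexing returns a flat 1D array of ocean-only values
    return stacked[mask3d]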
fig, ax = plt.subplots() for dataset in datasets: # select the DataFrame df = dfs[dataset][var2use] # Get sample size N_ = df.shape[0] # do a dist plot label = '{} (N={})'.format(dataset, N_) sns.distplot(df, ax=ax, label=label) # Make sure the values are correctly scaled ax.autoscale() # Plot up the perturbations too for perturb in perturb2use: perturb # Beautify title_str = "PDF of ancillary input for '{}'" fig.suptitle(title_str.format(var2use)) plt.legend() # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # -Save entire pdf AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi) def explore_extracted_data_in_Oi_prj_explore_Arctic_Antarctic_obs(dsA=None, res='0.125x0.125', dpi=320): """ Analyse the input data for the Arctic and Antarctic """ import matplotlib # matplotlib.use('Agg') import matplotlib.pyplot as plt matplotlib.style.use('ggplot') import seaborn as sns sns.set() # - local variables # get input variables if isinstance(dsA, type(None)): filename = 'Oi_prj_predicted_iodide_{}.nc'.format(res) # folder = '/shared/earth_home/ts551/labbook/Python_progs/' folder = '/shared/earth_home/ts551/data/iodide/' filename = 'Oi_prj_feature_variables_{}.nc'.format(res) dsA = xr.open_dataset(folder + filename) # ds = xr.open_dataset( filename ) # variables to consider vars2analyse = list(dsA.data_vars) # add LWI to array - NOTE: 1 = water in Nature run LWI files ! # ( The above comment is not correct! why is this written here? ) folderLWI = utils.get_file_locations( 'AC_tools')+'/data/LM/TEMP_NASA_Nature_run/' filenameLWI = 'ctm.nc' LWI = xr.open_dataset(folderLWI+filenameLWI) # updates dates (to be Jan=>Dec) new_dates = [datetime.datetime(1970, i, 1) for i in LWI['time.month']] LWI.time.values = new_dates # Sort by new dates LWI = LWI.loc[{'time': sorted(LWI.coords['time'].values)}] # LWI = AC.get_LWI_map(res=res)[...,0] dsA['IS_WATER'] = dsA['WOA_TEMP'].copy() dsA['IS_WATER'].values = (LWI['LWI'] == 0) # add is land dsA['IS_LAND'] = dsA['IS_WATER'].copy() dsA['IS_LAND'].values = (LWI['LWI'] == 1) # get surface area s_area = AC.calc_surface_area_in_grid(res=res) # m2 land map dsA['AREA'] = dsA['WOA_TEMP'].mean(dim='time') dsA['AREA'].values = s_area.T # - Select data of interest by variable for locations # setup dicts to store the extracted values df65N, df65S, dfALL = {}, {}, {} # - setup booleans for the data # now loop and extract variablesl vars2use = [ 'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO', ] for var_ in vars2use: # select the boolean for if water IS_WATER = dsA['IS_WATER'].values if IS_WATER.shape != dsA[var_].shape: # special case for depth # get value for >= 65 ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65)) arr = np.ma.array(12*[ds_tmp[var_].values]) arr = arr[ds_tmp['IS_WATER'].values] # add to saved arrays df65N[var_] = arr del ds_tmp # get value for >= 65 ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65)) arr = np.ma.array(12*[ds_tmp[var_].values]) arr = arr[ds_tmp['IS_WATER'].values] # add to saved arrays df65S[var_] = arr del ds_tmp # get value for all ds_tmp = dsA.copy() arr = np.ma.array(12*[ds_tmp[var_].values]) arr = arr[ds_tmp['IS_WATER'].values] # add to saved arrays dfALL[var_] = arr del ds_tmp else: # get value for >= 65 ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65)) arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values] # add to saved arrays df65N[var_] = arr del ds_tmp # get value for >= 65 ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65)) arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values] # add to saved arrays df65S[var_] = 
arr del ds_tmp # get value for >= 65 ds_tmp = dsA.copy() arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values] # add to saved arrays dfALL[var_] = arr del ds_tmp # setup a dictionary of regions to plot from dfs = { '>=65N': pd.DataFrame(df65N), '>=65S': pd.DataFrame(df65S), 'Global': pd.DataFrame(dfALL), } # - Loop regions and plot PDFs of variables of interest # vars2use = dfs[ dfs.keys()[0] ].columns # set PDF savetitle = 'Oi_prj_explore_Arctic_Antarctic_ancillaries_space' pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi) # Loop by dataset (region) and plots datasets = sorted(dfs.keys()) for dataset in datasets: # select the DataFrame df = dfs[dataset][vars2use] # Get sample size N_ = df.shape[0] # do a pair plot g = sns.pairplot(df) # Add a title plt.suptitle("Pairplot for '{}' (N={})".format(dataset, N_)) # adjust plots g.fig.subplots_adjust(top=0.925, left=0.085) # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # - Plot up the PDF distribution of each of the variables. for var2use in vars2use: print(var2use) # set a single axis to use. fig, ax = plt.subplots() for dataset in datasets: # select the DataFrame df = dfs[dataset][var2use] # Get sample size N_ = df.shape[0] # do a dist plot label = '{} (N={})'.format(dataset, N_) sns.distplot(df, ax=ax, label=label) # Make sure the values are correctly scaled ax.autoscale() # Beautify title_str = "PDF of ancillary input for '{}'" fig.suptitle(title_str.format(var2use)) plt.legend() # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # - Plot up the number of oceanic data points by lat for each lat # Plot up number of samples for South pole ds = dsA.sel(lat=(dsA['lat'] <= -65)) var_ = 'WOA_Salinity' N = {} for lat in ds['lat'].values: ds_tmp = ds.sel(lat=lat) N[lat] = ds_tmp[var_].values[ds_tmp['IS_WATER'].values].shape[-1] N = pd.Series(N) N.plot() plt.ylabel('number of gridboxes in predictor array') plt.xlabel('Latitude $^{\circ}$N') plt.title('Number of gridboxes for Antarctic (<= -65N)') # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # Plot up number of samples for North pole ds = dsA.sel(lat=(dsA['lat'] >= 65)) var_ = 'WOA_Salinity' N = {} for lat in ds['lat'].values: ds_tmp = ds.sel(lat=lat) N[lat] = ds_tmp[var_].values[ds_tmp['IS_WATER'].values].shape[-1] N = pd.Series(N) N.plot() plt.ylabel('number of gridboxes in predictor array') plt.xlabel('Latitude $^{\circ}$N') plt.title('Number of gridboxes') plt.title('Number of gridboxes for Arctic (>= 65N)') # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # Save entire pdf AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi) def explore_observational_data_in_Arctic_parameter_space(RFR_dict=None, plt_up_locs4var_conds=False, testset='Test set (strat. 
20%)', dpi=320): """ Analysis the input observational data for the Arctic and Antarctic """ import matplotlib # matplotlib.use('Agg') import matplotlib.pyplot as plt matplotlib.style.use('ggplot') import seaborn as sns sns.set() # - local variables df = RFR_dict['df'] # Set splits in data to look at dfs = {} # All data dfs['All data'] = df.copy() # Get all the data above 65 N dfs['>=65N'] = df.loc[df['Latitude'] >= 65, :] # Get all the data above 65 N and in the testset bool_ = dfs['>=65N'][testset] == False dfs['>=65N (training)'] = dfs['>=65N'].loc[bool_, :] # Get all the data below 65 S dfs['<=65S'] = df.loc[df['Latitude'] <= -65, :] # Get all the data above 65 N and in the testset bool_ = dfs['<=65S'][testset] == False dfs['<=65S (training)'] = dfs['<=65S'].loc[bool_, :] # - variables to explore? vars2use = [ 'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO', ] # - Loop regions and plot pairplots of variables of interest # set PDF savetitle = 'Oi_prj_explore_Arctic_Antarctic_obs_space' pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi) # Loop by dataset (region) and plots datasets = sorted(dfs.keys()) for dataset in datasets: # select the DataFrame df = dfs[dataset] # Get sample size N_ = df.shape[0] # do a pair plot g = sns.pairplot(df[vars2use]) # Add a title plt.suptitle("Pairplot for '{}' (N={})".format(dataset, N_)) # adjust plots g.fig.subplots_adjust(top=0.925, left=0.085) # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # - Loop regions and plot PDFs of variables of interest # Loop by dataset (region) and plots import seaborn as sns sns.reset_orig() datasets = sorted(dfs.keys()) for dataset in datasets: fig, ax = plt.subplots() # select the DataFrame dfA = dfs[dataset] # Set title title = "Locations for '{}'".format(dataset) p_size = 50 alpha = 1 # plot up Non coatal locs df = dfA.loc[dfA['Coastal'] == False, :] color = 'blue' label = 'Non-coastal (N={})'.format(int(df.shape[0])) m = AC.plot_lons_lats_spatial_on_map(title=title, f_size=15, lons=df['Longitude'].values, lats=df['Latitude'].values, label=label, fig=fig, ax=ax, color=color, return_axis=True) #Plot up coatal locs df = dfA.loc[dfA['Coastal'] == True, :] color = 'green' label = 'Coastal (N={})'.format(int(df.shape[0])) lons = df['Longitude'].values lats = df['Latitude'].values m.scatter(lons, lats, edgecolors=color, c=color, marker='o', s=p_size, alpha=alpha, label=label) plt.legend() # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # - Loop regions and plot PDFs of variables of interest import matplotlib.pyplot as plt matplotlib.style.use('ggplot') import seaborn as sns sns.set() df = RFR_dict['df'] dfs = {} # All data dfs['All data'] = df.copy() # Get all the data above 65 N dfs['>=65N'] = df.loc[df['Latitude'] >= 65, :] # Get all the data below 65 S dfs['<=65S'] = df.loc[df['Latitude'] <= -65, :] # - variables to explore? vars2use = [ 'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO', ] # plot up the PDF distribution of each of the variables. datasets = sorted(dfs.keys()) for var2use in vars2use: print(var2use) # set a single axis to use. 
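# NOTE: a minimal, illustrative sketch of the latitude/test-set splits built
# above (before the per-dataset PDF loop below). It assumes a 'Latitude'
# column and a boolean test-set column; the helper name is hypothetical and
# it is not called anywhere.
def _sketch_region_subsets(df, testset='Test set (strat. 20%)'):
    """
    Minimal sketch: build the dict of regional/training subsets
    """
    subsets = {'All data': df.copy()}
    subsets['>=65N'] = df.loc[df['Latitude'] >= 65, :]
    subsets['<=65S'] = df.loc[df['Latitude'] <= -65, :]
    for name in ('>=65N', '<=65S'):
        # training-only variants exclude the withheld test set
        training = subsets[name].loc[subsets[name][testset] == False, :]
        subsets[name + ' (training)'] = training
    return subsets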
        fig, ax = plt.subplots()
        for dataset in datasets:
            # select the DataFrame
            df = dfs[dataset][var2use]
            # Get sample size
            N_ = df.shape[0]
            # do a dist plot
            label = '{} (N={})'.format(dataset, N_)
            sns.distplot(df, ax=ax, label=label)
            # Make sure the values are correctly scaled
            ax.autoscale()
        # Beautify
        title_str = "PDF of ancillary input for '{}'"
        fig.suptitle(title_str.format(var2use))
        plt.legend()
        # Save to PDF and close plot
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
        plt.close()
    # - Loop regions and plot PDFs of variables of interest
    if plt_up_locs4var_conds:
        df = RFR_dict['df']
        dfs = {}
        # Subset by nitrate thresholds (greater/less than or equal to)
        dfs['Nitrate >=15'] = df.loc[df['WOA_Nitrate'] >= 15, :]
        dfs['Nitrate >=10'] = df.loc[df['WOA_Nitrate'] >= 10, :]
        for threshold in (15, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1):
            var_ = 'Nitrate <={}'.format(threshold)
            dfs[var_] = df.loc[df['WOA_Nitrate'] <= threshold, :]
        # Loop by dataset (nitrate values) and plots
        import seaborn as sns
        sns.reset_orig()
        datasets = sorted(dfs.keys())
        for dataset in datasets:
            fig, ax = plt.subplots()
            # select the DataFrame
            dfA = dfs[dataset]
            # Set title
            title = "Locations for '{}'".format(dataset)
            p_size = 50
            alpha = 1
            # plot up non-coastal locs
            df = dfA.loc[dfA['Coastal'] == False, :]
            color = 'blue'
            label = 'Non-coastal (N={})'.format(int(df.shape[0]))
            m = AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
                                                 lons=df['Longitude'].values,
                                                 lats=df['Latitude'].values,
                                                 label=label, fig=fig, ax=ax,
                                                 color=color, return_axis=True)
            # plot up coastal locs
            df = dfA.loc[dfA['Coastal'] == True, :]
            color = 'green'
            label = 'Coastal (N={})'.format(int(df.shape[0]))
            lons = df['Longitude'].values
            lats = df['Latitude'].values
            m.scatter(lons, lats, edgecolors=color, c=color, marker='o',
                      s=p_size, alpha=alpha, label=label)
            plt.legend()
            # Save to PDF and close plot
            AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
            plt.close()
    # - Save entire pdf
    AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)


def Driver2analyse_new_data_vs_existing_data():
    """
    Driver to plot up all options for old vs. new analysis plots
    """
    regions = 'all', 'coastal', 'noncoastal'
    for limit_to_400nM in True, False:
        for region in regions:
            analyse_new_data_vs_existing_data(region=region,
                                              limit_to_400nM=limit_to_400nM)


def analyse_new_data_vs_existing_data(limit_to_400nM=True, region='all'):
    """
    Build a set of analysis plots exploring the differences between the new
    and existing datasets
    """
    # - Get obs. data
    # Get data (inc. additions) and meta data
    df_meta = obs.get_iodide_obs_metadata()
    pro_df = obs.get_processed_df_obs_mod()
    # - Setup plotting
    # misc.
shared variables axlabel = '[I$^{-}_{aq}$] (nM)' # setup PDf savetitle = 'Oi_prj_new_vs_existing_datasets' if limit_to_400nM: # Exclude v. high values (N=7 - in final dataset) pro_df = pro_df.loc[pro_df['Iodide'] < 400.] savetitle += '_limited_to_400nM' if region == 'all': savetitle += '_all' elif region == 'coastal': pro_df = pro_df.loc[pro_df['Coastal'] == 1, :] savetitle += '_{}'.format(region) elif region == 'noncoastal': pro_df = pro_df.loc[pro_df['Coastal'] == 0, :] savetitle += '_{}'.format(region) else: sys.exit() pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi) # colours to use? import seaborn as sns sns.set(color_codes=True) current_palette = sns.color_palette("colorblind") # - Plot up new data ( ~timeseries? ) New_datasets = df_meta.loc[df_meta['In Chance2014?'] == 'N'].Data_Key var2plot = 'Iodide' for dataset in New_datasets: # Select new dataset tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset] Cruise = tmp_df['Cruise'].values[0] # if dates present in DataFrame, update axis dates4cruise = pd.to_datetime(tmp_df['Date'].values) if len(set(dates4cruise)) == tmp_df.shape[0]: tmp_df.index = dates4cruise xlabel = 'Date' else: xlabel = 'Obs #' tmp_df[var2plot].plot() ax = plt.gca() plt.xlabel(xlabel) plt.ylabel(axlabel) title_str = "New {} data from '{}' ({})" plt.title(title_str.format(var2plot.lower(), Cruise, dataset)) AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # - Plot up new data ( PDF of iodide ) var2plot = 'Iodide' for dataset in New_datasets: # Select new dataset tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset] Cruise = tmp_df['Cruise'].values[0] # - Plot up PDF plots for the dataset # plot whole dataset obs_arr = pro_df[var2plot].values ax = sns.distplot(obs_arr, axlabel=axlabel, color='k', label='Whole dataset') # plot just new data ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise, color='red', ax=ax) # force y axis extend to be correct ax.autoscale() # Beautify title = "PDF of '{}' {} data ({}) at obs. locations" plt.title(title.format(dataset, var2plot, axlabel)) plt.legend() # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # - Plot up new data ( PDF of salinity ) var2plot = u'WOA_Salinity' for dataset in New_datasets: # Select new dataset tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset] Cruise = tmp_df['Cruise'].values[0] # - Plot up PDF plots for the dataset # plot whole dataset obs_arr = pro_df[var2plot].values ax = sns.distplot(obs_arr, axlabel=axlabel, color='k', label='Whole dataset') # plot just new data ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise, color='red', ax=ax) # force y axis extend to be correct ax.autoscale() # Beautify title = "PDF of '{}' {} data ({}) at obs. locations" plt.title(title.format(dataset, var2plot, axlabel)) plt.legend() # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # - Plot up new data ( PDF of temperature ) var2plot = 'WOA_TEMP' for dataset in New_datasets: # Select new dataset tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset] Cruise = tmp_df['Cruise'].values[0] # - Plot up PDF plots for the dataset # plot whole dataset obs_arr = pro_df[var2plot].values ax = sns.distplot(obs_arr, axlabel=axlabel, color='k', label='Whole dataset') # plot just new data ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise, color='red', ax=ax) # force y axis extend to be correct ax.autoscale() # Beautify title = "PDF of '{}' {} data ({}) at obs. 
locations" plt.title(title.format(dataset, var2plot, axlabel)) plt.legend() # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # - Plot up new data ( PDF of depth ) var2plot = u'Depth_GEBCO' for dataset in New_datasets: # Select new dataset tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset] Cruise = tmp_df['Cruise'].values[0] # - Plot up PDF plots for the dataset # plot whole dataset obs_arr = pro_df[var2plot].values ax = sns.distplot(obs_arr, axlabel=axlabel, color='k', label='Whole dataset') # plot just new data ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise, color='red', ax=ax) # force y axis extend to be correct ax.autoscale() # Beautify title = "PDF of '{}' {} data ({}) at obs. locations" plt.title(title.format(dataset, var2plot, axlabel)) plt.legend() # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # -- Save entire pdf AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi) def get_diagnostic_plots_analysis4observations(inc_all_extract_vars=False, include_hexbin_plots=False, model_name='TEMP+DEPTH+SAL', show_plot=False, dpi=320): """ Produce a PDF of comparisons of observations in dataset inventory """ # - Setup plotting # misc. shared variables axlabel = '[I$^{-}_{aq}$] (nM)' # setup PDf savetitle = 'Oi_prj_obs_plots' if inc_all_extract_vars: savetitle += '_all_extract_vars' include_hexbin_plots = True pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi) # colours to use? import seaborn as sns # - Get obs. data # Get data (inc. additions) and meta data df_meta = obs.get_iodide_obs_metadata() pro_df = obs.get_processed_df_obs_mod() LOCAL_model_name = 'RFR({})'.format(model_name) pro_df[LOCAL_model_name] = get_model_predictions4obs_point(pro_df, model_name=model_name) # Exclude v. high values (N=4 - in intial dataset) # Exclude v. high values (N=7 - in final dataset) pro_df = pro_df.loc[pro_df['Iodide'] < 400.] # Add coastal flag to data coastal_flag = 'coastal_flagged' pro_df = get_coastal_flag(df=pro_df, coastal_flag=coastal_flag) non_coastal_df = pro_df.loc[pro_df['coastal_flagged'] == 0] dfs = {'Open-Ocean': non_coastal_df, 'All': pro_df} # TODO ... add test dataset in here # Get the point data for params... 
point_ars_dict = {} for key_ in dfs.keys(): point_ars_dict[key_] = { 'Obs.': dfs[key_]['Iodide'].values, 'MacDonald et al (2014)': dfs[key_]['MacDonald2014_iodide'].values, 'Chance et al (2014)': dfs[key_][u'Chance2014_STTxx2_I'].values, 'Chance et al (2014) - Mutivariate': dfs[key_][ u'Chance2014_Multivariate' ].values, LOCAL_model_name: dfs[key_][LOCAL_model_name], } point_ars_dict = point_ars_dict['Open-Ocean'] parm_name_dict = { 'MacDonald et al (2014)': 'MacDonald2014_iodide', 'Chance et al (2014)': u'Chance2014_STTxx2_I', 'Chance et al (2014) - Mutivariate': u'Chance2014_Multivariate', LOCAL_model_name: LOCAL_model_name, } point_data_names = sorted(point_ars_dict.keys()) point_data_names.pop(point_data_names.index('Obs.')) param_names = point_data_names # setup color dictionary current_palette = sns.color_palette("colorblind") colour_dict = dict(zip(param_names, current_palette[:len(param_names)])) colour_dict['Obs.'] = 'K' # --- Plot up locations of old and new data import seaborn as sns sns.reset_orig() plot_up_data_locations_OLD_and_new(save_plot=False, show_plot=False) # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # --- Plot up all params against coastal data import seaborn as sns sns.set(color_codes=True) sns.set_context("paper") xlabel = 'Obs.' # just non-coastal for param_name in sorted(parm_name_dict.keys()): Y = non_coastal_df[parm_name_dict[param_name]].values X = non_coastal_df['Iodide'].values title = 'Regression plot of Open-ocean [I$^{-}_{aq}$] (nM) \n' title = title + '{} vs {} parameterisation'.format(xlabel, param_name) ax = sns.regplot(x=X, y=Y) # get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False, # title=None, add_ODR_trendline2plot=True) plt.title(title) plt.xlabel(xlabel) plt.ylabel(param_name) # Adjust X and Y range max_val = max(max(X), max(Y)) smidgen = max_val * 0.05 plt.xlim(0-smidgen, max_val+smidgen) plt.ylim(0-smidgen, max_val+smidgen) # Add 1:1 one2one = np.arange(0, max_val*2) plt.plot(one2one, one2one, color='k', linestyle='--', alpha=0.75, label='1:1') plt.legend() if show_plot: plt.show() AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # --- Plot up all params against all data import seaborn as sns sns.set(color_codes=True) sns.set_context("paper") xlabel = 'Obs.' 
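# NOTE: the dashed 1:1 line plus squared-up axis limits is repeated in each
# regression plot above and below; the helper here is a minimal sketch of
# that block, factored out for clarity. It is illustrative only and not
# called anywhere in this module.
def _sketch_add_one2one(ax, max_val):
    """
    Minimal sketch: add a 1:1 line and square axis limits to a comparison plot
    """
    import numpy as np
    smidgen = max_val * 0.05
    one2one = np.arange(0, max_val * 2)
    ax.plot(one2one, one2one, color='k', linestyle='--', alpha=0.75,
            label='1:1')
    ax.set_xlim(0 - smidgen, max_val + smidgen)
    ax.set_ylim(0 - smidgen, max_val + smidgen)
    return ax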
X = point_ars_dict[xlabel] for param_name in point_data_names: Y = point_ars_dict[param_name] title = 'Regression plot of all [I$^{-}_{aq}$] (nM) \n' title = title + '{} vs {} parameterisation'.format(xlabel, param_name) ax = sns.regplot(x=X, y=Y) # get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False, # title=None, add_ODR_trendline2plot=True) plt.title(title) plt.xlabel(xlabel) plt.ylabel(param_name) # Adjust X and Y range max_val = max(max(X), max(Y)) smidgen = max_val * 0.05 plt.xlim(0-smidgen, max_val+smidgen) plt.ylim(0-smidgen, max_val+smidgen) # Add 1:1 one2one = np.arange(0, max_val*2) plt.plot(one2one, one2one, color='k', linestyle='--', alpha=0.75, label='1:1') plt.legend() if show_plot: plt.show() AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # ---- Plot up new data New_datasets = df_meta.loc[df_meta['In Chance2014?'] == 'N'].Data_Key var2plot = 'Iodide' for dataset in New_datasets: tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset] Cruise = tmp_df['Cruise'].values[0] # if dates present in DataFrame, update axis dates4cruise = pd.to_datetime(tmp_df['Date'].values) if len(set(dates4cruise)) == tmp_df.shape[0]: tmp_df.index = dates4cruise xlabel = 'Date' else: xlabel = 'Obs #' tmp_df[var2plot].plot() ax = plt.gca() # ax.axhline(30, color='red', label='Chance et al 2014 coastal divide') plt.xlabel(xlabel) plt.ylabel(axlabel) title_str = "New {} data from '{}' ({})" plt.title(title_str.format(var2plot.lower(), Cruise, dataset)) # plt.legend() AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # Plot up Salinity # var2plot = 'WOA_Salinity' # for dataset in New_datasets: # tmp_df = pro_df.loc[ pro_df['Data_Key'] == dataset ] # tmp_df[var2plot].plot() # ax= plt.gca() # ax.axhline(30, color='red', label='Chance et al 2014 coastal divide') # plt.xlabel( 'Obs #') # plt.ylabel( 'PSU' ) # plt.title( '{} during cruise from {}'.format( var2plot, dataset ) ) # plt.legend() # AC.plot2pdfmulti( pdff, savetitle, dpi=dpi ) # plt.close() # ---- Plot up key comparisons for coastal an non-coastal data for key_ in sorted(dfs.keys()): # --- Ln(Iodide) vs. T ylabel = 'ln(Iodide)' Y = dfs[key_][ylabel].values xlabel = 'WOA_TEMP' X = dfs[key_][xlabel].values # Plot up ax = sns.regplot(x=X, y=Y) # Beautify title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) if show_plot: plt.show() AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # --- Ln(Iodide) vs. 1/T ylabel = 'ln(Iodide)' Y = dfs[key_][ylabel].values xlabel = 'WOA_TEMP_K' X = 1 / dfs[key_][xlabel].values # Plot up ax = sns.regplot(x=X, y=Y) # Beautify title = '{} vs {} ({} data)'.format(ylabel, '1/'+xlabel, key_) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) if show_plot: plt.show() AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # --- Ln(Iodide) vs. 
1/T ylabel = 'ln(Iodide)' Y = dfs[key_][ylabel].values xlabel = 'WOA_Salinity' X = dfs[key_][xlabel].values # Plot up ax = sns.regplot(x=X, y=Y) # Beautify title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) if show_plot: plt.show() AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # --- if inc_all_extract_vars: for key_ in sorted(dfs.keys()): # List extract vraiables extracted_vars = [ u'WOA_TEMP', u'WOA_Nitrate', u'WOA_Salinity', u'WOA_Dissolved_O2', u'WOA_Phosphate', u'WOA_Silicate', u'Depth_GEBCO', u'SeaWIFs_ChlrA', u'WOA_MLDpt', u'WOA_MLDpt_max', u'WOA_MLDpt_sum', u'WOA_MLDpd', u'WOA_MLDpd_max', u'WOA_MLDpd_sum', u'WOA_MLDvd', u'WOA_MLDvd_max', u'WOA_MLDvd_sum', u'DOC', u'DOCaccum', u'Prod', u'SWrad' ] # Loop extraced variables and plot for var_ in extracted_vars: ylabel = var_ xlabel = 'Iodide' tmp_df = dfs[key_][[xlabel, ylabel]] # Kludge to remove '--' from MLD columns for col in tmp_df.columns: bool_ = [i == '--' for i in tmp_df[col].values] tmp_df.loc[bool_, :] = np.NaN if tmp_df[col].dtype == 'O': tmp_df[col] = pd.to_numeric(tmp_df[col].values, errors='coerce') print(var_, tmp_df.min(), tmp_df.max()) # X = dfs[key_][xlabel].values # Plot up ax = sns.regplot(x=xlabel, y=ylabel, data=tmp_df ) # Beautify title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show_plot: plt.show() plt.close() # --- Plot up Just observations and predicted values from models as PDF import seaborn as sns sns.set(color_codes=True) sns.set_context("paper") # plot 1st model... point_name = 'Obs.' arr = point_ars_dict[point_name] ax = sns.distplot(arr, axlabel=axlabel, label=point_name, color=colour_dict[point_name]) # Add MacDonald, Chance... for point_name in point_data_names: arr = point_ars_dict[point_name] ax = sns.distplot(arr, axlabel=axlabel, label=point_name, color=colour_dict[point_name]) # force y axis extend to be correct ax.autoscale() # Beautify plt.title('PDF of predicted iodide ({}) at obs. points'.format(axlabel)) plt.legend() # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # --- Plot up Just observations and predicted values from models as CDF import seaborn as sns sns.set(color_codes=True) sns.set_context("paper") # plot 1st model... point_name = 'Obs.' arr = point_ars_dict[point_name] ax = sns.distplot(arr, axlabel=axlabel, label=point_name, color=colour_dict[point_name], hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True)) # Add MacDonald, Chance... for point_name in point_data_names: arr = point_ars_dict[point_name] ax = sns.distplot(arr, axlabel=axlabel, label=point_name, color=colour_dict[point_name], hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True)) # force y axis extend to be correct ax.autoscale() # Beautify plt.title('CDF of predicted iodide ({}) at obs. points'.format(axlabel)) plt.legend() # Save to PDF and close plot AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) plt.close() # --- Plot up parameterisations as regression # import seaborn as sns; sns.set(color_codes=True) # sns.set_context("paper") # xlabel = 'Obs.' 
    # X = point_ars_dict[xlabel]
    # for point_name in point_data_names:
    #     title = 'Regression plot of [I$^{-}_{aq}$] (nM) '
    #     title = title + '{} vs {} parameterisation'.format(xlabel, point_name )
    #     Y = point_ars_dict[point_name]
    #     ax = sns.regplot(x=X, y=Y )
    #     # get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False,
    #     #                 title=None, add_ODR_trendline2plot=True)
    #     plt.title(title)
    #     plt.xlabel(xlabel)
    #     plt.ylabel(point_name)
    #     # Save to PDF and close plot
    #     AC.plot2pdfmulti( pdff, savetitle, dpi=dpi )
    #     plt.close()
    # --- Plot up parameterisations as hexbin plot
    if include_hexbin_plots:
        xlabel = 'Obs.'
        X = point_ars_dict[xlabel]
        for point_name in point_data_names:
            title = 'Hexbin of [I$^{-}_{aq}$] (nM) \n'
            title = title + '{} vs {} parameterisation'.format(xlabel,
                                                               point_name)
            Y = point_ars_dict[point_name]
            get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name,
                            log=False, title=title,
                            add_ODR_trendline2plot=True)
            # plt.show()
            # Save to PDF and close plot
            AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
            plt.close()
    # -- Save entire pdf
    AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)


def plot_PDF_iodide_obs_mod(bins=10):
    """
    Plot up the PDF of predicted values vs. observations
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    # Location of data to plot
    folder = utils.get_file_locations('data_root')
    f = 'Iodine_obs_WOA.csv'
    df = pd.read_csv(folder+f, encoding='utf-8')
    # Just select non-coastal data
    print(df.shape)
    df = df[~(df['Coastal'] == True)]
    # df = df[ ~(df['Coastal']==True) ]
    # Salinity greater than 30
    # df = df[ (df['Salinity'] > 30 ) ]
    print(df.shape)
    # Plot up data
    # MacDonald et al 2014 values
    ax = sns.distplot(df['MacDonald2014_iodide'],
                      label='MacDonald2014_iodide', bins=bins)
    # Chance et al 2014 values
    ax = sns.distplot(df['Chance2014_STTxx2_I'],
                      label='Chance2014_STTxx2_I', bins=bins)
    # Iodide obs.
    ax = sns.distplot(df['Iodide'], label='Iodide, nM', bins=bins)
    # Update aesthetics and show plot?
    plt.xlim(-50, 400)
    plt.legend(loc='upper right')
    plt.show()


def plt_predicted_iodide_vs_obs_Q1_Q3(dpi=320, show_plot=False,
                                      limit_to_400nM=False, inc_iodide=False):
    """
    Plot predicted iodide on a latitudinal basis

    Notes
    ----
    - This is the obs.-location equivalent of the plot produced to show
      predicted values for all global locations
      (Oi_prj_global_predicted_vals_vs_lat)
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set(color_codes=True)
    sns.set_context("paper")
    # Get data
    folder = utils.get_file_locations('data_root')
    f = 'Iodine_obs_WOA.csv'
    df = pd.read_csv(folder+f, encoding='utf-8')
    # Local variables
    # sub select variables of interest.
    params2plot = [
        'Chance2014_STTxx2_I',
        'MacDonald2014_iodide',
    ]
    # Set names to overwrite variables with
    rename_titles = {u'Chance2014_STTxx2_I': 'Chance et al. (2014)',
                     u'MacDonald2014_iodide': 'MacDonald et al. (2014)',
                     'RFR(Ensemble)': 'RFR(Ensemble)',
                     'Iodide': 'Obs.',
                     # u'Chance2014_Multivariate': 'Chance et al. (2014) (Multi)',
                     }
    # filename to save values
    filename = 'Oi_prj_global_predicted_vals_vs_lat_only_obs_locs'
    # include iodide observations too?
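# NOTE: a minimal, illustrative sketch of the latitudinal binning used just
# below: bin observations by latitude with pd.cut, then pull quartiles out
# of the groupby-describe output. The helper name and the default variable
# are assumptions; it is not called anywhere in this module.
def _sketch_binned_quartiles(df, var='Iodide'):
    """
    Minimal sketch: per-latitude-band quartiles via pd.cut and describe()
    """
    import numpy as np
    import pandas as pd
    bins = np.arange(-80, 90, 10)
    groups = df.groupby(pd.cut(df['Latitude'], bins))
    stats = groups[var].describe()
    # columns include '25%', '50%' and '75%' for each latitude band
    return stats['25%'], stats['50%'], stats['75%']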
if inc_iodide: params2plot += ['Iodide'] filename += '_inc_iodide' CB_color_cycle = AC.get_CB_color_cycle() color_d = dict(zip(params2plot, CB_color_cycle)) # if limit_to_400nM: df = df.loc[df['Iodide'] < 400, :] filename += '_limited_400nM' # - Process data # Add binned mean # bins = np.arange(-70, 70, 10 ) bins = np.arange(-80, 90, 10) # groups = df.groupby( np.digitize(df[u'Latitude'], bins) ) groups = df.groupby(pd.cut(df['Latitude'], bins)) # Take means of groups # groups_avg = groups.mean() groups_des = groups.describe().unstack() # - setup plotting fig, ax = plt.subplots(dpi=dpi) # - Plot up X = groups_des['Latitude']['mean'].values # groups_des.index # X =bins print(groups_des) # plot groups for var_ in params2plot: # Get quartiles Q1 = groups_des[var_]['25%'].values Q3 = groups_des[var_]['75%'].values # Add median ax.plot(X, groups_des[var_]['50%'].values, color=color_d[var_], label=rename_titles[var_]) # add shading for Q1/Q3 ax.fill_between(X, Q1, Q3, alpha=0.2, color=color_d[var_]) # - Plot observations # Highlight coastal obs tmp_df = df.loc[df['Coastal'] == True, :] X = tmp_df['Latitude'].values Y = tmp_df['Iodide'].values plt.scatter(X, Y, color='k', marker='D', facecolor='none', s=3, label='Coastal obs.') # non-coastal obs tmp_df = df.loc[df['Coastal'] == False, :] X = tmp_df['Latitude'].values Y = tmp_df['Iodide'].values plt.scatter(X, Y, color='k', marker='D', facecolor='k', s=3, label='Non-coastal obs.') # - Beautify # Add legend plt.legend() # Limit plotted y axis extent plt.ylim(-20, 420) plt.ylabel('[I$^{-}_{aq}$] (nM)') plt.xlabel('Latitude ($^{\\rm o}$N)') plt.savefig(filename, dpi=dpi) if show_plot: plt.show() plt.close() def plot_up_data_locations_OLD_and_new(save_plot=True, show_plot=False, extension='eps', dpi=720): """ Plot up old and new data on map """ import seaborn as sns sns.reset_orig() # - Setup plot figsize = (11, 5) fig, ax = plt.subplots(figsize=figsize, dpi=dpi) p_size = 25 alpha = 0.5 window = True axis_titles = False # - Get all observational data df, md_df = obs.get_iodide_obs() # Seperate into new and old data ChanceStr = 'In Chance2014?' df[ChanceStr] = None for ds in list(set(md_df['Data_Key'])): bool = df['Data_Key'] == ds IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0] df.loc[bool, ChanceStr] = IsChance new_metadata_df = md_df.loc[ md_df['In Chance2014?'] == 'N' ] new_Data_Keys = new_metadata_df['Data_Key'].values bool = df['Data_Key'].isin(new_Data_Keys) # old data df1 = df.loc[~bool] # new data df2 = df.loc[bool] # --- add existing data # Get existing data... (Chance et al 2014 ) # folder = utils.get_file_locations('data_root') # f = 'Iodine_obs_WOA.csv' # df1 = pd.read_csv(folderf, encoding='utf-8' ) # Select lons and lats lats1 = df1['Latitude'].values lons1 = df1['Longitude'].values # Plot up and return basemap axis label = 'Chance et al. (2014) (N={})'.format( df1['Iodide'].dropna().shape[0]) m = AC.plot_lons_lats_spatial_on_map(lons=lons1, lats=lats1, fig=fig, ax=ax, color='blue', label=label, alpha=alpha, window=window, axis_titles=axis_titles, return_axis=True, p_size=p_size) # - Add in new data following Chance2014? # this is ~ 5 samples from the Atlantic (and some from Indian ocean?) # ... get this at a later date... 
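# NOTE: a minimal, illustrative sketch of the old/new split performed above:
# the metadata flag 'In Chance2014?' selects the new Data_Key values, and an
# isin() mask divides the observations. The helper name is hypothetical and
# it is not called anywhere in this module.
def _sketch_split_new_vs_Chance2014(df, md_df):
    """
    Minimal sketch: split observations into existing and new datasets
    """
    new_keys = md_df.loc[md_df['In Chance2014?'] == 'N', 'Data_Key'].values
    is_new = df['Data_Key'].isin(new_keys)
    # returns (existing data, new data)
    return df.loc[~is_new], df.loc[is_new]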
# - Add in SOE-9 data # f = 'Iodine_climatology_ISOE9.xlsx' # df2 = pd.read_excel(folder'/Liselotte_data/'+f, skiprows=1 ) # Data from SOE-9 lats2 = df2['Latitude'].values lons2 = df2['Longitude'].values color = 'red' label = 'Additional data (N={})' label = label.format(df2['Iodide'].dropna().shape[0]) m.scatter(lons2, lats2, edgecolors=color, c=color, marker='o', s=p_size, alpha=alpha, label=label) # - Save out / show leg = plt.legend(fancybox=True, loc='upper right') leg.get_frame().set_alpha(0.95) if save_plot: savename = 'Oi_prj_Obs_locations.{}'.format(extension) plt.savefig(savename, bbox_inches='tight', dpi=dpi) if show_plot: plt.show() def plot_up_data_locations_OLD_and_new_CARTOPY(save_plot=True, show_plot=False, extension='eps', dpi=720): """ Plot up old and new data on map """ import seaborn as sns sns.reset_orig() # - Setup plot # figsize = (11, 5) figsize = (11*2, 5*2) fig = plt.figure(figsize=figsize, dpi=dpi) # fig, ax = plt.subplots(figsize=figsize, dpi=dpi) fig, ax = None, None p_size = 15 alpha = 0.5 window = True axis_titles = False # - Get all observational data df, md_df = obs.get_iodide_obs() # Seperate into new and old data ChanceStr = 'In Chance2014?' df[ChanceStr] = None for ds in list(set(md_df['Data_Key'])): bool = df['Data_Key'] == ds IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0] df.loc[bool, ChanceStr] = IsChance new_metadata_df = md_df.loc[ md_df['In Chance2014?'] == 'N' ] new_Data_Keys = new_metadata_df['Data_Key'].values bool = df['Data_Key'].isin(new_Data_Keys) # old data df1 = df.loc[~bool] # new data df2 = df.loc[bool] # --- add existing data # Get existing data... (Chance et al 2014 ) # folder = utils.get_file_locations('data_root') # f = 'Iodine_obs_WOA.csv' # df1 = pd.read_csv(folderf, encoding='utf-8' ) # Select lons and lats lats1 = df1['Latitude'].values lons1 = df1['Longitude'].values # Plot up and return basemap axis label = 'Chance et al. (2014) (N={})'.format( df1['Iodide'].dropna().shape[0]) ax = plot_lons_lats_spatial_on_map_CARTOPY(lons=lons1, lats=lats1, fig=fig, ax=ax, color='blue', label=label, alpha=alpha, dpi=dpi, # window=window, axis_titles=axis_titles, # return_axis=True, # add_detailed_map=True, add_background_image=False, add_gridlines=False, s=p_size) # - Add in new data following Chance2014? # this is ~ 5 samples from the Atlantic (and some from Indian ocean?) # ... get this at a later date... 
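# NOTE: a minimal, illustrative cartopy sketch of the kind of map drawn by
# plot_lons_lats_spatial_on_map_CARTOPY above (assuming cartopy is
# installed); the helper name is hypothetical and it is not called anywhere.
def _sketch_minimal_cartopy_scatter(lons, lats):
    """
    Minimal sketch: scatter points on a PlateCarree map with coastlines
    """
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
    ax.coastlines()
    ax.scatter(lons, lats, transform=ccrs.PlateCarree(), s=10, color='blue')
    return ax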
    # - Add in SOE-9 data
    # f = 'Iodine_climatology_ISOE9.xlsx'
    # df2 = pd.read_excel(folder+'/Liselotte_data/'+f, skiprows=1 )
    # Data from SOE-9
    lats2 = df2['Latitude'].values
    lons2 = df2['Longitude'].values
    color = 'red'
    label = 'Additional data (N={})'
    label = label.format(df2['Iodide'].dropna().shape[0])
    ax.scatter(lons2, lats2, edgecolors=color, c=color, marker='o',
               s=p_size, alpha=alpha, label=label, zorder=1000)
    # - Save out / show
    leg = plt.legend(fancybox=True, loc='upper right', prop={'size': 6})
    leg.get_frame().set_alpha(0.95)
    if save_plot:
        savename = 'Oi_prj_Obs_locations.{}'.format(extension)
        plt.savefig(savename, bbox_inches='tight', dpi=dpi)
    if show_plot: plt.show()


def map_plot_of_locations_of_obs():
    """
    Plot up locations of observations of data to double check
    """
    import matplotlib.pyplot as plt
    # - Settings
    plot_all_as_one_plot = True
    show = True
    # - Get data
    folder = utils.get_file_locations('data_root')
    f = 'Iodine_obs_WOA.csv'
    df = pd.read_csv(folder+f, encoding='utf-8')
    # only consider non-coastal locations
    print(df.shape)
    # df = df[ df['Coastal'] == 1.0 ]  # select coastal locations
    # df = df[ df['Coastal'] == 0.0 ]  # select non coastal locations
    # only consider locations with salinity > 30
    df = df[df['Salinity'] > 30.0]
    print(df.shape)
    # Get coordinate values
    all_lats = df['Latitude'].values
    all_lons = df['Longitude'].values
    # Get sub lists of unique identifiers for datasets
    datasets = list(set(df['Data_Key']))
    n_datasets = len(datasets)
    # - Setup plot
    f_size = 10
    marker = 'o'
    p_size = 75
    dpi = 600
    c_list = AC.color_list(int(n_datasets*1.25))
    print(c_list, len(c_list))
    # plot up white background
    arr = np.zeros((72, 46))
    vmin, vmax = 0, 0
    # - just plot up all sites to test
    if plot_all_as_one_plot:
        # Setup a blank basemap plot
        fig = plt.figure(figsize=(12, 6), dpi=dpi, facecolor='w',
                         edgecolor='k')
        ax1 = fig.add_subplot(111)
        plt, m = AC.map_plot(arr.T, return_m=True, cmap=plt.cm.binary,
                             f_size=f_size*2, fixcb=[vmin, vmax], ax=ax1,
                             no_cb=True, resolution='c', ylabel=True,
                             xlabel=True)
        # Scatter plot of points.
        m.scatter(all_lons, all_lats, edgecolors=c_list[1], c=c_list[1],
                  marker=marker, s=p_size, alpha=1,)
        # Save and show?
        plt.savefig('Iodide_dataset_locations.png', dpi=dpi, transparent=True)
        if show: plt.show()
    else:
        chunksize = 5
        chunked_list = AC.chunks(datasets, chunksize)
        counter = 0
        for n_chunk_, chunk_ in enumerate(chunked_list):
            # Setup a blank basemap plot
            fig = plt.figure(figsize=(12, 6), dpi=dpi, facecolor='w',
                             edgecolor='k')
            ax1 = fig.add_subplot(111)
            plt, m = AC.map_plot(arr.T, return_m=True, cmap=plt.cm.binary,
                                 f_size=f_size*2, fixcb=[vmin, vmax], ax=ax1,
                                 no_cb=True, resolution='c', ylabel=True,
                                 xlabel=True)
            # Loop all datasets
            for n_dataset_, dataset_ in enumerate(chunk_):
                print(n_chunk_, counter, dataset_, c_list[counter])
                #
                df_sub = df[df['Data_Key'] == dataset_]
                lats = df_sub['Latitude'].values
                lons = df_sub['Longitude'].values
                # Plot up and save.
                color = c_list[n_chunk_::chunksize][n_dataset_]
                m.scatter(lons, lats, edgecolors=color, c=color,
                          marker=marker, s=p_size, alpha=.5, label=dataset_)
                # add one to counter
                counter += 1
            plt.legend()
            # save chunk...
plt.savefig('Iodide_datasets_{}.png'.format(n_chunk_), dpi=dpi, transparent=True) if show: plt.show() def plot_up_parameterisations(df=None, save2pdf=True, show=False): """ Plot up parameterisations """ import matplotlib.pyplot as plt import seaborn as sns # Consider both Chance and MacDonald parameterisations params = [i for i in df.columns if ('Mac' in i)] params += [i for i in df.columns if ('Chance' in i)] # get details of parameterisations # filename='Chance_2014_Table2_PROCESSED_17_04_19.csv' filename = 'Chance_2014_Table2_PROCESSED.csv' folder = utils.get_file_locations('data_root') param_df = pd.read_csv(folder+filename) # only consider non-coastal locations? print(df.shape) # df = df[ df['Coastal'] == 1.0 ] # select coastal locations # df = df[ df['Coastal'] == 0.0 ] # select non coastal locations # only consider locations with salinity > 30 df = df[df['Salinity'] > 30.0] # select coastal locations print(df.shape) # df = df[ df['Iodide'] < 300 ] # Setup pdf if save2pdf: dpi = 320 savetitle = 'Chance2014_params_vs_recomputed_params' pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi) # - Loop parameterisations # for param in params[:2]: # Only loop two if debugging for param in params: # Get meta data for parameter sub_df = param_df[param_df['TMS ID'] == param] # Setup a new figure fig = plt.figure() # Extract Iodide and param data... # Take logs of data? iodide_var = 'Iodide' try: print(sub_df['ln(iodide)'].values[0]) if sub_df['ln(iodide)'].values[0] == 'yes': iodide_var = 'ln(Iodide)' print('Using log values for ', param) else: print('Not using log values for ', param) except: print('FAILED to try and use log data for ', param) X = df[iodide_var].values # And parameter data? Y = df[param].values # Remove nans... tmp_df = pd.DataFrame(np.array([X, Y]).T, columns=['X', 'Y']) print(tmp_df.shape) tmp_df = tmp_df.dropna() print(tmp_df.shape) X = tmp_df['X'].values Y = tmp_df['Y'].values # PLOT UP as X vs. Y scatter... title = '{} ({})'.format(param, sub_df['Independent variable'].values) ax = mk_X_Y_scatter_plot_param_vs_iodide(X=X, Y=Y, title=title, iodide_var=iodide_var) # Add Chance2014's R^2 to plot... try: R2 = str(sub_df['R2'].values[0]) c = str(sub_df['c'].values[0]) m = str(sub_df['m'].values[0]) eqn = 'y={}x+{}'.format(m, c) print(R2, c, m, eqn) alt_text = 'Chance et al (2014) R$^2$'+':{} ({})'.format(R2, eqn) ax.annotate(alt_text, xy=(0.5, 0.90), textcoords='axes fraction', fontsize=10) except: print('FAILED to get Chance et al values for', param) # plt.text( 0.75, 0.8, alt_text, ha='center', va='center') # show/save? if save2pdf: # Save out figure AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show: plt.show() del fig # save entire pdf if save2pdf: AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi) plt.close("all") def mk_X_Y_scatter_plot_param_vs_iodide(X=None, Y=None, iodide_var=None, title=None): """ Plots up a X vs. Y plot for a parameterisation of iodine (Y) against obs iodide (X) """ import matplotlib.pyplot as plt import seaborn as sns # Plot up plt.scatter(X, Y, marker='+', alpha=0.5) plt.title(title) plt.ylabel('Param. [Iodide], nM') plt.xlabel('Obs. 
[{}], nM'.format(iodide_var)) # Add a trendline ax = plt.gca() AC.Trendline(ax, X=X, Y=Y, color='green') # Adjust x and y axis limits round_max_X = AC.myround(max(X), 50, round_up=True) round_max_Y = AC.myround(max(Y), 50, round_up=True) if iodide_var == 'ln(Iodide)': round_max_X = AC.myround(max(X), 5, round_up=True) round_max_Y = AC.myround(max(Y), 5, round_up=True) plt.xlim(-(round_max_X/40), round_max_X) plt.ylim(-(round_max_Y/40), round_max_Y) # Add an N value to plot alt_text = '(N={})'.format(len(X)) ax.annotate(alt_text, xy=(0.8, 0.10), textcoords='axes fraction', fontsize=10) return ax def compare_obs_ancillaries_with_extracted_values_WINDOW(dpi=320, df=None): """ Plot up a window plot of the observed vs. climatological ancillaries """ import seaborn as sns sns.set(color_codes=True) current_palette = sns.color_palette("colorblind") sns.set_style("darkgrid") sns.set_context("paper", font_scale=0.75) # Get the observational data if isinstance(df, type(None)): df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM # - Map observational variables to their shared extracted variables all_vars = df.columns.tolist() # Dictionary obs_var_dict = { # Temperature 'WOA_TEMP': 'Temperature', # Chlorophyll-a 'SeaWIFs_ChlrA': 'Chl-a', # Nitrate 'WOA_Nitrate': 'Nitrate', # Salinity 'WOA_Salinity': 'Salinity' # There is also 'Nitrite' and 'Ammonium' } # units dict? units_dict = { 'SeaWIFs_ChlrA': "mg m$^{-3}$", # Chance et al uses micro g/L 'WOA_Salinity': 'PSU', # https://en.wikipedia.org/wiki/Salinity 'WOA_Nitrate': "$\mu$M", 'WOA_TEMP': '$^{o}$C', } # Colors to use CB_color_cycle = AC.get_CB_color_cycle() # set the order the dict keys are accessed vars_sorted = list(sorted(obs_var_dict.keys()))[::-1] # setup plot fig = plt.figure(dpi=dpi, figsize=(5, 7.35)) # - 1st plot Salinity ( all and >30 PSU ) # - All above var2plot = 'WOA_Salinity' plot_n = 1 color = CB_color_cycle[0] # Make a new axis ax = fig.add_subplot(3, 2, plot_n, aspect='equal') # Get the data df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna() N_ = int(df_tmp[[var2plot]].shape[0]) MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2) RMSE_ = np.sqrt(MSE_) print(N_, MSE_, RMSE_) X = df_tmp[obs_var_dict[var2plot]].values Y = df_tmp[var2plot].values # Plot up the data as a scatter ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5) # Label Y axis if plot_n in np.arange(1, 6)[::2]: ax.set_ylabel('Extracted') # Title the plots title = 'Salinity (all, {})'.format(units_dict[var2plot]) ax.text(0.5, 1.05, title, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) # Add N value stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_) ax.text(0.05, 0.9, stats_str, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes) # Add a 1:1 line ax_max = df_tmp.max().max() ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05 ax_min = df_tmp.min().min() ax_min = ax_min - (ax_max*0.05) x_121 = np.arange(ax_min, ax_max*1.5) ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--') # Add ODR line xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121, return_model=False, maxit=10000) ax.plot(xvalues, Y_ODR, color=color, ls='--') # Force axis extents ax.set_aspect('equal') ax.set_xlim(ax_min, ax_max) ax.set_ylim(ax_min, ax_max) ax.set_aspect('equal') # - All above var2plot = 'WOA_Salinity' plot_n = 2 color = CB_color_cycle[0] # Make a new axis ax = fig.add_subplot(3, 2, plot_n, aspect='equal') # Get the data df_tmp = df[[obs_var_dict[var2plot], 
var2plot]].dropna() # Select only data greater that 30 PSU df_tmp = df_tmp.loc[df_tmp[obs_var_dict[var2plot]] >= 30, :] N_ = int(df_tmp[[var2plot]].shape[0]) MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2) RMSE_ = np.sqrt(MSE_) print(N_, MSE_, RMSE_) X = df_tmp[obs_var_dict[var2plot]].values Y = df_tmp[var2plot].values # plot up ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5) # label Y axis if plot_n in np.arange(1, 6)[::2]: ax.set_ylabel('Extracted') # title the plots title = 'Salinity ($\geq$ 30, PSU)'.format(units_dict[var2plot]) ax.text(0.5, 1.05, title, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) # Add N value stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_) ax.text(0.05, 0.9, stats_str, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes) # add a 1:1 line ax_max = df_tmp.max().max() ax_max = AC.myround(ax_max, 1, round_up=True) * 1.05 ax_min = 29 x_121 = np.arange(ax_min, ax_max*1.5) ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--') # add ODR line xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121, return_model=False, maxit=10000) ax.plot(xvalues, Y_ODR, color=color, ls='--') # Force axis extents ax.set_aspect('equal') ax.set_xlim(ax_min, ax_max) ax.set_ylim(ax_min, ax_max) ax.set_aspect('equal') # --- Loop and plot for n_var2plot, var2plot in enumerate(['WOA_TEMP', 'WOA_Nitrate', ]): plot_n = 2 + 1 + n_var2plot color = CB_color_cycle[plot_n] # Make a new axis ax = fig.add_subplot(3, 2, plot_n, aspect='equal') # Get the data df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna() N_ = int(df_tmp[[var2plot]].shape[0]) MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2) RMSE_ = np.sqrt(MSE_) print(N_, MSE_, RMSE_) X = df_tmp[obs_var_dict[var2plot]].values Y = df_tmp[var2plot].values # plot up ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5) # label Y axis if plot_n in np.arange(1, 6)[::2]: ax.set_ylabel('Extracted') # title the plots title = '{} ({})'.format(obs_var_dict[var2plot], units_dict[var2plot]) ax.text(0.5, 1.05, title, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) # Add N value stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_) ax.text(0.05, 0.9, stats_str, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes) # add a 1:1 line ax_max = df_tmp.max().max() ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05 ax_min = df_tmp.min().min() ax_min = ax_min - (ax_max*0.05) x_121 = np.arange(ax_min, ax_max*1.5) ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--') # Add a line for orthogonal distance regression (ODR) xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121, return_model=False, maxit=10000) ax.plot(xvalues, Y_ODR, color=color, ls='--') # Force axis extents ax.set_aspect('equal') ax.set_xlim(ax_min, ax_max) ax.set_ylim(ax_min, ax_max) ax.set_aspect('equal') # --- 1st plot Salinity ( all and >30 PSU ) # - All above var2plot = 'SeaWIFs_ChlrA' plot_n = 5 color = CB_color_cycle[5] # Make a new axis ax = fig.add_subplot(3, 2, plot_n, aspect='equal') # Get the data df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna() N_ = int(df_tmp[[var2plot]].shape[0]) MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2) RMSE_ = np.sqrt(MSE_) print(N_, MSE_, RMSE_) X = df_tmp[obs_var_dict[var2plot]].values Y = df_tmp[var2plot].values # plot up ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5) # label Y axis if plot_n in np.arange(1, 6)[::2]: 
ax.set_ylabel('Extracted') ax.set_xlabel('Observed') # title the plots title = 'ChlrA (all, {})'.format(units_dict[var2plot]) ax.text(0.5, 1.05, title, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) # Add N value stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_) ax.text(0.05, 0.9, stats_str, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes) # add a 1:1 line ax_max = df_tmp.max().max() ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05 ax_min = df_tmp.min().min() ax_min = ax_min - (ax_max*0.05) x_121 = np.arange(ax_min, ax_max*1.5) ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--') # add ODR line xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121, return_model=False, maxit=10000) ax.plot(xvalues, Y_ODR, color=color, ls='--') # Force axis extents ax.set_aspect('equal') ax.set_xlim(ax_min, ax_max) ax.set_ylim(ax_min, ax_max) ax.set_aspect('equal') # - All above var2plot = 'SeaWIFs_ChlrA' plot_n = 6 color = CB_color_cycle[5] # Make a new axis ax = fig.add_subplot(3, 2, plot_n, aspect='equal') # Get the data df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna() # Select only data greater that 30 PSU df_tmp = df_tmp.loc[df_tmp[obs_var_dict[var2plot]] <= 5, :] N_ = int(df_tmp[[var2plot]].shape[0]) MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2) RMSE_ = np.sqrt(MSE_) print(N_, MSE_, RMSE_) X = df_tmp[obs_var_dict[var2plot]].values Y = df_tmp[var2plot].values # plot up ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5) # label Y axis if plot_n in np.arange(1, 6)[::2]: ax.set_ylabel('Extracted') ax.set_xlabel('Observed') # title the plots units = units_dict[var2plot] title = 'ChlrA ($\leq$5 {})'.format(units) ax.text(0.5, 1.05, title, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) # Add N value stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_) ax.text(0.05, 0.9, stats_str, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes) # add a 1:1 line ax_max = df_tmp.max().max() ax_max = AC.myround(ax_max, 1, round_up=True) * 1.05 ax_min = df_tmp.min().min() ax_min = ax_min - (ax_max*0.05) x_121 = np.arange(ax_min, ax_max*1.5) ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--') # add ODR line xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121, return_model=False, maxit=10000) ax.plot(xvalues, Y_ODR, color=color, ls='--') # Force axis extents ax.set_aspect('equal') ax.set_xlim(ax_min, ax_max) ax.set_ylim(ax_min, ax_max) ax.set_aspect('equal') # -- adjust figure and save # Adjust plot left = 0.075 right = 0.975 wspace = 0.05 hspace = 0.175 top = 0.95 bottom = 0.075 fig.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace) # Save filename = 'Oi_prj_Chance2014_Obs_params_vs_NEW_extracted_params_WINDOW' plt.savefig(filename, dpi=dpi) def compare_obs_ancillaries_with_extracted_values(df=None, save2pdf=True, show=False, dpi=320): """ Some species in the dataframe have observed as well as climatology values. 
For these species, plot up X/Y and latitudinal comparisons """ import seaborn as sns sns.set(color_codes=True) current_palette = sns.color_palette("colorblind") sns.set_style("darkgrid") # Get the observational data if isinstance(df, type(None)): df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM # - Map observational variables to their shared extracted variables all_vars = df.columns.tolist() # Dictionary obs_var_dict = { # Temperature 'WOA_TEMP': 'Temperature', # Chlorophyll-a 'SeaWIFs_ChlrA': 'Chl-a', # Nitrate 'WOA_Nitrate': 'Nitrate', # Salinity 'WOA_Salinity': 'Salinity' # There is also 'Nitrite' and 'Ammonium' } # Dict of units for variables units_dict = { 'SeaWIFs_ChlrA': "mg m$^{-3}$", # Chance et al uses micro g/L 'WOA_Salinity': 'PSU', # https://en.wikipedia.org/wiki/Salinity 'WOA_Nitrate': "$\mu$M", 'WOA_TEMP': '$^{o}$C', } # sort dataframe by latitude # df = df.sort_values('Latitude', axis=0, ascending=True) # set the order the dict keys are accessed vars_sorted = list(sorted(obs_var_dict.keys()))[::-1] # Setup pdf if save2pdf: savetitle = 'Oi_prj_Chance2014_Obs_params_vs_NEW_extracted_params' pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi) # - Get variables and confirm which datasets are being used for plot dfs = {} for key_ in vars_sorted: print(obs_var_dict[key_], key_) # drop nans... index2use = df[[obs_var_dict[key_], key_]].dropna().index dfs[key_] = df.loc[index2use, :] # Check which datasets are being used ptr_str = 'For variable: {} (#={})- using: {} \n' for key_ in vars_sorted: datasets = list(set(dfs[key_]['Data_Key'])) dataset_str = ', '.join(datasets) print(ptr_str.format(key_, len(datasets), dataset_str)) # - Loop variables and plot as a scatter plot... for key_ in vars_sorted: print(obs_var_dict[key_], key_) # new figure fig = plt.figure() # drop nans... df_tmp = df[[obs_var_dict[key_], key_]].dropna() N_ = int(df_tmp[[key_]].shape[0]) print(N_) # Plot up sns.regplot(x=obs_var_dict[key_], y=key_, data=df_tmp) # Add title plt.title('X-Y plot of {} (N={})'.format(obs_var_dict[key_], N_)) plt.ylabel('Extracted ({}, {})'.format(key_, units_dict[key_])) plt.xlabel('Obs. ({}, {})'.format( obs_var_dict[key_], units_dict[key_])) # Save out figure &/or show? if save2pdf: AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show: plt.show() plt.close() # - Loop variables and plot verus lat (with difference) for key_ in vars_sorted: print(obs_var_dict[key_], key_) # New figure fig = plt.figure() # Drop nans... df_tmp = df[[obs_var_dict[key_], key_, 'Latitude']].dropna() N_ = int(df_tmp[[key_]].shape[0]) print(N_) # Get data to analyse obs = df_tmp[obs_var_dict[key_]].values climate = df_tmp[key_].values X = df_tmp['Latitude'].values # Plot up plt.scatter(X, obs, label=obs_var_dict[key_], color='red', marker="o") plt.scatter(X, climate, label=key_, color='blue', marker="o") plt.scatter(X, climate-obs, label='diff', color='green', marker="o") # Athesetics of plot? plt.legend() plt.xlim(-90, 90) plt.ylabel('{} ({})'.format(obs_var_dict[key_], units_dict[key_])) plt.xlabel('Latitude ($^{o}$N)') plt.title('{} (N={}) vs. latitude'.format(obs_var_dict[key_], N_)) # Save out figure &/or show? if save2pdf: AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) if show: plt.show() plt.close() # Save entire pdf if save2pdf: AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi) def plot_up_lat_STT_var(restrict_data_max=True, restrict_min_salinity=True): """ Plot up a "pretty" plot of STT vs Lat, with scatter sizes and color by var. 
""" # - Get data as a DataFrame df = obs.get_processed_df_obs_mod() if restrict_data_max: # df = df[ df['Iodide']< 450. ] df = df[df['Iodide'] < 400.] # Updated to use 400 nM as upper value if restrict_min_salinity: df = df[df['WOA_Salinity'] > 30.] # Add modulus df["Latitude (Modulus)"] = np.sqrt(df["Latitude"].copy()**2) # - Local vars X_varname = "Latitude (Modulus)" Y_varname = "WOA_TEMP" S_varname = 'Iodide' S_label = S_varname C_varname = S_varname # - plot fig, ax = plt.subplots(facecolor='w', edgecolor='w') df.plot(kind="scatter", x=X_varname, y=Y_varname, alpha=0.4, s=df[S_varname], label=S_label, figsize=(10, 7), c=S_varname, cmap=plt.get_cmap("jet"), colorbar=True, sharex=False, ax=ax, fig=fig) plt.show() def plot_up_lat_varI_varII(restrict_data_max=True, restrict_min_salinity=True): """ Plot up a "pretty" plot of STT vs Lat, with scatter sizes and color by var. """ # - Get data as a DataFrame df = obs.get_processed_df_obs_mod() if restrict_data_max: # df = df[ df['Iodide']< 450. ] df = df[df['Iodide'] < 400.] # Updated to use 400 nM as upper value if restrict_min_salinity: df = df[df['WOA_Salinity'] > 30.] df["Latitude (Modulus)"] = np.sqrt(df["Latitude"].copy()**2) # - Local variables # override? (unhashed) varI = 'Iodide' varII = "WOA_TEMP" # name local vars X_varname = "Latitude (Modulus)" Y_varname = varI S_varname = varII S_label = S_varname C_varname = S_varname # - plot up fig, ax = plt.subplots(facecolor='w', edgecolor='w') df.plot(kind="scatter", x=X_varname, y=Y_varname, alpha=0.4, s=df[S_varname], label=S_label, figsize=(10, 7), c=S_varname, cmap=plt.get_cmap("jet"), colorbar=True, sharex=False, ax=ax, fig=fig) plt.ylim(-5, 500) plt.show() def plot_chance_param(df=None, X_var='Temperature', Y_var='Iodide', data_str='(Obs.) data'): """ Plot up chance et al (2014) param vs. data in DataFrame """ # Only include finite data points for temp # ( NOTE: down to 1/3 of data of obs. data?! ) df = df[np.isfinite(df[X_var])] # Add a variable for C**2 fit Xvar2plot = X_var+'($^{2}$)' df[Xvar2plot] = df[X_var].loc[:].values**2 # Plot up data and param. fig, ax = plt.subplots(facecolor='w', edgecolor='w') # Plot up df.plot(kind='scatter', x=Xvar2plot, y=Y_var, ax=ax) # Add a line of best fit reported param. actual_data = df[Xvar2plot].values test_data = np.linspace(AC.myround(actual_data.min()), AC.myround(actual_data.max()), 20) m = 0.225 c = 19.0 plt.plot(test_data, ((test_data*m)+c), color='green', ls='--', label='Chance et al (2014) param.') # Limit axis to data plt.xlim(-50, AC.myround(df[Xvar2plot].values.max(), 1000)) plt.ylim(-20, AC.myround(df[Y_var].values.max(), 50, round_up=True)) # Add title and axis labels N = actual_data.shape[0] title = 'Linear param vs. {} (N={})'.format(data_str, N) plt.title(title) plt.xlabel(X_var + ' ($^{o}$C$^{2}$)') plt.ylabel(Y_var + ' (nM)') plt.legend(loc='upper left') # And show/save tmp_str = data_str.replace(" ", '_').replace("(", "_").replace(")", "_") savetitle = 'Chance_param_vs_{}.png'.format(tmp_str) plt.savefig(savetitle) plt.show() def plot_macdonald_param(df=None, X_var='Temperature', Y_var='Iodide', data_str='(Obs.) data'): """ Plot up MacDonald et al (2014) param vs. data in DataFrame """ # Only include finite data points for temp # ( NOTE: down to 1/3 of data of obs. data?! ) df = df[np.isfinite(df[X_var])] # Add a variable for Xvar2plot = '1/'+X_var df[Xvar2plot] = 1. / (df[X_var].loc[:].values+273.15) Y_var2plot = 'ln({})'.format(Y_var) df[Y_var2plot] = np.log(df[Y_var].values) # Plot up data and param. 
fig, ax = plt.subplots(facecolor='w', edgecolor='w') df.plot(kind='scatter', x=Xvar2plot, y=Y_var2plot, ax=ax) # Add a line of best fit reported param. # (run some numbers through this equation... ) actual_data = df[X_var].values + 273.15 test_data = np.linspace(actual_data.min(), actual_data.max(), 20) test_data_Y = 1.46E6*(np.exp((-9134./test_data))) * 1E9 plt.plot(1./test_data, np.log(test_data_Y), color='green', ls='--', label='MacDonald et al (2014) param.') # Limit axis to data plt.xlim(df[Xvar2plot].values.min()-0.000025, df[Xvar2plot].values.max()+0.000025) plt.ylim(0, 7) # Add title and axis labels N = actual_data.shape[0] title = 'Arrhenius param vs. {} (N={})'.format(data_str, N) plt.title(title) plt.xlabel(Xvar2plot + ' ($^{o}$K)') plt.ylabel(Y_var2plot + ' (nM)') plt.legend(loc='lower left') # And show/save tmp_str = data_str.replace(" ", '_').replace("(", "_").replace(")", "_") savetitle = 'MacDonald_parameterisation_vs_{}.png'.format(tmp_str) plt.savefig(savetitle) plt.show() def plot_current_parameterisations(): """ Plot up a comparison of Chance et al 2014 and MacDonald et al 2014 params. """ # - Get obs and processed data # get raw obs raw_df = get_core_Chance2014_obs() # don't consider iodide values above 30 raw_df = raw_df[raw_df['Iodide'] > 30.] # - get processed obs. pro_df = obs.get_processed_df_obs_mod() restrict_data_max, restrict_min_salinity = True, True if restrict_data_max: # pro_df = pro_df[ pro_df['Iodide'] < 450. ] # used for July Oi! mtg. # restrict below 400 (per. com. RJC) pro_df = pro_df[pro_df['Iodide'] < 400.] if restrict_min_salinity: pro_df = pro_df[pro_df['WOA_Salinity'] > 30.] # - Plots with raw obs. # Plot up "linear" fit of iodide and temperature. (Chance et al 2014) # plot up Chance # plot_chance_param(df=raw_df.copy()) # Plot up "Arrhenius" fit of iodide and temperature. ( MacDonald et al 2014) plot_macdonald_param(df=raw_df.copy()) # - Plots with extract Vars. # Plot up "linear" fit of iodide and temperature. (Chance et al 2014) # plot_chance_param(df=pro_df.copy(), data_str='Extracted data', # X_var='WOA_TEMP') # Plot up "Arrhenius" fit of iodide and temperature. ( MacDonald et al 2014) plot_macdonald_param(df=pro_df.copy(), data_str='Extracted data', X_var='WOA_TEMP') # --------------------------------------------------------------------------- # ---------------- Misc. 
Support for iodide project ------------------------
# ---------------------------------------------------------------------------


def explore_diferences_for_Skagerak():
    """
    Explore how the Skagerak data differs from the dataset as a whole
    """
    # - Get the observations and model output
    folder = utils.get_file_locations('data_root')
    filename = 'Iodine_obs_WOA_v8_5_1_ENSEMBLE_csv__avg_nSkag_nOutliers.csv'
    dfA = pd.read_csv(folder+filename, encoding='utf-8')
    # - Local variables
    diffvar = 'Salinity diff'
    ds_str = 'Truesdale_2003_I'
    obs_var_dict = {
        # Temperature
        'WOA_TEMP': 'Temperature',
        # Chlorophyll-a
        'SeaWIFs_ChlrA': 'Chl-a',
        # Nitrate
        'WOA_Nitrate': 'Nitrate',
        # Salinity
        'WOA_Salinity': 'Salinity'
        # There is also 'Nitrite' and 'Ammonium'
    }
    # - Analysis / updates to DataFrames
    # NOTE: the original code indexed a non-existent 'diffvar' column; the
    # intended difference is assumed to be the WOA climatological salinity
    # minus the in-situ 'Salinity' column (per obs_var_dict above)
    dfA[diffvar] = dfA['WOA_Salinity'].values - dfA['Salinity'].values
    # - Get just the Skagerak dataset
    df = dfA.loc[dfA['Data_Key'] == ds_str]
    prt_str = 'The general stats on the Skagerak dataset ({}) are: '
    print(prt_str.format(ds_str))
    # General stats on the iodide numbers
    stats = df['Iodide'].describe()
    for idx in stats.index.tolist():
        vals = stats[stats.index == idx].values[0]
        print('{:<10}: {:<10}'.format(idx, vals))
    # - Stats on the in-situ data
    print('\n')
    prt_str = 'The stats on the Skagerak ({}) in-situ ancillary obs. are: '
    print(prt_str.format(ds_str))
    # Which in-situ variables are there?
    vals = df[list(obs_var_dict.values())].count()
    prt_str = "for in-situ variable '{:<15}' there are N={} values"
    for idx in vals.index.tolist():
        vals2prt = vals[vals.index == idx].values[0]
        print(prt_str.format(idx, vals2prt))


def check_numbers4old_chance_and_new_chance():
    """
    Do checks on which datasets have changed between versions
    """
    # - Get all observational data
    NIU, md_df = obs.get_iodide_obs()
    folder = '/work/home/ts551/data/iodide/'
    filename = 'Iodide_data_above_20m_v8_5_1.csv'
    df = pd.read_csv(folder+filename)
    df = df[np.isfinite(df['Iodide'])]  # Remove NaNs
    verOrig = 'v8.5.1'
    NOrig = df.shape[0]
    # Add the 'in Chance (2014)?' flag to the dataset
    ChanceStr = 'In Chance2014?'
    df[ChanceStr] = None
    for ds in list(set(md_df['Data_Key'])):
        bool_ = df['Data_Key'] == ds
        IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0]
        df.loc[bool_, ChanceStr] = IsChance
    # Where are the new iodide data points?
    newLODds = set(df.loc[df['ErrorFlag'] == 7]['Data_Key'])
    prt_str = 'The new datasets from ErrorFlag 7 are in: {}'
    print(prt_str.format(' , '.join(newLODds)))
    # Versions with a different number of iodide values
    filename = 'Iodide_data_above_20m_v8_2.csv'
    df2 = pd.read_csv(folder + filename)
    df2 = convert_old_Data_Key_names2new(df2)  # Use data descriptor names
    df2 = df2[np.isfinite(df2['Iodide'])]  # Remove NaNs
    ver = '8.2'
    prt_str = 'Version {} of the data - N={} (vs {} N={})'
    print(prt_str.format(ver, df2.shape[0], verOrig, NOrig))
    # Do analysis by dataset
    for ds in list(set(md_df['Data_Key'])):
        N0 = df.loc[df['Data_Key'] == ds, :].shape[0]
        N1 = df2.loc[df2['Data_Key'] == ds, :].shape[0]
        IsChance = list(set(df.loc[df['Data_Key'] == ds, ChanceStr]))[0]
        prt_str = "DS: '{}' (Chance2014={}) has changed by {} to {} ({} vs. {})"
        if N0 != N1:
            print(prt_str.format(ds, IsChance, N0-N1, N0, verOrig, ver))


def get_numbers_for_data_paper():
    """
    Get various numbers/analysis for data descriptor paper
    """
    # - Get the full iodide sea-surface dataset
    filename = 'Iodide_data_above_20m.csv'
    folder = utils.get_file_locations('s2s_root')+'/Iodide/inputs/'
    df = pd.read_csv(folder + filename, encoding='utf-8')
    # Exclude non finite data points.
df = df.loc[np.isfinite(df['Iodide']), :] # Save the full data set as .csv for use in Data Descriptor paper cols2use = [ u'Data_Key', u'Data_Key_ID', 'Latitude', u'Longitude', # u'\xce\xb4Iodide', 'Year', # u'Month (Orig.)', # This is RAW data, therefore Month is observation one u'Month', 'Day', 'Iodide', u'Iodide', 'ErrorFlag', 'Method', 'Coastal', u'LocatorFlag', ] df = df[cols2use] # Map references to final .csv from metadata md_df = obs.get_iodide_obs_metadata() col2use = u'Reference' Data_keys = set(df['Data_Key'].values) for Data_key in Data_keys: # Get ref for dataset from metadata bool_ = md_df[u'Data_Key'] == Data_key REF = md_df.loc[bool_, :][col2use].values[0].strip() # Add to main data array bool_ = df[u'Data_Key'] == Data_key df.loc[bool_, col2use] = REF # Round up the iodide values df['Iodide'] = df['Iodide'].round(1).values df[u'Iodide'] = df[u'Iodide'].round(1).values df[u'Longitude'] = df[u'Longitude'].round(6).values df[u'Latitude'] = df[u'Latitude'].round(6).values # Now lock in values by settings to strings. df[cols2use] = df[cols2use].astype(str) # save the resultant file out filename = 'Oi_prj_Iodide_obs_surface4DataDescriptorPaper.csv' df.to_csv(filename, encoding='utf-8') # Get number of samples of iodide per dataset md_df = obs.get_iodide_obs_metadata() md_df.index = md_df['Data_Key'] s = pd.Series() Data_Keys = md_df['Data_Key'] for Data_Key in Data_Keys: df_tmp = df.loc[df['Data_Key'] == Data_Key] s[Data_Key] = df_tmp.shape[0] md_df['n'] = s md_df.index = np.arange(md_df.shape[0]) md_df.to_csv('Oi_prj_metadata_with_n.csv', encoding='utf-8') # Check sum for assignment? prt_str = '# Assigned values ({}) should equal original DataFrame size:{}' print(prt_str.format(md_df['n'].sum(), str(df.shape[0]))) # Get number of samples of iodide per obs. technique Methods = set(df['Method']) s_ds = pd.Series() s_n = pd.Series() for Method in Methods: df_tmp = df.loc[df['Method'] == Method] s_n[Method] = df_tmp.shape[0] s_ds[Method] = len(set(df_tmp['Data_Key'])) # Combine and save dfS = pd.DataFrame() dfS['N'] = s_n dfS['datasets'] = s_ds dfS.index.name = 'Method' # Reset index index2use = [str(i) for i in sorted(pd.to_numeric(dfS.index))] dfS = dfS.reindex(index2use) dfS.to_csv('Oi_prj_num_in_Methods.csv', encoding='utf-8') # Check sum on assignment of methods prt_str = '# Assigned methods ({}) should equal original DataFrame size:{}' print(prt_str.format(dfS['N'].sum(), str(df.shape[0]))) prt_str = '# Assigned datasets ({}) should equal # datasets: {}' print(prt_str.format(dfS['datasets'].sum(), len(set(df['Data_Key'])))) # Check which methods are assign to each dataset dfD = pd.DataFrame(index=sorted(set(df['Method'].values))) S = [] for Data_Key in Data_Keys: df_tmp = df.loc[df['Data_Key'] == Data_Key] methods_ = set(df_tmp['Method'].values) dfD[Data_Key] = pd.Series(dict(zip(methods_, len(methods_)*[True]))) # Do any datasets have more than one method? 
print('These datasets have more than one method: ') print(dfD.sum(axis=0)[dfD.sum(axis=0) > 1]) def mk_PDF_plot_for_Data_descriptor_paper(): """ Make a PDF plot for the data descriptor paper """ import seaborn as sns sns.set(color_codes=True) # Get the data df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM # df = df.loc[df['Iodide'] <400, : ] # split data into all, Coastal and Non-Coastal dfs = {} dfs['All'] = df.copy() dfs['Coastal'] = df.loc[df['Coastal'] == 1, :] dfs['Non-coastal'] = df.loc[df['Coastal'] != 1, :] # if hist=True, use a count instead of density hist = False # Loop and plot axlabel = '[I$^{-}_{aq}$] (nM)' fig, ax = plt.subplots() vars2plot = dfs.keys() for key in vars2plot: sns.distplot(dfs[key]['Iodide'].values, ax=ax, axlabel=axlabel, label=key, hist=hist) # force y axis extend to be correct ax.autoscale() # Add a legend plt.legend() # Add a label for the Y axis plt.ylabel('Density') # save plot if hist: savename = 'Oi_prj_Data_descriptor_PDF' else: savename = 'Oi_prj_Data_descriptor_PDF_just_Kernal' plt.savefig(savename+'.png', dpi=dpi) def mk_pf_files4Iodide_cruise(dfs=None, test_input_files=False, mk_column_output_files=False, num_tracers=103): """ Make planeflight input files for iodide cruises """ # Get locations for cruises as if isinstance(dfs, type(None)): dfs = get_iodide_cruise_data_from_Anoop_txt_files() # Test the input files? if test_input_files: test_input_files4Iodide_cruise_with_plots(dfs=dfs) # Make planeflight files for DataFrames of cruises data (outputting columns values) if mk_column_output_files: # slist = ['O3', 'IO', 'BrO', 'CH2O'] slist = ['TRA_002', 'TRA_046', 'TRA_092', 'TRA_020', 'GLYX'] met_vars = [ 'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND' ] slist = slist + met_vars for key_ in dfs.keys(): print(key_, dfs[key_].shape) df = dfs[key_].dropna() print(df.shape) # add TYPE flag df['TYPE'] = 'IDC' # Grid box level centers [hPa] alts_HPa = AC.gchemgrid('c_hPa_geos5_r') # Loop and add in column values dfs_all = [] for n_alt, hPa_ in enumerate(alts_HPa): print(hPa_, n_alt) df_ = df.copy() df_['PRESS'] = hPa_ dfs_all += [df_] df = pd.concat(dfs_all) # make sure rows are in date order df.sort_values(['datetime', 'PRESS'], ascending=True, inplace=True) # now output files AC.prt_PlaneFlight_files(df=df, slist=slist) # Make planeflight files for DataFrames of cruises data # (outputting surface values) else: met_vars = [ 'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND' ] assert isinstance(num_tracers, int), 'num_tracers must be an integer' slist = ['TRA_{:0>3}'.format(i) for i in np.arange(1, num_tracers+1)] species = ['OH', 'HO2', 'GLYX'] slist = slist + species + met_vars for key_ in dfs.keys(): print(key_) df = dfs[key_].dropna() # add TYPE flag df['TYPE'] = 'IDS' # df['PRESS'] = 1013.0 # now output files AC.prt_PlaneFlight_files(df=df, slist=slist) def test_input_files4Iodide_cruise_with_plots(dfs=None, show=False): """" Plot up maps of iodide cruise routes """ # Get locations for cruises as if isinstance(dfs, type(None)): dfs = get_iodide_cruise_data_from_Anoop_txt_files() # - Test input files # file to save? 
savetitle = 'GC_pf_input_iodide_cruises' dpi = 320 pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi) vars2test = ['LON', 'LAT'] for key_ in dfs.keys(): df = dfs[key_] for var_ in vars2test: # -- Plot X vs Y plot df_tmp = df[['datetime', var_]] # calc NaNs VAR_dropped_N = int(df_tmp.shape[0]) df_tmp = df_tmp.dropna() VAR_N_data = int(df_tmp.shape[0]) VAR_dropped_N = VAR_dropped_N-VAR_N_data # plot df_tmp.plot(x='datetime', y=var_) # title = "Timeseries of '{}' for '{}'".format(var_, key_) title += ' (ALL N={}, exc. {} NaNs)'.format(VAR_N_data, VAR_dropped_N) plt.title(title) # Save / show file2save_str = 'Iodide_input_file_{}_check_{}.png'.format( key_, var_) plt.savefig(file2save_str) if show: plt.show() print(df_tmp[var_].describe()) AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) # -- Plot up cruise track as map del df_tmp df_tmp = df.dropna() lons = df_tmp['LON'].values lats = df_tmp['LAT'].values title = "Cruise track for '{}'".format(key_) print('!'*100, 'plotting map for: ', key_) AC.plot_lons_lats_spatial_on_map(lons=lons, lats=lats, title=title) plt.ylim(AC.myround(lats.min()-20, 10, ), AC.myround(lats.max()+20, 10, round_up=True)) plt.xlim(AC.myround(lons.min()-20, 10, ), AC.myround(lons.max()+20, 10, round_up=True)) if show: plt.show() AC.plot2pdfmulti(pdff, savetitle, dpi=dpi) # Save entire pdf AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi) def get_iodide_cruise_data_from_Anoop_txt_files(verbose=False): """ Get observational data and locations from Anoop's txt files """ # - Local variables folder = utils.get_file_locations('data_root') folder += 'LOCS_Inamdar_Mahajan_cruise_x3/' cruise_files = { # 1 8th Southern Ocean Expedition (SOE-8), possibly on the RV Sagar Nidhi # 'Iodide1': 'cruise1_2014.xlsx', 'SOE-8': 'cruise1_2014.xlsx', # 2 2nd International Indian Ocean Expedition (<-2), # possibly one of several cruises in this program # (IIOE-1 was decades ago). On board RV Sagar Nidhi. # 'Iodide2': 'cruise2_2015.xlsx', 'IIOE-1': 'cruise2_2015.xlsx', # 3 9th Southern Ocean Expedition (SOE-9), cruise Liselotte Tinel took samples on # Ship RV Agulhas. 
# 'Iodide3': 'cruise3_2016.xlsx', 'SOE-9': 'cruise3_2016.xlsx', } # - Extract data dfs = {} for cruise_name in cruise_files.keys(): print('Extracting: ', cruise_name, cruise_files[cruise_name]) # cruise_name = cruise_files.keys()[0] df = pd.read_excel(folder+cruise_files[cruise_name]) names_dict = { 'Date': 'date', 'UTC': 'date', 'time (UTC)': 'time', 'lat': 'LAT', 'lon': 'LON' } if verbose: print(df.head()) df.rename(columns=names_dict, inplace=True) if verbose: print(df.head()) # convert dates to datetime # def _convert_datetime(x): # return (270-atan2(x['date'],x['GMAO_UWND'])*180/pi)%360 # df['datetime'] = df.apply( f, axis=1) df['datetime'] = df['date'].astype(str)+' '+df['time'].astype(str) df['datetime'] = pd.to_datetime(df['datetime']) df.index = df['datetime'].values if verbose: print(df.head()) dfs[cruise_name] = df[['datetime', 'LON', 'LAT']] return dfs def TEST_AND_PROCESS_iodide_cruise_output(just_process_surface_data=False): """ Process, plot (test values), then save planeflight values to csv """ # Local variables wd = '/scratch/ts551/GC/v10-01_HAL/' files_dict = { 'SOE-8': wd+'run.ClBr.Iodide2015.SOE-8', 'IIOE-1': wd+'run.ClBr.Iodide2016.IIOE-1', 'SOE-9': wd+'run.ClBr.Iodide2017.SOE-9', } # Test surface output if just_process_surface_data: extra_str = 'surface' dfs = {} for key_ in files_dict.keys(): wd = files_dict[key_]+'/plane_flight_logs_{}/'.format(extra_str) df = process_planeflight_files(wd=wd) dfs[key_] = df get_test_plots_surface_pf_output(df=df, name='{} ({})'.format(key_, extra_str)) # Save the output as .csv for key_ in dfs.keys(): savetitle = 'GC_planeflight_compiled_output_for_{}_{}.csv' savetitle = savetitle.format(key_, extra_str) savetitle = AC.rm_spaces_and_chars_from_str(savetitle) dfs[key_].to_csv(savetitle) # - Process the output files for column values else: specs = ['O3', 'BrO', 'IO', 'CH2O'] extra_str = 'column' dfs = {} file_str = 'GC_planeflight_compiled_output_for_{}_{}_II.csv' for key_ in files_dict.keys(): # for key_ in ['IIOE-1']: print(key_) pf_wd = files_dict[key_]+'/plane_flight_logs_{}/'.format(extra_str) df = process_planeflight_files(wd=pf_wd) # now process to column values df = process_planeflight_column_files(wd=files_dict[key_], df=df) dfs[key_] = df # Save the output as .csv savetitle = file_str.format(key_, extra_str) df['datetime'] = df.index df.to_csv(AC.rm_spaces_and_chars_from_str(savetitle)) # Test plots? 
for key_ in files_dict.keys(): savetitle = file_str.format(key_, extra_str) df = pd.read_csv(AC.rm_spaces_and_chars_from_str(savetitle)) df.index = pd.to_datetime(df['datetime']) get_test_plots_surface_pf_output(df=df, name='{} ({})'.format( key_, extra_str), specs=specs, units='molec cm$^{-2}$', scale=1) def process_planeflight_column_files(wd=None, df=None, res='4x5', debug=False): """ Process column of v/v values into single values for total column """ # wd=files_dict[key_]; df = dfs[ key_ ]; res='4x5' specs = ['O3', u'BrO', u'IO', u'CH2O', u'GLYX'] timestamps = list(sorted(set(df.index))) timestamps_with_duplicates = [] RMM_air = AC.constants('RMM_air') AVG = AC.constants('AVG') specs = ['O3', 'BrO', 'IO', 'CH2O'] # get lon lat array of time in troposphere TPS = AC.get_GC_output(wd=wd+'/', vars=['TIME_TPS__TIMETROP'], trop_limit=True) # convert this to boolean (<1 == not strat) TPS[TPS != 1] = 9999.9 TPS[TPS == 1] = False TPS[TPS == 9999.9] = True # And dates CTM_DATES = AC.get_gc_datetime(wd=wd+'/') CTM_months = np.array([i.month for i in CTM_DATES]) # a EPOCH = datetime.datetime(1970,1,1) # CTM_EPOCH = np.array([ (i.month-EPOCH).total_seconds() for i in CTM_DATES ]) # Also get grid of surface area ( m^2 ) and convert to cm2 S_AREA = AC.get_surface_area(res=res) * 10000 A_M = AC.get_GC_output(wd, vars=['BXHGHT_S__AD'], trop_limit=True, dtype=np.float64) # VOL = AC.get_volume_np( wd=wd, res=res, s_area=S_AREA[...,None]) big_data_l = [] dates = [] # for ts in timestamps[::1000]: # Test processing on first 1000 points n_timestamps = len(timestamps) for n_ts, ts in enumerate(timestamps): print('progress= {:.3f} %'.format((float(n_ts) / n_timestamps)*100.)) tmp_df = df.loc[df.index == ts] if debug: print(ts, tmp_df.shape) # List of pressures (one set = 47 ) PRESS_ = tmp_df['PRESS'].values # special condition for where there is more than column set # for a timestamp # assert( len(PRESS) == 47 ) if len(PRESS_) != 47: timestamps_with_duplicates += [ts] prt_str = 'WARNING: DOUBLE UP IN TIMESTEP:{} ({}, shape={})' print(prt_str.format(ts, len(PRESS_), tmp_df.shape)) print('Just using 1st 47 values') tmp_df = tmp_df[0:47] dates += [ts] else: dates += [ts] # Now reverse data (as outputted from highest to lowest) tmp_df = tmp_df.loc[::-1] # select everyother value? # lon select locations LAT_ = tmp_df['LAT'].values LON_ = tmp_df['LON'].values # check there is only one lat and lon assert len(set(LAT_)) == 1 assert len(set(LON_)) == 1 # - Select 3D vars from ctm.nc file # get LON, LAT index of box LON_ind = AC.get_gc_lon(LON_[0], res=res) LAT_ind = AC.get_gc_lat(LAT_[0], res=res) # time_ind = AC.find_nearest( CTM_EPOCH, (ts-EPOCH).total_seconds() ) time_ind = AC.find_nearest(CTM_months, ts.month) # tropspause height? ('TIME_TPS__TIMETROP) TPS_ = TPS[LON_ind, LAT_ind, :, time_ind] # Select surface area of grid box S_AREA_ = S_AREA[LON_ind, LAT_ind, 0] # comput column by spec A_M_ = A_M[LON_ind, LAT_ind, :, time_ind] # Number of molecules per grid box MOLECS_ = (((A_M_*1E3) / RMM_air) * AVG) # Extract for species data_l = [] for spec in specs: # Get species in v/v data_ = tmp_df[spec].values # Mask for troposphere data_ = np.ma.array(data_[:38], mask=TPS_) # Get number of molecules data_ = (data_ * MOLECS_).sum() # Convert to molecs/cm2 data_ = data_ / S_AREA_ # Store data data_l += [data_] # Save location data_l += [LON_[0], LAT_[0]] # Save data for all specs big_data_l += [data_l] # Convert to DataFrame. 
df_col = pd.DataFrame(big_data_l) df_col.index = dates # timestamps[::1000] df_col.columns = specs + ['LON', 'LAT'] print(df_col.shape) return df_col def process_planeflight_files(wd=None): """ Process planeflight files to pd.DataFrame """ import glob import seaborn as sns sns.set_context("paper", font_scale=0.75) # Get planeflight data files = glob.glob(wd+'plane.log.*') print(wd, len(files), files[0]) names, POINTS = AC.get_pf_headers(files[0]) dfs = [AC.pf_csv2pandas(file=i, vars=names) for i in files] df = pd.concat(dfs) # Rename axis TRA_XXs = [i for i in df.columns if ('TRA_' in i)] TRA_dict = dict( zip(TRA_XXs, [v10_ClBrI_TRA_XX_2_name(i) for i in TRA_XXs])) df.rename(columns=TRA_dict, inplace=True) return df def get_test_plots_surface_pf_output(wd=None, name='Planeflight', df=None, specs=None, units=None, scale=1, show_plot=False): """ Test model output at surface for Indian sgip cruises """ import seaborn as sns sns.set(color_codes=True) # Get data if isinstance(df, type(None)): df = process_planeflight_files(wd=wd, name=name) # Now add summary plots dpi = 320 savetitle = 'GC_planeflight_summary_plots_for_{}_V'.format(name) savetitle = AC.rm_spaces_and_chars_from_str(savetitle) pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi, no_dstr=True) # Locations outputted for? title = 'Locations of {} output'.format(name) fig, ax = plt.subplots() AC.plot_lons_lats_spatial_on_map(title=title, f_size=15, lons=df['LON'].values, lats=df['LAT'].values, fig=fig, ax=ax) AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=True) if show_plot: plt.show() # Timeseries of key species if isinstance(specs, type(None)): key_spec = ['O3', 'NO', 'NO2', 'OH', 'HO2', 'IO', 'BrO'] extras = ['SO4', 'DMS', 'CH2O', ] species = ['OH', 'HO2', 'GLYX'] specs = key_spec + extras + species specs += ['LON', 'LAT'] met = ['GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND'] specs += met print(specs) for spec in specs: fig, ax = plt.subplots() if isinstance(units, type(None)): units, scale = AC.tra_unit(spec, scale=True) try: spec_LaTeX = AC.latex_spec_name(spec) except: spec_LaTeX = spec print(spec, units, spec_LaTeX, scale) dates = pd.to_datetime(df.index).values plt.plot(dates, df[spec].values*scale) plt.ylabel('{} ({})'.format(spec, units)) title_str = "Timeseries of modelled '{}' during {}" plt.title(title_str.format(spec_LaTeX, name)) plt.xticks(rotation=45) plt.subplots_adjust(bottom=0.15) AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=True) if show_plot: plt.show() plt.close() # Save entire pdf AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi, no_dstr=True) def mk_data_files4Indian_seasurface_paper(res='0.125x0.125'): """ Make data files for the indian ocean surface iodide paper """ AreasOfInterest = { 'SubT_NA': ('NASW', 'NATR', 'NASE', ), 'SubT_SA': ('SATL',), 'SubT_NP': (u'NPSW', 'NPTG'), 'SubT_SP': ('SPSG',), 'SubT_SI': ('ISSG',), } AreasOfInterest_Names = AreasOfInterest.copy() # Get dictionaries of province numbers and names num2prov = LonghurstProvinceFileNum2Province( None, invert=True, rtn_dict=True) MRnum2prov = MarineRegionsOrg_LonghurstProvinceFileNum2Province( None, invert=True, rtn_dict=True) Rnum2prov = RosieLonghurstProvinceFileNum2Province( None, invert=True, rtn_dict=True) # Convert regions to the LP numbers PrtStr = "{} = Requested province: {} - R's #={}, MIT(GitHub) #={}, LH(2010) #={}" for key_ in AreasOfInterest.keys(): for a_ in AreasOfInterest[key_]: print(PrtStr.format( key_, a_, Rnum2prov[a_], num2prov[a_], MRnum2prov[a_])) nums = 
[MRnum2prov[i] for i in AreasOfInterest[key_]] AreasOfInterest[key_] = nums # - Get data all together Filename = 'Oi_prj_predicted_iodide_0.125x0.125_No_Skagerrak_WITH_Provinces.nc' # folder = '/work/home/ts551/data/iodide/' folder = './' ds = xr.open_dataset(folder + Filename) params = ['Chance2014_STTxx2_I', 'MacDonald2014_iodide', 'Ensemble_Monthly_mean'] vars2use = params + ['LonghurstProvince'] ds = ds[vars2use] # Also add the features of interest Filename = 'Oi_prj_feature_variables_0.125x0.125_WITH_Provinces.nc' ds2 = xr.open_dataset(folder + Filename) vars2add = ['WOA_MLDpt', 'WOA_Nitrate', 'WOA_TEMP', 'WOA_Salinity'] for var in vars2add: ds[var] = ds2[var] # Add axis X/Y assignment attrs = ds['lat'].attrs attrs["axis"] = 'Y' ds['lat'].attrs = attrs attrs = ds['lon'].attrs attrs["axis"] = 'X' ds['lon'].attrs = attrs # - Now extract the data and check the locations being extracted # Make files with the data of interest. file_str = 'Oi_OS_Longhurst_provinces_{}_{}_{}.{}' for key_ in AreasOfInterest.keys(): nums = AreasOfInterest[key_] ds_tmp = ds.where(np.isin(ds.LonghurstProvince.values, nums)) # - Plot a diagnostic figure fig, ax = plt.subplots() ds_tmp['LonghurstProvince'].mean(dim='time').plot(ax=ax) #get names and numbers of assigned areas Names = AreasOfInterest_Names[key_] nums = [str(i) for i in AreasOfInterest[key_]] # Add a title nums = [str(i) for i in nums] title = "For '{}' ({}), \n plotting #(s)={}" title = title.format(key_, ', '.join(Names), ', '.join(nums)) plt.title(title) # Save to png png_filename = file_str.format(key_, '', res, 'png') plt.savefig(png_filename, dpi=dpi) plt.close() # - What is the area extent of the data var2use = 'WOA_Nitrate' ds_lat = ds_tmp[var2use].dropna(dim='lat', how='all') min_lat = ds_lat['lat'].min() - 2 max_lat = ds_lat['lat'].max() + 2 ds_lon = ds_tmp[var2use].dropna(dim='lon', how='all') min_lon = ds_lon['lon'].min() - 2 max_lon = ds_lon['lon'].max() + 2 # - Now save by species vars2save = [i for i in ds_tmp.data_vars if i != 'LonghurstProvince'] for var_ in vars2save: print(var_) da = ds_tmp[var_] # select the minimum area for the areas da = da.sel(lat=(da.lat >= min_lat)) da = da.sel(lat=(da.lat < max_lat)) if key_ in ('SubT_NP' 'SubT_SP'): print('just limiting lat for: {}'.format(key_)) else: da = da.sel(lon=(da.lon >= min_lon)) da = da.sel(lon=(da.lon < max_lon)) # Save the data to NetCDF. filename = file_str.format(key_, var_, res, '') filename = AC.rm_spaces_and_chars_from_str(filename) da.to_netcdf(filename+'.nc') # --------------------------------------------------------------------------- # --------------- Functions for Atmospheric impacts work ------------------- # --------------------------------------------------------------------------- def Do_analysis_and_mk_plots_for_EGU19_poster(): """ Driver function for analysis and plotting for EGU poster """ # - Get data # data locations and names as a dictionary wds = get_run_dict4EGU_runs() runs = list(sorted(wds.keys())) # Get emissions dsDH = GetEmissionsFromHEMCONetCDFsAsDatasets(wds=wds) # Process the datasets? # a = [ AC.get_O3_burden( wd=wds[i] ) for i in runs ] # Get datasets objects from directories and in a dictionary dsD = {} for run in runs: ds = xr.open_dataset(wds[run]+'ctm.nc') dsD[run] = ds # - Do analysis # Get summary emission stats Check_global_statistics_on_emissions(dsDH=dsDH) # Look at differences in surface concentration. 
    extra_str = 'EGU_runs_surface_Iy_stats_'
    df = evalulate_burdens_and_surface_conc(run_dict=wds, extra_str=extra_str)
    # Get general statistics about the runs vs. Macdonald et al 2014
    REF1 = 'Macdonald2014'
    extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
    df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
                                             extra_str=extra_str)
    # Get general statistics about the runs vs. Chance et al 2014
    REF1 = 'Chance2014'
    extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
    df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
                                             extra_str=extra_str)
    # Get general statistics about the runs vs. the ML_Iodide run
    REF1 = 'ML_Iodide'
    extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
    df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
                                             extra_str=extra_str)
    # Get general statistics about the runs vs. the No_HOI_I2 run
    REF1 = 'No_HOI_I2'
    extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
    df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
                                             extra_str=extra_str)
    # - Get spatial plots
    # Plot up emissions
    plot_up_surface_emissions(dsDH=dsDH)
    # - Do differences plots
    # - Look at the HOI/I2 surface values and IO.
    # Species to look at?
    specs = ['O3', 'NO2', 'IO', 'HOI', 'I2']
    # Chance vs. ML_Iodide
    AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Chance2014',
                                            NEW='ML_Iodide', specs=specs,
                                            update_PyGChem_format2COARDS=True)
    # Macdonald vs. ML_Iodide
    AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
                                            NEW='ML_Iodide', specs=specs,
                                            update_PyGChem_format2COARDS=True)
    # Macdonald vs. Chance
    AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
                                            NEW='Chance2014', specs=specs,
                                            update_PyGChem_format2COARDS=True)
    # Macdonald vs. No_HOI_I2
    AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
                                            NEW='No_HOI_I2', specs=specs,
                                            update_PyGChem_format2COARDS=True)
    # ML_Iodide vs. No_HOI_I2
    AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='No_HOI_I2',
                                            NEW='ML_Iodide', specs=specs,
                                            update_PyGChem_format2COARDS=True)
#    ds_dict=dsD.copy(); BASE='Macdonald2014'; NEW='ML_Iodide'
    # - Get production figures.
    # Surface ozone figure - made in powerpoint for now...
    # Plot up emissions for EGU presentation
    BASE = 'ML_Iodide'
    DIFF1 = 'Chance2014'
    DIFF2 = 'Macdonald2014'
    plot_up_EGU_fig05_emiss_change(ds_dict=dsD, BASE=BASE, DIFF1=DIFF1,
                                   DIFF2=DIFF2,
                                   update_PyGChem_format2COARDS=True)


def plot_up_EGU_fig05_emiss_change(ds_dict=None, levs=[1], specs=[],
                                   BASE='', DIFF1='', DIFF2='',
                                   prefix='IJ_AVG_S__',
                                   update_PyGChem_format2COARDS=False):
    """
    Plot up the change in emissions for EGU poster
    """
    import cartopy.crs as ccrs
    import matplotlib.pyplot as plt
    # Species to plot
    vars2use = [prefix+i for i in specs]
    unit = None
    PDFfilenameStr = 'Oi_surface_change_{}_vs_{}_lev_{:0>2}'
    # Set datasets to use and just include the variables to plot
    title1 = BASE
    title2 = DIFF1
    title3 = DIFF2  # Fixed: the original re-assigned title2 here
    ds1 = ds_dict[BASE][vars2use].copy()
    ds2 = ds_dict[DIFF1][vars2use].copy()
    ds3 = ds_dict[DIFF2][vars2use].copy()  # Fixed: the original overwrote ds2
    # Average over time
    print(ds1, ds2, ds3)
    ds1 = ds1.mean(dim='time')
    ds2 = ds2.mean(dim='time')
    ds3 = ds3.mean(dim='time')
    # Remove vestigial coordinates.
    # (e.g. the time_0 coord... what is this?)
    vars2drop = ['time_0']
    dsL = [ds1, ds2, ds3]
    for var2drop in vars2drop:
        for n, ds in enumerate(dsL):
            CoordVars = [i for i in ds.coords]
            if var2drop in CoordVars:
                ds = ds.drop(var2drop)
            dsL[n] = ds
    ds1, ds2, ds3 = dsL
    # Update dimension names
    if update_PyGChem_format2COARDS:
        ds1 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds1)
        ds2 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds2)
        ds3 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds3)
    # Setup plot (map with mask present)
    fig = plt.figure(figsize=(10, 6))
    vmin = -100
    vmax = 100
    # Add initial plot
    axn = [1, 1, 1]
    ax = fig.add_subplot(*axn, projection=ccrs.Robinson(), aspect='auto')
    # NOTE: the original line read 'ax.plot.imshow(...)', which is not a
    # valid call; plotting the first requested species from the BASE run is
    # assumed here
    ds1[vars2use[0]].plot.imshow(x='lon', y='lat', ax=ax, vmin=vmin,
                                 vmax=vmax, transform=ccrs.PlateCarree())
    # NOTE: 'savename' was undefined in the original; it is built from the
    # filename template above (assumption)
    savename = PDFfilenameStr.format(BASE, DIFF1, levs[0])
    plt.title(savename)
    plt.savefig(savename+'.png')
    plt.close()


def evalulate_burdens_and_surface_conc(run_dict=None, extra_str='', REF1=None,
                                       REF2=None, REF_wd=None, res='4x5',
                                       trop_limit=True, save2csv=True,
                                       prefix='GC_', run_names=None,
                                       debug=False):
    """
    Check general statistics on the CTM model runs
    """
    # Extract names and locations of data
    if isinstance(run_dict, type(None)):
        run_dict = get_run_dict4EGU_runs()
    if isinstance(run_names, type(None)):
        run_names = sorted(run_dict.keys())
    wds = [run_dict[i] for i in run_names]
    # Mass unit scaling
    mass_scale = 1E3
    mass_unit = 'Tg'
    # v/v scaling?
    ppbv_unit = 'ppbv'
    ppbv_scale = 1E9
    pptv_unit = 'pptv'
    pptv_scale = 1E12
    # Get shared variables from a single model run
    if isinstance(REF_wd, type(None)):
        REF_wd = wds[0]
    # Get time in the troposphere diagnostic
    t_p = AC.get_GC_output(wd=REF_wd, vars=[u'TIME_TPS__TIMETROP'],
                           trop_limit=True)
    # Temperature
    K = AC.get_GC_output(wd=REF_wd, vars=[u'DAO_3D_S__TMPU'],
                         trop_limit=True)
    # Airmass
    a_m = AC.get_air_mass_np(wd=REF_wd, trop_limit=True)
    # Surface area?
    s_area = AC.get_surface_area(res)[..., 0]  # m2 land map
    # ----
    # - Now build analysis in pd.DataFrame
    #
    # - Tropospheric burdens?
    # Get tropospheric burden for each run
    varname = 'O3 burden ({})'.format(mass_unit)
    ars = [AC.get_O3_burden(i, t_p=t_p).sum() for i in wds]
    df = pd.DataFrame(ars, columns=[varname], index=run_names)
    # Get NO2 burden
    NO2_varname = 'NO2 burden ({})'.format(mass_unit)
    ars = [AC.get_trop_burden(spec='NO2', t_p=t_p, wd=i, all_data=False).sum()
           for i in wds]
    # Convert to N equivalent
    ars = [i/AC.species_mass('NO2')*AC.species_mass('N') for i in ars]
    df[NO2_varname] = ars
    # Get NO burden
    NO_varname = 'NO burden ({})'.format(mass_unit)
    ars = [AC.get_trop_burden(spec='NO', t_p=t_p, wd=i, all_data=False).sum()
           for i in wds]
    # Convert to N equivalent
    ars = [i/AC.species_mass('NO')*AC.species_mass('N') for i in ars]
    df[NO_varname] = ars
    # Combine NO and NO2 to get NOx burden
    NOx_varname = 'NOx burden ({})'.format(mass_unit)
    df[NOx_varname] = df[NO2_varname] + df[NO_varname]
    # Get HOI burden
    varname = 'HOI burden ({})'.format(mass_unit)
    ars = [AC.get_trop_burden(spec='HOI', t_p=t_p, wd=i, all_data=False).sum()
           for i in wds]
    # Convert to I equivalent
    ars = [i/AC.species_mass('HOI')*AC.species_mass('I') for i in ars]
    df[varname] = ars
    # Get I2 burden
    varname = 'I2 burden ({})'.format(mass_unit)
    ars = [AC.get_trop_burden(spec='I2', t_p=t_p, wd=i, all_data=False).sum()
           for i in wds]
    # Convert to I equivalent
    ars = [i/AC.species_mass('I2')*AC.species_mass('I') for i in ars]
    df[varname] = ars
    # Get IO burden (the original comment here said I2)
    varname = 'IO burden ({})'.format(mass_unit)
    ars = [AC.get_trop_burden(spec='IO', t_p=t_p, wd=i, all_data=False).sum()
           for i in wds]
    # Convert to I equivalent
    ars = [i/AC.species_mass('IO')*AC.species_mass('I') for i in ars]
    df[varname] = ars
    # Scale units
    for col_ in df.columns:
        if 'Tg' in col_:
            df.loc[:, col_] = df.loc[:, col_].values/mass_scale
    # - Surface concentrations?
    # Surface ozone
    O3_sur_varname = 'O3 surface ({})'.format(ppbv_unit)
    ars = [AC.get_avg_surface_conc_of_X(spec='O3', wd=i, s_area=s_area)
           for i in wds]
    df[O3_sur_varname] = ars
    # Surface NOx
    NO_sur_varname = 'NO surface ({})'.format(ppbv_unit)
    ars = [AC.get_avg_surface_conc_of_X(spec='NO', wd=i, s_area=s_area)
           for i in wds]
    df[NO_sur_varname] = ars
    NO2_sur_varname = 'NO2 surface ({})'.format(ppbv_unit)
    ars = [AC.get_avg_surface_conc_of_X(spec='NO2', wd=i, s_area=s_area)
           for i in wds]
    df[NO2_sur_varname] = ars
    NOx_sur_varname = 'NOx surface ({})'.format(ppbv_unit)
    df[NOx_sur_varname] = df[NO2_sur_varname] + df[NO_sur_varname]
    # Surface HOI
    HOI_sur_varname = 'HOI surface ({})'.format(pptv_unit)
    ars = [AC.get_avg_surface_conc_of_X(spec='HOI', wd=i, s_area=s_area)
           for i in wds]
    df[HOI_sur_varname] = ars
    # Surface I2
    I2_sur_varname = 'I2 surface ({})'.format(pptv_unit)
    ars = [AC.get_avg_surface_conc_of_X(spec='I2', wd=i, s_area=s_area)
           for i in wds]
    df[I2_sur_varname] = ars
    # Surface IO (the original reused the I2 variable name here)
    IO_sur_varname = 'IO surface ({})'.format(pptv_unit)
    ars = [AC.get_avg_surface_conc_of_X(spec='IO', wd=i, s_area=s_area)
           for i in wds]
    df[IO_sur_varname] = ars
    # - Scale units
    for col_ in df.columns:
        if 'ppbv' in col_:
            df.loc[:, col_] = df.loc[:, col_].values*ppbv_scale
        if 'pptv' in col_:
            df.loc[:, col_] = df.loc[:, col_].values*pptv_scale
    # - Processing and save?
    # Calculate % change from base case for each variable
    if not isinstance(REF1, type(None)):
        for col_ in df.columns:
            pcent_var = col_+' (% vs. {})'.format(REF1)
            df[pcent_var] = (df[col_]-df[col_][REF1]) / df[col_][REF1] * 100
    if not isinstance(REF2, type(None)):
        for col_ in df.columns:
            pcent_var = col_+' (% vs. {})'.format(REF2)
            df[pcent_var] = (df[col_]-df[col_][REF2]) / df[col_][REF2] * 100
    # Re-order columns (reindex_axis was removed from pandas; use reindex)
    df = df.reindex(sorted(df.columns), axis=1)
    # Reorder index
    df = df.T.reindex(sorted(df.T.columns), axis=1).T
    # Now round the numbers
    df = df.round(3)
    # Save csv to disk
    csv_filename = '{}_summary_statistics{}.csv'.format(prefix, extra_str)
    df.to_csv(csv_filename)
    # Return the DataFrame too
    return df


def Check_sensitivity_of_HOI_I2_param2WS():
    """
    Check the sensitivity of the Carpenter et al 2013 parameterisation to
    wind speed
    """
    import seaborn as sns
    sns.set(color_codes=True)
    sns.set_context("paper", font_scale=1.75)
    import matplotlib.pyplot as plt

    # Core calculation for HOI emission
    def calc_HOI_flux_eqn_20(I=None, O3=None, WS=None, ):
        """ Eqn 20 from Carpenter et al 2013 """
        return O3 * ((4.15E5 * (np.sqrt(I) / WS)) -
                     (20.6 / WS) - (2.36E4 * np.sqrt(I)))

    # Slightly simpler calculation for HOI emission
    def calc_HOI_flux_eqn_21(I=None, O3=None, WS=None, ):
        """ Eqn 21 from Carpenter et al 2013 """
        return O3 * np.sqrt(I) * ((3.56E5/WS) - 2.16E4)
    # Wind speed values to evaluate
    WS_l = np.arange(5, 40, 0.1)
    # - Plot up
    # Eqn 20
    Y = [calc_HOI_flux_eqn_20(I=100E-9, O3=20, WS=i) for i in WS_l]
    plt.plot(WS_l, Y, label='Eqn 20')
    # Eqn 21
    Y = [calc_HOI_flux_eqn_21(I=100E-9, O3=20, WS=i) for i in WS_l]
    plt.plot(WS_l, Y, label='Eqn 21')
    # Update aesthetics of plot and save
    plt.title('HOI flux vs. wind speed')
    plt.ylabel('HOI flux, nmol m$^{-2}$ d$^{-1}$')
    plt.xlabel('Wind speed (m s$^{-1}$)')
    plt.legend()
    plt.show()


if __name__ == "__main__":
    main()
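# The scatter plots above repeatedly call AC.get_linear_ODR to add an
# orthogonal distance regression (ODR) line. For reference, a minimal sketch
# of such a helper, built on scipy.odr, is given below. The name and
# keywords mirror the calls above, but this is an assumed re-implementation
# for illustration, not the actual AC_tools code.
def _sketch_get_linear_ODR(x=None, y=None, xvalues=None, return_model=False,
                           maxit=10000):
    """Fit y = beta0*x + beta1 by orthogonal distance regression (sketch)"""
    from scipy import odr
    # Define a linear model and fit it by ODR (errors in both x and y)
    linear = odr.Model(lambda beta, x_: beta[0] * x_ + beta[1])
    output = odr.ODR(odr.Data(x, y), linear, beta0=[1.0, 0.0],
                     maxit=maxit).run()
    if return_model:
        return output
    # Return the x values used and the fitted y values, as the plots expect
    return xvalues, output.beta[0] * xvalues + output.beta[1]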
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 198, 1212, 8265, 4909, 3781, 1760, 329, 262, 10692, 41080, 485, 357, 46, 72, 8133, 1628, 198, 198, 1212, 3407, 1...
2.032489
71,194
import pandas as pd

from . import cashUtils as utils
[ 11748, 19798, 292, 355, 279, 67, 198, 6738, 764, 1330, 5003, 18274, 4487, 355, 3384, 4487, 628, 628 ]
3.111111
18
from distutils.core import Extension, setup

from Cython.Build import cythonize
from Cython.Compiler import Options

Options.docstrings = False

ext = Extension(name="cyt_module", sources=["cyt_module.pyx"])

setup(
    ext_modules=cythonize(ext),
)
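# A typical (assumed) workflow for building and importing the extension
# defined above -- the module name comes from the Extension() call:
#
#   $ python setup.py build_ext --inplace
#   $ python -c "import cyt_module"
#
# Setting Options.docstrings = False strips docstrings from the generated C
# code, which slightly reduces the size of the compiled module.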
[ 6738, 1233, 26791, 13, 7295, 1330, 27995, 11, 9058, 198, 198, 6738, 327, 7535, 13, 15580, 1330, 3075, 400, 261, 1096, 198, 6738, 327, 7535, 13, 7293, 5329, 1330, 18634, 198, 198, 29046, 13, 15390, 37336, 796, 10352, 198, 198, 2302, 79...
2.976471
85
from flask import request, url_for from flask_api import FlaskAPI, status, exceptions from flask_cors import CORS, cross_origin import torch import json import numpy as np import torch from modeling_gptneo import GPTNeoForCausalLM from modeling_gpt2 import GPT2LMHeadModel from transformers import ( GPTNeoConfig, GPT2Config, GPT2Tokenizer ) import transformers from nltk import sent_tokenize import nltk nltk.download('punkt') ### Loading the model code_desired = "true" code_undesired = "false" model_type = 'gpt2' gen_type = "gedi" gen_model_name_or_path = "EleutherAI/gpt-neo-2.7B" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") MODEL_CLASSES = {"gptneo": (GPTNeoConfig, GPTNeoForCausalLM, GPT2Tokenizer), "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),} config_class_n, model_class_n, tokenizer_class_n = MODEL_CLASSES["gptneo"] config_class_2, model_class_2, tokenizer_class_2 = MODEL_CLASSES["gpt2"] tokenizer = tokenizer_class_n.from_pretrained('EleutherAI/gpt-neo-2.7B', do_lower_case=False, additional_special_tokens=['[Prompt]']) model = model_class_n.from_pretrained(gen_model_name_or_path, load_in_half_prec=True) model = model.to(device) model = model.float() model.config.use_cache=True model.resize_token_embeddings(len(tokenizer)) gedi_model_name_or_path = 'fortune_gedi' gedi_model = model_class_2.from_pretrained(gedi_model_name_or_path) gedi_model.to(device) gedi_model.resize_token_embeddings(len(tokenizer)) gedi_model.resize_token_embeddings(50258) wte = gedi_model.get_input_embeddings() wte.weight.requires_grad=False wte.weight[len(tokenizer)-1, :]= wte.weight[len(tokenizer)-2, :] gedi_model.set_input_embeddings(wte) embed_cont = torch.load('./result_embedding_cont') embed_infill_front = torch.load('./result_embedding_infill_front') embed_infill_back = torch.load('./result_embedding_infill_back') embed_recognition = torch.load('./result_embedding_recognition') recognition_score = torch.load('./recog_score') model.set_input_embeddings(embed_cont.wte) # setting arguments for generation #max generation length gen_length = 40 #omega from paper, higher disc_weight means more aggressive topic steering disc_weight = 30 #1 - rho from paper, should be between 0 and 1 higher filter_p means more aggressive topic steering filter_p = 0.8 #tau from paper, preserves tokens that are classified as correct topic target_p = 0.8 #hyperparameter that determines class prior, set to uniform by default class_bias = 0 if gen_length>1024: length = 1024 else: length = gen_length def cut_into_sentences(text, do_cleanup=True): """ Cut text into sentences. \n are also regarded as a sentence. :param do_cleanup: if True, do cleanups. :param text: input text. :return: sentences. 
""" all_sentences = [] # print(text) # sentences_raw = text.split("\n") text = text.replace("[Prompt] [Prompt] [Prompt] [Prompt] ", "[Prompt] [Prompt] [Prompt] ") sentences_raw = text.split('[Prompt] [Prompt] [Prompt]') text = sentences_raw[len(sentences_raw)-1] text = text.replace("Start:", " ") text = text.replace("Characters:", " ") text = text.replace("Story after start:", " ") sentences_raw = [text.replace("\n", " ")] result = [] for item in sentences_raw: sentence_in_item = sent_tokenize(item) for item2 in sentence_in_item: all_sentences.append(item2.strip()) if do_cleanup: for item in all_sentences: item = item.replace('<|endoftext|>', '') if len(item) > 2: result.append(item) else: result = all_sentences return result def generate_one_sentence(sentence, control, length=50, disc_weight=30, temperature=0.8, gpt3_id=None): """ Generate one sentence based on input data. :param sentence: (string) context (prompt) used. :param topic: (dict) {topic: weight, topic:weight,...} topic that the sentence need to steer towards. :param extra_args: (dict) a dictionary that certain key will trigger additional functionality. disc_weight: Set this value to use a different control strength than default. get_gen_token_count: Return only how many tokens the generator has generated (for debug only). :return: sentence generated, or others if extra_args are specified. """ secondary_code = control if sentence == "": print("Prompt is empty! Using a dummy sentence.") sentence = "." # Specify prompt below prompt = sentence # Calculate oroginal input length. length_of_prompt = len(sentence) start_len = 0 text_ids = tokenizer.encode(prompt) length_of_prompt_in_tokens = len(text_ids) # print('text ids', text_ids) encoded_prompts = torch.LongTensor(text_ids).unsqueeze(0).to(device) if type(control) is str: multi_code = tokenizer.encode(secondary_code) elif type(control) is dict: multi_code = {} for item in secondary_code: encoded = tokenizer.encode(item)[0] # only take the first one multi_code[encoded] = secondary_code[item] else: raise NotImplementedError("topic data type of %s not supported... Supported: (str,dict)" % type(control)) # If 1, generate sentences towards a specific topic. 
attr_class = 1 print(multi_code) if int(control)!=-1: if gpt3_id is None: generated_sequence = model.generate(input_ids=encoded_prompts, pad_lens=None, max_length=length + length_of_prompt_in_tokens, top_k=None, top_p=None, repetition_penalty=1.2, rep_penalty_scale=10, eos_token_ids=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id, bad_token_ids = tokenizer.all_special_ids, do_sample=True, temperature = temperature, penalize_cond=True, gedi_model=gedi_model, tokenizer=tokenizer, disc_weight=disc_weight, filter_p=filter_p, target_p=target_p, class_bias=class_bias, attr_class=attr_class, code_0=code_undesired, code_1=code_desired, multi_code=multi_code, ) else: generated_sequence = model.generate(input_ids=encoded_prompts, pad_lens=None, max_length=length + length_of_prompt_in_tokens, top_k=None, top_p=None, repetition_penalty=1.2, rep_penalty_scale=10, eos_token_ids=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id, bad_token_ids = tokenizer.all_special_ids, do_sample=True, temperature = temperature, penalize_cond=True, gedi_model=gedi_model, tokenizer=tokenizer, disc_weight=disc_weight, filter_p=filter_p, target_p=target_p, class_bias=class_bias, attr_class=attr_class, code_0=code_undesired, code_1=code_desired, multi_code=multi_code, gpt3_api_key=gpt3_id, ) text = tokenizer.decode(generated_sequence.tolist()[0]) else: if gpt3_id is None: generated_sequence = model.generate(input_ids=encoded_prompts, pad_lens=None, max_length=length + length_of_prompt_in_tokens, top_k=None, top_p=None, repetition_penalty=1.2, rep_penalty_scale=10, eos_token_ids=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id, bad_token_ids = tokenizer.all_special_ids, do_sample=True, temperature = temperature, penalize_cond=True, gedi_model=None, tokenizer=tokenizer, disc_weight=disc_weight, class_bias=class_bias, attr_class=attr_class, ) text = tokenizer.decode(generated_sequence.tolist()[0]) else: import openai openai.api_key = gpt3_id completion = openai.Completion() response = completion.create(prompt=prompt, engine="curie", max_tokens=length, temperature=temperature,) text = response["choices"][0]["text"] text = cut_into_sentences(text) if len(text) == 0: print("Warning! No text generated.") return "" all_gen_text = text[0] return all_gen_text import numpy as np def continuing_generation(prompts, generation_controls, characters, temperatures, gpt3_id=None, disc_weight=30): """ Explanations on controls prompts: The prompt to be input. This is a list of sentences. generation_controls: Generation control in the list. If no control is given, -1 is given. 
""" model.set_input_embeddings(embed_cont) prompts = list(prompts) generated = [] character_prepend = '[Prompt][Prompt][Prompt]' for idx, character in enumerate(characters): if idx==0: character_prepend = character_prepend+character else: character_prepend = character_prepend+' '+character if idx != len(characters)-1: character_prepend = character_prepend + ',' prompt_start_idx = 0 for c_idx, generation_control in enumerate(generation_controls): temperature = temperatures[c_idx] while True: prompt_postpend = '[Prompt][Prompt][Prompt]' # prompt_postpend = 'Story: ' for i in range(prompt_start_idx, len(prompts)): prompt_postpend = prompt_postpend + prompts[i] if i != len(prompts)-1: prompt_postpend = prompt_postpend + ' ' # continue else: prompt_postpend = prompt_postpend prompt_input = prompt_postpend+character_prepend+ '[Prompt][Prompt][Prompt]' prompt_encoded = tokenizer.encode(prompt_input) length_of_prompt_in_tokens = len(prompt_encoded) if length_of_prompt_in_tokens>2048: prompt_start_idx = prompt_start_idx + 1 else: break print(prompt_input, generation_control) gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight) prompts.append(gen_sent) generated.append(gen_sent) for gen in generated: print('gen:', gen) print() return generated def infilling_generation(pre_prompts, post_prompts, generation_controls, characters, temperatures, is_front, gpt3_id=None, disc_weight=30): """ Explanations on controls prompts: The prompt to be input. This is a list of sentences. generation_controls: Generation control in the list. If no control is given, -1 is given. """ pre_prompts = list(pre_prompts) post_prompts = list(post_prompts) right = '' for idx, pp in enumerate(post_prompts): right = right + pp if idx!=len(post_prompts)-1: right = right + ' ' left = '' for idx, pp in enumerate(pre_prompts): left = left + pp if idx!=len(post_prompts)-1: left = left + ' ' generated = ['']*len(generation_controls) # gen_counter = 0 for gen_counter in range(len(generation_controls)): if is_front: generation_control = generation_controls[int(gen_counter/2)] temperature = temperatures[int(gen_counter/2)] model.set_input_embeddings(embed_infill_front) prompt_input = '[Prompt][Prompt][Prompt]'+right+'[Prompt][Prompt][Prompt]'+left+'[Prompt][Prompt][Prompt][Prompt]' gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight) generated[int(gen_counter/2)] =gen_sent print(gen_sent) left = left + ' ' + gen_sent else: generation_control = generation_controls[len(generated)-1-int(gen_counter/2)] temperature = temperatures[len(generated)-1-int(gen_counter/2)] model.set_input_embeddings(embed_infill_back) prompt_input = '[Prompt][Prompt][Prompt]'+left+'[Prompt][Prompt][Prompt]'+right + '[Prompt][Prompt][Prompt][Prompt]' gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight) generated[len(generated)-1-int(gen_counter/2)] =gen_sent print(gen_sent) right = gen_sent+' '+right for gen in generated: print('gen', gen) print() return generated app = FlaskAPI(__name__) # run_with_ngrok(app) CORS(app, resources={r"/*": {"origins": "*"}}) app.config['CORS_HEADERS'] = 'Content-Type' # Below is temporary function with sentiment analysis. # Hence, it needs to be updated later. if __name__=="__main__": app.run(host='0.0.0.0', port=11080)
[ 6738, 42903, 1330, 2581, 11, 19016, 62, 1640, 198, 6738, 42903, 62, 15042, 1330, 46947, 17614, 11, 3722, 11, 13269, 198, 6738, 42903, 62, 66, 669, 1330, 327, 20673, 11, 3272, 62, 47103, 198, 11748, 28034, 198, 11748, 33918, 198, 198, ...
1.915305
8,017
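The two generators in the row above take parallel lists of controls and temperatures. As a quick illustration of how they are meant to be called, here is a minimal hypothetical driver: the sentences, character names, and control ids are invented, and it assumes the module-level model and tokenizer set up earlier in that file.

# Minimal sketch of driving the generators above; all concrete inputs are invented.
story_so_far = ["The knight rode into the silent village.",
                "No lights burned in any window."]
controls = [2, -1, 2]        # one control id per sentence; -1 = uncontrolled
temps = [0.8, 1.0, 0.8]      # one temperature per sentence

# Continue the story with three new sentences.
new_sentences = continuing_generation(prompts=story_so_far,
                                      generation_controls=controls,
                                      characters=["Knight", "Villager"],
                                      temperatures=temps)

# Fill a gap between the two existing sentences, generating left-to-right.
middle = infilling_generation(pre_prompts=story_so_far[:1],
                              post_prompts=story_so_far[1:],
                              generation_controls=controls,
                              characters=["Knight"],
                              temperatures=temps,
                              is_front=True)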
""" post_bands: post_bands extract data from static-o_DS3_EBANDS.agr and it will build the kpoints length: xcoord_k from the high symmetry line and the corresponding basis for reciprocal space. b1 = 1 / a1, b2 = 1 / a2 and b3 = 1 / a3. """ import os import numpy as np import matplotlib.pyplot as plt
[ 37811, 201, 198, 7353, 62, 21397, 25, 201, 198, 220, 220, 220, 1281, 62, 21397, 7925, 1366, 422, 9037, 12, 78, 62, 5258, 18, 62, 30195, 1565, 5258, 13, 363, 81, 290, 340, 481, 1382, 201, 198, 220, 220, 220, 262, 479, 13033, 4129, ...
2.585938
128
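The docstring above only states the reciprocal-basis convention; as a hedged sketch of how the cumulative path coordinate xcoord_k can be accumulated under that convention, consider the snippet below. The lattice constants and fractional k-points are invented for illustration.

import numpy as np

a1, a2, a3 = 5.43, 5.43, 5.43            # hypothetical lattice constants
b = np.array([1 / a1, 1 / a2, 1 / a3])   # b_i = 1 / a_i, per the docstring

# Invented fractional k-points along a high-symmetry line.
kpoints = np.array([[0.0, 0.0, 0.0],     # Gamma
                    [0.5, 0.0, 0.0],     # X
                    [0.5, 0.5, 0.0]])    # M

# Cumulative distance along the path in reciprocal space.
segments = np.linalg.norm(np.diff(kpoints * b, axis=0), axis=1)
xcoord_k = np.concatenate([[0.0], np.cumsum(segments)])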
import logging import logging.config logging.config.fileConfig('./instance/logging.conf') # create logger logger = logging.getLogger('Cognitive-API') # 'application' code ''' logger.debug('debug message') logger.info('info message') logger.warning('warn message') logger.error('error message') logger.critical('critical message') '''
[ 198, 11748, 18931, 198, 11748, 18931, 13, 11250, 198, 198, 6404, 2667, 13, 11250, 13, 7753, 16934, 7, 4458, 14, 39098, 14, 6404, 2667, 13, 10414, 11537, 198, 198, 2, 2251, 49706, 198, 6404, 1362, 796, 18931, 13, 1136, 11187, 1362, 107...
3.209524
105
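fileConfig above reads an INI-style file from ./instance/logging.conf whose contents are not shown here. The dictConfig call below is only an assumed, equivalent shape for such a configuration; the logger name 'Cognitive-API' is the one the code above asks for.

import logging
import logging.config

# Assumed stand-in for ./instance/logging.conf, expressed with dictConfig.
logging.config.dictConfig({
    'version': 1,
    'formatters': {
        'simple': {'format': '%(asctime)s %(name)s %(levelname)s %(message)s'},
    },
    'handlers': {
        'console': {'class': 'logging.StreamHandler',
                    'formatter': 'simple',
                    'level': 'DEBUG'},
    },
    'loggers': {
        'Cognitive-API': {'handlers': ['console'], 'level': 'DEBUG'},
    },
})

logging.getLogger('Cognitive-API').info('logging configured')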
import json import unittest from buter.app.services import load_from_file, detect_app_name from buter.server import docker from buter.util.Utils import unzip from config import getConfig if __name__ == '__main__': unittest.main()
[ 11748, 33918, 198, 11748, 555, 715, 395, 198, 198, 6738, 475, 263, 13, 1324, 13, 30416, 1330, 3440, 62, 6738, 62, 7753, 11, 4886, 62, 1324, 62, 3672, 198, 6738, 475, 263, 13, 15388, 1330, 36253, 198, 6738, 475, 263, 13, 22602, 13, ...
3.051282
78
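The test module above imports several service helpers but defines no test cases before calling unittest.main(). A hedged skeleton of what one might look like is below; the behaviour asserted for detect_app_name is an assumption, not documented buter behaviour, and the file name is invented.

import unittest

from buter.app.services import detect_app_name


class AppServiceTest(unittest.TestCase):
    """Skeleton test; the expected behaviour here is assumed, not documented."""

    def test_detect_app_name_returns_string(self):
        # Assumption: detect_app_name maps an artifact file name to an app name.
        name = detect_app_name('demo-app-1.0.zip')
        self.assertIsInstance(name, str)


if __name__ == '__main__':
    unittest.main()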
# -*- coding: utf-8 -*-
__author__ = 'Mason'

import re
import zipfile

z = zipfile.ZipFile('channel.zip', mode='r')
number = '90052'
comments = []
while True:
    # Each file's text names the next file in the chain.
    text = z.read(number + '.txt').decode('utf-8')
    numbers = re.findall('([0-9]+)', text)
    print(numbers)
    try:
        number = numbers[0]
        comments.append(z.getinfo(number + '.txt').comment.decode('utf-8'))
    except IndexError:
        # No number found: end of the chain.
        break
print(''.join(comments))
[ 2, 532, 9, 12, 66, 7656, 25, 40477, 12, 23, 12, 9, 12, 198, 834, 9800, 834, 796, 705, 44, 888, 6, 198, 198, 11748, 302, 198, 11748, 19974, 7753, 198, 198, 89, 796, 19974, 7753, 13, 41729, 8979, 10786, 17620, 13, 13344, 3256, 423...
2.30814
172
from products.product import Product from notifications.notification import Notification from clients.client import Client
[ 6738, 3186, 13, 11167, 1330, 8721, 198, 6738, 19605, 13, 1662, 2649, 1330, 42808, 198, 6738, 7534, 13, 16366, 1330, 20985, 628 ]
5.636364
22
# https://atcoder.jp/contests/joi2008yo/tasks/joi2008yo_e
R, C = list(map(int, input().split()))
senbei_pos = []
ans = 0
for _ in range(R):
    pos = list(map(int, input().split()))
    senbei_pos.append(pos)

# Try every pattern of row flips (2**R patterns, R <= 10).
for bit in range(2**R):
    total = 0
    copied_pos = senbei_pos[:]
    # Pad the bit pattern to 10 digits; rows are read from the tail, so the
    # flip flag for row k is flip_row_pos[10 - R + k].
    flip_row_pos = list(format(bit, '010b'))
    for j in range(C):
        column = [p[j] for p in copied_pos]
        # For each column, flipping it or not yields max(one_count, zero_count).
        one_count = sum([column[k] ^ int(flip_row_pos[10 - R + k]) for k in range(R)])
        zero_count = R - one_count
        total += max(zero_count, one_count)
    ans = max(ans, total)
print(ans)
[ 2, 3740, 1378, 265, 66, 12342, 13, 34523, 14, 3642, 3558, 14, 7639, 72, 11528, 8226, 14, 83, 6791, 14, 7639, 72, 11528, 8226, 62, 68, 198, 49, 11, 327, 796, 1351, 7, 8899, 7, 600, 11, 5128, 22446, 35312, 3419, 4008, 198, 6248, 1...
2.041935
310
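The solution above enumerates all 2**R patterns of row flips, then flips each column greedily. As a self-contained sanity check of that counting logic, here is the same idea on an invented 2x3 grid:

# Worked mini-example of the row-flip + column-flip counting, on made-up data.
grid = [[1, 0, 1],
        [0, 0, 1]]
R, C = 2, 3

best = 0
for bit in range(2 ** R):
    flips = [(bit >> k) & 1 for k in range(R)]  # flip flag per row
    total = 0
    for j in range(C):
        ones = sum(grid[k][j] ^ flips[k] for k in range(R))
        total += max(ones, R - ones)  # flip the column if that helps
    best = max(best, total)
print(best)  # 5 for this grid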
#!/usr/bin/env python3
import requests
import os
import glob
import telegram
from time import sleep

token = "token"  # placeholder; the real bot token goes here
bot = telegram.Bot(token=token)

# Audio is fetched via bash: youtube-dl -x --audio-format mp3 <link>,
# and the resulting mp3 is sent back through the bot.
# BotHandler, recebendoMsg and main() are defined elsewhere in this project.
mp3_bot = BotHandler(token)

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        exit()
[ 2, 48443, 14629, 14, 24330, 21015, 18, 198, 11748, 7007, 198, 11748, 28686, 198, 11748, 15095, 198, 11748, 573, 30536, 198, 6738, 640, 1330, 3993, 198, 30001, 796, 366, 30001, 1, 198, 13645, 796, 573, 30536, 13, 20630, 7, 30001, 28, 3...
2.435714
140
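The reconstructed comment above only names the youtube-dl command; the function below is a hedged sketch of that download-and-send flow. download_and_send, the chat id, and the URL handling are invented, and it assumes the python-telegram-bot Bot instance created above.

import glob
import os
import subprocess


def download_and_send(url, chat_id):
    # Extract the audio track as mp3 (mirrors the shell command in the comment).
    subprocess.run(["youtube-dl", "-x", "--audio-format", "mp3", url], check=True)
    # Pick the newest mp3 in the working directory and send it through the bot.
    newest = max(glob.glob("*.mp3"), key=os.path.getmtime)
    with open(newest, "rb") as audio:
        bot.send_audio(chat_id=chat_id, audio=audio)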
#!/usr/bin/python # ex:set fileencoding=utf-8: from __future__ import unicode_literals from djangobmf.views import ModuleCreateView from djangobmf.views import ModuleUpdateView from djangobmf.views import ModuleDetailView from .forms import BMFTeamUpdateForm from .forms import BMFTeamCreateForm
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 409, 25, 2617, 2393, 12685, 7656, 28, 40477, 12, 23, 25, 198, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 42625, 648, 672, 76, 69, 13, 33571, 1330, ...
3.145833
96
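The module above only imports views and forms; the subclasses that wire them together are not shown. The sketch below is one assumed way they could be connected: the Team view names are guesses derived from the form names, and form_class is the standard Django class-based-view attribute, not a documented djangobmf requirement.

# Assumed wiring of the imported forms to the imported djangobmf views.
class TeamCreateView(ModuleCreateView):
    form_class = BMFTeamCreateForm


class TeamUpdateView(ModuleUpdateView):
    form_class = BMFTeamUpdateForm


class TeamDetailView(ModuleDetailView):
    pass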
from __future__ import division, print_function import sys, os, glob, time, warnings, gc # import matplotlib.pyplot as plt import numpy as np from astropy.table import Table, vstack, hstack import fitsio from astropy.io import fits from scipy.interpolate import interp1d output_path = '/global/cfs/cdirs/desi/users/rongpu/desi_mask/w1_bright-2mass-lrg_mask_v1.fits' # WISE mask w1_mags = [0, 0.5, 1, 1.5, 2, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0] w1_radii = [600, 600, 550, 500, 475, 425, 400, 400, 390, 392.5, 395, 370, 360, 330, 275, 240, 210, 165, 100, 75, 60] w1_max_mag = 10.0 f_radius = interp1d(w1_mags, w1_radii, bounds_error=False, fill_value='extrapolate') wise_path = '/global/cfs/cdirs/desi/users/rongpu/desi_mask/w1_bright-2mass-13.3-dr9.fits' wise = Table(fitsio.read(wise_path)) # print(len(wise)) wise['w1ab'] = np.array(wise['W1MPRO']) + 2.699 mask = wise['w1ab']<w1_max_mag wise['radius'] = 0. wise['radius'][mask] = f_radius(wise['w1ab'][mask]) wise.write(output_path)
[ 6738, 11593, 37443, 834, 1330, 7297, 11, 3601, 62, 8818, 198, 11748, 25064, 11, 28686, 11, 15095, 11, 640, 11, 14601, 11, 308, 66, 198, 2, 1330, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 1...
2.199153
472
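With bounds_error=False and fill_value='extrapolate', the interp1d lookup above extrapolates the mask radius linearly outside the tabulated W1 range. A small self-contained check of that behaviour, reusing only the table defined in the script:

import numpy as np
from scipy.interpolate import interp1d

w1_mags = [0, 0.5, 1, 1.5, 2, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0,
           5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0]
w1_radii = [600, 600, 550, 500, 475, 425, 400, 400, 390, 392.5, 395,
            370, 360, 330, 275, 240, 210, 165, 100, 75, 60]

f_radius = interp1d(w1_mags, w1_radii, bounds_error=False,
                    fill_value='extrapolate')

# Tabulated point, interpolated midpoint, and an extrapolated bright source.
print(f_radius(1.0))    # 550.0, exactly the table value
print(f_radius(1.25))   # 525.0, halfway between 550 and 500
print(f_radius(-0.5))   # 600.0, extrapolated from the flat first segment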
import telepot

# I have not created a Telegram bot yet, so this code does not work.
# TODO: create the bot on Telegram and get the API key.
bot = telepot.Bot("My Telegram key goes here")
# recebendoMsg is the message callback, defined elsewhere in this project.
bot.message_loop(recebendoMsg)

while True:
    pass
[ 11748, 5735, 13059, 198, 198, 2, 1400, 269, 5034, 72, 23781, 10214, 645, 573, 30536, 257, 22261, 11, 288, 21411, 1296, 64, 43577, 14873, 14031, 645, 25439, 32792, 198, 2, 16926, 46, 25, 327, 380, 283, 10214, 645, 573, 30536, 304, 613,...
2.677778
90
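recebendoMsg is referenced above but never defined in this snippet. A hedged, minimal handler in telepot's usual shape might look like the following; the echo behaviour is an assumption, not the project's actual logic.

import telepot


def recebendoMsg(msg):
    # telepot.glance extracts the content type, chat type and chat id.
    content_type, chat_type, chat_id = telepot.glance(msg)
    if content_type == 'text':
        # Assumed placeholder behaviour: echo the text back to the chat.
        bot.sendMessage(chat_id, msg['text'])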
# Support for English (EN) language

helper_commands = {
    "AUTHORIZE": "Usage:\n/authorize @<username>\n/authorize <user id>",
    "DEAUTHORIZE": "Usage:\n/deauthorize @<username>\n/deauthorize <user id>",
    "GIVEN": "Usage:\n/given <amount> @<username> <description>",
    "SPENT": "Usage:\n/spent <amount> <description>.\nPayees are all the members of the group, including the payer.",
    "MYID": "Usage: /myid\nShow your user id, useful if you have no username.",
    "START": "Show the initial message",
    "LAST_GROUP_EXPENSES": "See the last expenses in a group. \n"
                           "Usage:\n"
                           " /last_expenses (show max 5 expenses)\n"
                           " /last_expenses <n max expenses to show>",
    "LAST_CHARGES": "Use this command in a private chat to see the last charges on your cembot account. \n"
                    "Usage:\n"
                    " /last_charges (show max 5 charges)\n"
                    " /last_charges <n max charges to show>",
    "LAST_LOANS": "Use this command in a private chat to see the last loans you made. \n"
                  "Usage:\n"
                  " /last_loans (show max 5 loans)\n"
                  " /last_loans <n max loans to show>"
}

info = {
    "start": missing_translation("start"),
    "guide": missing_translation("guide"),
    "introduced_in_group": "Hello everyone!\nI'm cembot, and I'll help you administer your expenses!\n"
                           "Each member of this group should now introduce themselves. "
                           "People added after this message do not need to introduce themselves.\n"
                           "Do it with the command /hereIam",
    "each_member_introduced": missing_translation("each_member_introduced"),
    "person_missing": "1 person is missing.",
    "people_missing": " people are missing.",
    "transaction_succeed": "Transaction added successfully!",
    "authorized_confirm(user)": "User @%s has been authorized.",
    "deauthorized_confirm(user)": "The authorization of user @%s has been revoked.",
    "your_id_is(id)": "Your Telegram id is %s. You can set a username in your Telegram settings to use cembot more easily.",
    "balance_with_other_user(user,balance)": "Your balance with the user %s is %s",
    "header_balance_credit": " Credits\n",
    "header_balance_debit": " Debits\n",
    "commands": missing_translation("commands"),
    "these_are_the_last_group_expenses": missing_translation("these_are_the_last_group_expenses"),
    "these_are_the_last_individual_charges": missing_translation("these_are_the_last_individual_charges"),
    "these_are_the_last_group_charges": missing_translation("these_are_the_last_group_charges"),
    "no_charges_yet": missing_translation("no_charges_yet"),
    "these_are_the_last_individual_loans": missing_translation("these_are_the_last_individual_loans"),
    "these_are_the_last_group_loans": missing_translation("these_are_the_last_group_loans")
}

error = {
    "command_unavailable_for_private": "To use this command, open a private chat with @en_cembot.",
    "command_unavailable_for_group": "To use this command, add @en_cembot to a group.",
    "amount_money_not_valid": "Amount of money not valid.",
    "waiting_for_all_users": "Someone has not introduced themselves yet.\n"
                             "Introduce yourself with /hereIam before adding expenses.",
    "lack_of_authorization(user)": "The user @%s has not authorized you to charge expenses.",
    "user_unregistered(user)": "The user @%s that you want to add as a payee is not registered on our system.",
    "can't_deauthorize_cause_not_authorized_yet": "You have not authorized this user yet, so you cannot deauthorize them.",
    "have_authorized_yet_this_user": "You have already authorized this user.",
    "maybe_you_wrote_an_username_instead_id": "This is not a numeric id. If you meant to write a username, prefix it with @.",
    "insert_a_correct_number": "Insert a correct number and retry."
}

# commands
private_commands = {
    "start": "START",
    "commands": "COMMANDS",
    "authorize": "AUTHORIZE",
    "revoke": "DEAUTHORIZE",
    "given": "GIVEN",
    "myid": "MYID",
    "balance": "BALANCE",
    "last_charges": "LAST_CHARGES",
    "last_loans": "LAST_LOANS",
    "guide": "GUIDE"
}

group_commands = {
    "spent": "SPENT",
    "spent@en_cembot": "SPENT",  # version with @[language]_cembot
    "hereIam": "PRESENTATION",
    "hereIam@en_cembot": "PRESENTATION",  # version with @[language]_cembot
    "last_expenses": "LAST_GROUP_EXPENSES",
    "last_expenses@en_cembot": "LAST_GROUP_EXPENSES",  # version with @[language]_cembot
}
[ 2, 7929, 329, 46932, 357, 1677, 8, 3303, 198, 198, 2978, 525, 62, 9503, 1746, 796, 1391, 198, 197, 1, 32, 24318, 1581, 35400, 1298, 366, 28350, 7479, 77, 14, 9800, 1096, 2488, 27, 29460, 29, 59, 77, 14, 9800, 1096, 1279, 7220, 468...
2.660725
1,683
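missing_translation is called throughout the file above but never defined in it, so it presumably comes from a shared module. A hedged one-line stand-in consistent with how it is used:

# Assumed helper; the real project presumably imports this from a shared module.
def missing_translation(key):
    # Return a visible placeholder so untranslated strings are easy to spot.
    return "[missing translation: %s]" % key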