| text (string, lengths 0-1.25M) | meta (string, lengths 47-1.89k) |
|---|---|
import torch.optim.lr_scheduler as scheduler
import numpy as np
from vel.api import Callback, SchedulerFactory
class LadderScheduler(Callback):
""" Scheduler defined by a set of learning rates after reaching given number of iterations """
def __init__(self, optimizer, ladder, last_epoch):
self.schedule_limits = np.cumsum([x[0] for x in ladder])
self.schedule_numbers = np.array([float(x[1]) for x in ladder])
self.scheduler = scheduler.LambdaLR(optimizer, self.lambda_fn, last_epoch=last_epoch)
def lambda_fn(self, epoch_idx):
idx = np.minimum(np.searchsorted(self.schedule_limits, epoch_idx), len(self.schedule_limits) - 1)
return self.schedule_numbers[idx]
def on_epoch_begin(self, epoch_info):
self.scheduler.step(epoch=epoch_info.global_epoch_idx)
class LadderSchedulerFactory(SchedulerFactory):
""" Factory class for ladder scheduler """
def __init__(self, ladder):
self.ladder = ladder
def instantiate(self, optimizer, last_epoch=-1) -> LadderScheduler:
return LadderScheduler(optimizer, self.ladder, last_epoch)
def create(ladder):
""" Vel factory function """
return LadderSchedulerFactory(ladder)
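# Illustrative usage sketch -- not part of the original module. It assumes a ladder
# given as (number_of_epochs, lr_multiplier) pairs, which is what the cumsum over
# x[0] and the lookup of x[1] above imply; the toy model, optimizer and epoch
# indices below are made up for the example.
if __name__ == "__main__":
    import torch
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    factory = create(ladder=[(10, 1.0), (10, 0.1), (10, 0.01)])
    ladder_scheduler = factory.instantiate(optimizer)
    # lambda_fn returns the multiplier of the ladder rung an epoch index falls on.
    assert ladder_scheduler.lambda_fn(0) == 1.0    # epochs 0-9   -> 1.0
    assert ladder_scheduler.lambda_fn(15) == 0.1   # epochs 10-19 -> 0.1
    assert ladder_scheduler.lambda_fn(99) == 0.01  # later epochs -> 0.01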
|
{"hexsha": "0699c266722934fd86252e62b0a2b9f77ec115c6", "size": 1219, "ext": "py", "lang": "Python", "max_stars_repo_path": "vel/scheduler/ladder.py", "max_stars_repo_name": "galatolofederico/vel", "max_stars_repo_head_hexsha": "0473648cffb3f34fb784d12dbb25844ab58ffc3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 273, "max_stars_repo_stars_event_min_datetime": "2018-09-01T08:54:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T13:22:51.000Z", "max_issues_repo_path": "vel/scheduler/ladder.py", "max_issues_repo_name": "braincorp/vel", "max_issues_repo_head_hexsha": "bdf9d9eb6ed66278330e8cbece307f6e63ce53c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 47, "max_issues_repo_issues_event_min_datetime": "2018-08-17T11:27:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:26:55.000Z", "max_forks_repo_path": "vel/scheduler/ladder.py", "max_forks_repo_name": "braincorp/vel", "max_forks_repo_head_hexsha": "bdf9d9eb6ed66278330e8cbece307f6e63ce53c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 37, "max_forks_repo_forks_event_min_datetime": "2018-10-11T22:56:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-06T19:53:05.000Z", "avg_line_length": 34.8285714286, "max_line_length": 105, "alphanum_fraction": 0.7251845775, "include": true, "reason": "import numpy", "num_tokens": 262}
|
#https://codegolf.stackexchange.com/questions/10701/fastest-code-to-find-the-next-prime
import sys
import numpy as np
import tqdm
min_order = int(sys.argv[1])
max_order = int(sys.argv[2])
primes_order = int(sys.argv[3])
max_gap = int(sys.argv[4])
N_core = int(sys.argv[5])
N_order = max_order-min_order+1
N_primes = pow(10,primes_order)
# legendre symbol (a|m)
# note: returns m-1 if a is a non-residue, instead of -1
def legendre(a, m):
return pow(a, (m-1) >> 1, m)
# strong probable prime
def is_sprp(n, b=2):
d = n-1
s = 0
while d&1 == 0:
s += 1
d >>= 1
x = pow(b, d, n)
if x == 1 or x == n-1:
return True
for r in range(1, s):
x = (x * x)%n
if x == 1:
return False
elif x == n-1:
return True
return False
# lucas probable prime
# assumes D = 1 (mod 4), (D|n) = -1
def is_lucas_prp(n, D):
P = 1
Q = (1-D) >> 2
# n+1 = 2**r*s where s is odd
s = n+1
r = 0
while s&1 == 0:
r += 1
s >>= 1
# calculate the bit reversal of (odd) s
# e.g. 19 (10011) <=> 25 (11001)
t = 0
while s > 0:
if s&1:
t += 1
s -= 1
else:
t <<= 1
s >>= 1
# use the same bit reversal process to calculate the sth Lucas number
# keep track of q = Q**n as we go
U = 0
V = 2
q = 1
# mod_inv(2, n)
inv_2 = (n+1) >> 1
while t > 0:
if t&1 == 1:
# U, V of n+1
U, V = ((U + V) * inv_2)%n, ((D*U + V) * inv_2)%n
q = (q * Q)%n
t -= 1
else:
# U, V of n*2
U, V = (U * V)%n, (V * V - 2 * q)%n
q = (q * q)%n
t >>= 1
# double s until we have the 2**r*sth Lucas number
while r > 0:
U, V = (U * V)%n, (V * V - 2 * q)%n
q = (q * q)%n
r -= 1
# primality check
# if n is prime, n divides the n+1st Lucas number, given the assumptions
return U == 0
# primes less than 212
small_primes = set([
2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
73, 79, 83, 89, 97,101,103,107,109,113,
127,131,137,139,149,151,157,163,167,173,
179,181,191,193,197,199,211])
# pre-calced sieve of eratosthenes for n = 2, 3, 5, 7
indices = [
1, 11, 13, 17, 19, 23, 29, 31, 37, 41,
43, 47, 53, 59, 61, 67, 71, 73, 79, 83,
89, 97,101,103,107,109,113,121,127,131,
137,139,143,149,151,157,163,167,169,173,
179,181,187,191,193,197,199,209]
# distances between sieve values
offsets = [
10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6,
6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4,
2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6,
4, 2, 4, 6, 2, 6, 4, 2, 4, 2,10, 2]
max_int = 2147483647
# an 'almost certain' primality check
def is_prime(n):
if n < 212:
return n in small_primes
for p in small_primes:
if n%p == 0:
return False
# if n is a 32-bit integer, perform full trial division
if n <= max_int:
i = 211
while i*i < n:
for o in offsets:
i += o
if n%i == 0:
return False
return True
# Baillie-PSW
    # this is technically a probabilistic test, but there are no known pseudoprimes
if not is_sprp(n): return False
a = 5
s = 2
while legendre(a, n) != n-1:
s = -s
a = s-a
return is_lucas_prp(n, a)
# next prime strictly larger than n
def next_prime(n):
if n < 2:
return 2
# first odd larger than n
n = (n + 1) | 1
if n < 212:
while True:
if n in small_primes:
return n
n += 2
# find our position in the sieve rotation via binary search
x = int(n%210)
s = 0
e = 47
m = 24
while m != e:
if indices[m] < x:
s = m
m = (s + e + 1) >> 1
else:
e = m
m = (s + e) >> 1
i = int(n + (indices[m] - x))
# adjust offsets
offs = offsets[m:]+offsets[:m]
while True:
for o in offs:
if is_prime(i):
return i
i += o
def prime_freq(start,store):
n = max(2,start-max_gap)
while n < start:
n = next_prime(n)
N = 0
while N < N_primes:
nextn = next_prime(n)
diff = nextn - n
n = nextn
store[diff] += 1
N += 1
# Run grid
import multiprocessing
def mp_worker(args):
order_index = args[0]
start = pow(10,order_index)
store = np.zeros(max_gap+1,dtype=int)
prime_freq(start,store)
return (store,)
def mp_handler():
pool = multiprocessing.Pool(N_core)
_input = [(order_index,) for order_index in range(min_order,max_order+1)]
result = list(tqdm.tqdm(pool.imap(mp_worker, _input), total=N_order))
return result
output = mp_handler()
gap_distribution = np.stack([output[step_index][0] for step_index in range(N_order)])
##### Output
import h5py
with h5py.File(f"primegaps_sampling_{min_order}_{max_order}.hdf5", "w") as f:
f.create_dataset("counts", data=gap_distribution, compression="gzip", compression_opts=9, chunks = True, dtype = np.uint64, fletcher32 = False, shuffle = True, scaleoffset=0)
#f.create_dataset("starts", data=np.array([pow(10,order_index) for order_index in range(min_order,max_order+1)]))
f.create_dataset("gaps", data=np.arange(max_gap+1))
#output_box = {'log_integrals':_log_integrals,'n':n,'k':k,'alpha':np.logspace(-1,4,100),'beta':np.logspace(-1,4,100)}
#save_as_pickled_object(output_box,'censored_binomial_fully_log_grid_100.p')
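# Illustrative sanity check -- not part of the original script. It exercises the
# Baillie-PSW-based is_prime() and next_prime() helpers above on a few known values;
# call it manually if desired (it is not invoked during the sampling run).
def sanity_check_primality_helpers():
    assert is_prime(97) and not is_prime(91)   # 91 = 7 * 13
    assert next_prime(1000) == 1009            # first prime above 1000
    assert next_prime(10**6) == 1000003        # first prime above 10**6
    print("primality helpers look sane")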
|
{"hexsha": "849e623e729cb6dcb0030749c7e7cd1cdb808676", "size": 5699, "ext": "py", "lang": "Python", "max_stars_repo_path": "PrimeGaps/primegaps_sampling.py", "max_stars_repo_name": "DouglasBoubert/VisualisationEveryWeek", "max_stars_repo_head_hexsha": "aed332c3ae5706f9826a6e5460986a2b3df68b76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-04-28T13:33:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T15:57:32.000Z", "max_issues_repo_path": "PrimeGaps/primegaps_sampling.py", "max_issues_repo_name": "DouglasBoubert/VisualisationEveryWeek", "max_issues_repo_head_hexsha": "aed332c3ae5706f9826a6e5460986a2b3df68b76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PrimeGaps/primegaps_sampling.py", "max_forks_repo_name": "DouglasBoubert/VisualisationEveryWeek", "max_forks_repo_head_hexsha": "aed332c3ae5706f9826a6e5460986a2b3df68b76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4419642857, "max_line_length": 178, "alphanum_fraction": 0.5255307949, "include": true, "reason": "import numpy", "num_tokens": 2063}
|
(* Title: HOL/Auth/n_g2kAbsAfter_lemma_inv__28_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_g2kAbsAfter Protocol Case Study*}
theory n_g2kAbsAfter_lemma_inv__28_on_rules imports n_g2kAbsAfter_lemma_on_inv__28
begin
section{*All lemmas on causal relation between inv__28*}
lemma lemma_inv__28_on_rules:
assumes b1: "r \<in> rules N" and b2: "(f=inv__28 )"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)\<or>
(\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)\<or>
(r=n_n_SendReqS_j1 )\<or>
(r=n_n_SendReqEI_i1 )\<or>
(r=n_n_SendReqES_i1 )\<or>
(r=n_n_RecvReq_i1 )\<or>
(r=n_n_SendInvE_i1 )\<or>
(r=n_n_SendInvS_i1 )\<or>
(r=n_n_SendInvAck_i1 )\<or>
(r=n_n_RecvInvAck_i1 )\<or>
(r=n_n_SendGntS_i1 )\<or>
(r=n_n_SendGntE_i1 )\<or>
(r=n_n_RecvGntS_i1 )\<or>
(r=n_n_RecvGntE_i1 )\<or>
(r=n_n_ASendReqIS_j1 )\<or>
(r=n_n_ASendReqSE_j1 )\<or>
(r=n_n_ASendReqEI_i1 )\<or>
(r=n_n_ASendReqES_i1 )\<or>
(r=n_n_SendReqEE_i1 )\<or>
(r=n_n_ARecvReq_i1 )\<or>
(r=n_n_ASendInvE_i1 )\<or>
(r=n_n_ASendInvS_i1 )\<or>
(r=n_n_ASendInvAck_i1 )\<or>
(r=n_n_ARecvInvAck_i1 )\<or>
(r=n_n_ASendGntS_i1 )\<or>
(r=n_n_ASendGntE_i1 )\<or>
(r=n_n_ARecvGntS_i1 )\<or>
(r=n_n_ARecvGntE_i1 )"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_Store_i1Vsinv__28) done
}
moreover {
assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_AStore_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_SendReqS_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqS_j1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_SendReqEI_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqEI_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_SendReqES_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqES_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_RecvReq_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvReq_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_SendInvE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvE_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_SendInvS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvS_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_SendInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvAck_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_RecvInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvInvAck_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_SendGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendGntS_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_SendGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendGntE_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_RecvGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvGntS_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_RecvGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvGntE_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ASendReqIS_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqIS_j1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ASendReqSE_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqSE_j1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ASendReqEI_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqEI_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ASendReqES_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqES_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_SendReqEE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqEE_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ARecvReq_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvReq_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ASendInvE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvE_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ASendInvS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvS_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ASendInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvAck_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ARecvInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvInvAck_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ASendGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendGntS_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ASendGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendGntE_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ARecvGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvGntS_i1Vsinv__28) done
}
moreover {
assume d1: "(r=n_n_ARecvGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvGntE_i1Vsinv__28) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
{"author": "lyj238Gmail", "repo": "newParaVerifier", "sha": "5c2d49bf8e6c46c60efa53c98b0ba5c577d59618", "save_path": "github-repos/isabelle/lyj238Gmail-newParaVerifier", "path": "github-repos/isabelle/lyj238Gmail-newParaVerifier/newParaVerifier-5c2d49bf8e6c46c60efa53c98b0ba5c577d59618/examples/n_g2kAbsAfter/n_g2kAbsAfter_lemma_inv__28_on_rules.thy"}
|
import csv
import pandas
import scipy.stats
class TimeDataSet:
sortTypes = ["BubbleSort", "InsertionSort", "MergeSort", "QuickSort", "SelectionSort"]
    def __init__(self, fileName):
        # Read the whole file once; a csv.DictReader is only consumed a single time,
        # so materialize the rows before building the per-sort-type lists.
        with open(fileName) as file:
            rows = list(csv.DictReader(file, fieldnames=self.sortTypes))
        self.sortTimes = {}
        for sortType in self.sortTypes:
            self.sortTimes[sortType] = [int(row[sortType]) for row in rows]
runConfigurations = ["Dynamic", "Static", "MemoryWasteDynamic", "MemoryWasteStatic"]
dataSets = {}
for fileName in runConfigurations:
dataSets[fileName] = TimeDataSet("data/" + fileName + ".csv")
outputFile = open("data/output.txt", "a")
outputFile.write("DESCRIPTIONS OF SPEEDS FOR EACH RUN CONFIGURATION\n")
for configuration in runConfigurations:
for sortType in TimeDataSet.sortTypes:
outputFile.write("Description of " + sortType + " in " + configuration + ":\n" + repr(pandas.Series(dataSets[configuration].sortTimes[sortType]).describe()) + "\n")
outputFile.write("\n\nCOMPARISON BETWEEN RUN CONFIGURATIONS UNDER EACH SORT\n")
requiredPValueForSignificance = 0.01
evaluatedConfigurationPairs = []
significantConfigurations = []
for configuration1 in runConfigurations:
for configuration2 in runConfigurations:
configurationPair = set([configuration1, configuration2])
if configuration1 == configuration2 or configurationPair in evaluatedConfigurationPairs:
continue
for sortType in TimeDataSet.sortTypes:
testStatistic, pValue = scipy.stats.ttest_ind(dataSets[configuration1].sortTimes[sortType], dataSets[configuration2].sortTimes[sortType])
evaluatedConfigurationPairs.append(configurationPair)
outputFile.write("p-Value for " + sortType + " in " + configuration1 + " vs " + configuration2 + ": " + repr(pValue) + "\n")
if pValue < requiredPValueForSignificance:
significantConfigurations.append(sortType + " in " + configuration1 + " vs " + configuration2)
outputFile.write("\n\nSIGNIFICANT SPEED DIFFERENCES\n")
for configuration in significantConfigurations:
outputFile.write(configuration + "\n")
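# Illustrative helper -- not part of the original script. Because DictReader is given
# explicit fieldnames and no header row is skipped, each data/<name>.csv is assumed to
# be header-less with one column per sort type, in TimeDataSet.sortTypes order. This
# sketch writes a tiny example file in that layout; the path and timing values are made up.
def writeExampleCsv(path="data/Example.csv"):
    with open(path, "w", newline="") as exampleFile:
        writer = csv.writer(exampleFile)
        writer.writerow([1200, 450, 95, 60, 900])   # BubbleSort, Insertion, Merge, Quick, Selection
        writer.writerow([1180, 460, 90, 65, 880])
    return TimeDataSet(path)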
|
{"hexsha": "ff090498557bfa499634545d7df4577aea069b92", "size": 2194, "ext": "py", "lang": "Python", "max_stars_repo_path": "EvaluateData.py", "max_stars_repo_name": "SaurabhTotey/D-Language-Array-Measurements", "max_stars_repo_head_hexsha": "cc431df411adea1912ce4cd3deac159f030753f4", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-12T15:08:05.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-12T15:08:05.000Z", "max_issues_repo_path": "EvaluateData.py", "max_issues_repo_name": "SaurabhTotey/D-Language-Array-Measurements", "max_issues_repo_head_hexsha": "cc431df411adea1912ce4cd3deac159f030753f4", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EvaluateData.py", "max_forks_repo_name": "SaurabhTotey/D-Language-Array-Measurements", "max_forks_repo_head_hexsha": "cc431df411adea1912ce4cd3deac159f030753f4", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.6956521739, "max_line_length": 172, "alphanum_fraction": 0.7142206016, "include": true, "reason": "import scipy", "num_tokens": 494}
|
"""Unit tests for the `src.milannotations.datasets` submodule."""
import csv
import shutil
from tests import conftest
from src.milannotations import datasets
import numpy
import pytest
import torch
from PIL import Image
@pytest.fixture
def top_images():
"""Return TopImages for testing."""
return datasets.TopImages(
layer='layer',
unit=0,
images=torch.rand(conftest.N_TOP_IMAGES_PER_UNIT,
*conftest.IMAGE_SHAPE),
masks=torch.randint(2,
size=conftest.TOP_IMAGES_MASKS_SHAPE,
dtype=torch.float),
)
@pytest.mark.parametrize('opacity', (0., .5, 1.))
def test_top_images_as_masked_images_tensor(top_images, opacity):
"""Test TopImages.as_masked_image_tensor returns correct shape."""
actual = top_images.as_masked_images_tensor(opacity=opacity)
assert actual.shape == (conftest.N_TOP_IMAGES_PER_UNIT,
*conftest.IMAGE_SHAPE)
@pytest.mark.parametrize('opacity', (-1, 2))
def test_top_images_as_masked_images_tensor_bad_opacity(top_images, opacity):
"""Test TopImages.as_masked_images_tensor dies on bad opacity."""
with pytest.raises(ValueError, match=f'.*{opacity}.*'):
top_images.as_masked_images_tensor(opacity=opacity)
def test_top_images_as_pil_images(top_images):
"""Test TopImages.as_pil_images returns PIL Images."""
actuals = top_images.as_pil_images()
for actual in actuals:
assert isinstance(actual, Image.Image)
@pytest.mark.parametrize('limit', (None, 2))
def test_top_images_as_pil_image_grid(top_images, limit):
"""Test TopImages.as_pil_image_grid returns a PIL Image."""
actual = top_images.as_pil_image_grid(limit=limit)
assert isinstance(actual, Image.Image)
@pytest.mark.parametrize('limit', (0, -1))
def test_top_images_as_pil_image_grid_bad_limit(top_images, limit):
"""Test TopImages.as_pil_image_grid dies on bad limit."""
with pytest.raises(ValueError, match=f'.*{limit}.*'):
top_images.as_pil_image_grid(limit=limit)
@pytest.mark.parametrize('device', (None, 'cpu', torch.device('cpu')))
def test_top_images_dataset_init(top_images_root, device):
"""Test TopImagesDataset.__init__ eagerly reads data."""
dataset = datasets.TopImagesDataset(top_images_root,
display_progress=False,
device=device)
assert dataset.root == top_images_root
assert str(top_images_root).endswith(dataset.name)
assert dataset.layers == tuple(
f'layer-{i}' for i in range(conftest.N_LAYERS))
assert dataset.device is device
assert len(dataset.samples) == conftest.N_SAMPLES
for sample in dataset.samples:
assert sample.images.dtype is torch.float
assert sample.images.min() >= 0
assert sample.images.max() <= 1
assert sample.masks.dtype is torch.float
assert sample.masks.min() >= 0
assert sample.masks.max() <= 1
def test_top_images_dataset_init_with_units_file(top_images_root):
"""Test TopImagesDataset.__init__ properly reads units file."""
layer = conftest.layer(0)
units = range(conftest.N_UNITS_PER_LAYER - 1)
layer_dir = top_images_root / layer
units_file = layer_dir / 'units.npy'
numpy.save(str(units_file), numpy.array(units))
dataset = datasets.TopImagesDataset(top_images_root,
display_progress=False)
assert dataset.root == top_images_root
assert str(top_images_root).endswith(dataset.name)
assert dataset.layers == tuple(
f'layer-{i}' for i in range(conftest.N_LAYERS))
assert len(dataset.samples) == conftest.N_SAMPLES - 1
for sample in dataset.samples:
if sample.layer == layer:
assert sample.unit != conftest.N_UNITS_PER_LAYER - 1
assert sample.images.dtype is torch.float
assert sample.images.min() >= 0
assert sample.images.max() <= 1
assert sample.masks.dtype is torch.float
assert sample.masks.min() >= 0
assert sample.masks.max() <= 1
@pytest.mark.parametrize('subpath,error_pattern', (
('', '.*root directory not found.*'),
(f'{conftest.layer(0)}/images.npy', '.*missing images.*'),
(f'{conftest.layer(0)}/masks.npy', '.*missing masks.*'),
))
def test_top_images_dataset_init_missing_files(top_images_root, subpath,
error_pattern):
"""Test TopImagesDataset.__init__ dies when files are missing."""
path = top_images_root / subpath
if path.is_dir():
shutil.rmtree(path)
else:
assert path.is_file()
path.unlink()
with pytest.raises(FileNotFoundError, match=error_pattern):
datasets.TopImagesDataset(top_images_root)
@pytest.mark.parametrize('images,masks,error_pattern', (
(
torch.rand(5, 3, 224, 224),
None,
'.*5D images.*',
),
(
None,
torch.randint(1, size=(5, 1, 224, 224), dtype=torch.uint8),
'.*5D masks.*',
),
(
torch.rand(10, 5, 3, 224, 224),
torch.randint(1, size=(8, 5, 1, 224, 224), dtype=torch.uint8),
'.*masks/images.*',
),
(
torch.rand(10, 5, 3, 224, 224),
torch.randint(1, size=(10, 4, 1, 224, 224), dtype=torch.uint8),
'.*masks/images.*',
),
(
torch.rand(10, 5, 3, 223, 224),
torch.randint(1, size=(10, 5, 1, 224, 224), dtype=torch.uint8),
'.*height/width.*',
),
(
torch.rand(10, 5, 3, 224, 223),
torch.randint(1, size=(10, 5, 1, 224, 224), dtype=torch.uint8),
'.*height/width.*',
),
))
def test_top_images_dataset_init_bad_images_or_masks(top_images_root,
top_image_tensors,
top_image_masks, images,
masks, error_pattern):
"""Test TopImagesDataset.__init__ dies when images/masks misshapen."""
if images is None:
images = top_image_tensors[0]
if masks is None:
masks = top_image_masks[0]
for name, tensor in (('images', images), ('masks', masks)):
numpy.save(top_images_root / conftest.layer(0) / f'{name}.npy', tensor)
with pytest.raises(ValueError, match=error_pattern):
datasets.TopImagesDataset(top_images_root)
@pytest.mark.parametrize('units,error_pattern', (
(torch.randint(conftest.N_UNITS_PER_LAYER, size=()), '.*0D.*'),
(torch.randint(conftest.N_UNITS_PER_LAYER, size=(1, 2)), '.*2D.*'),
))
def test_top_images_dataset_init_bad_units(top_images_root, units,
error_pattern):
"""Test TopImagesDataset.__init__ dies when images/masks misshapen."""
numpy.save(top_images_root / conftest.layer(0) / 'units.npy', units)
with pytest.raises(ValueError, match=error_pattern):
datasets.TopImagesDataset(top_images_root)
def test_top_images_dataset_getitem(top_images_root, top_image_tensors,
top_image_masks):
"""Test TopImagesDataset.__getitem__ returns samples in right order."""
dataset = datasets.TopImagesDataset(top_images_root,
display_progress=False,
device='cpu')
for layer in range(conftest.N_LAYERS):
for unit in range(conftest.N_UNITS_PER_LAYER):
index = layer * conftest.N_UNITS_PER_LAYER + unit
sample = dataset[index]
assert sample.layer == f'layer-{layer}'
assert sample.unit == unit
assert sample.images.dtype is torch.float
assert sample.images.allclose(
top_image_tensors[layer][unit].float() / 255, atol=1e-3)
assert sample.masks.dtype is torch.float
assert sample.masks.equal(top_image_masks[layer][unit].float())
def test_top_images_dataset_len(top_images_dataset):
"""Test TopImagesDataset.__len__ returns correct length."""
assert len(top_images_dataset) == conftest.N_SAMPLES
def test_top_images_dataset_lookup(top_images_dataset, top_image_tensors,
top_image_masks):
"""Test TopImagesDataset.lookup finds correct layer and unit."""
for layer_index in range(conftest.N_LAYERS):
layer = conftest.layer(layer_index)
for unit in range(conftest.N_UNITS_PER_LAYER):
actual = top_images_dataset.lookup(layer, unit)
assert actual.layer == layer
assert actual.unit == unit
assert actual.images.allclose(
top_image_tensors[layer_index][unit] / 255, atol=1e-3)
assert actual.masks.equal(
top_image_masks[layer_index][unit].float())
@pytest.mark.parametrize('layer,unit,error_pattern', (
('layer-10000', 0, '.*"layer-10000" does not exist.*'),
('layer-0', 100000, '.*unit 100000.*'),
))
def test_top_images_dataset_lookup_bad_key(top_images_dataset, layer, unit,
error_pattern):
"""Test TopImagesDataset.lookup dies when given a bad key."""
with pytest.raises(KeyError, match=error_pattern):
top_images_dataset.lookup(layer, unit)
def test_top_images_dataset_k(top_images_dataset):
"""Test TopImagesDataset.k returns number of top images."""
assert top_images_dataset.k == conftest.N_TOP_IMAGES_PER_UNIT
@pytest.fixture
def annotated_top_images(top_images):
"""Return AnnotatedTopImages for testing."""
return datasets.AnnotatedTopImages(*top_images, annotations=('foo',))
@pytest.mark.parametrize('opacity', (0, .5, 1))
def test_annotated_top_images_as_pil_image_grid(annotated_top_images, opacity):
"""Test AnnotatedTopImages.as_pil_image_grid returns PIL image."""
actual = annotated_top_images.as_pil_image_grid(opacity=opacity)
assert isinstance(actual, Image.Image)
@pytest.mark.parametrize('annotation_count', (None, 1))
def test_annotated_top_images_dataset_init_annotation_count(
top_images_root, top_images_annotations_csv_file,
top_image_annotations, annotation_count):
"""Test AnnotatedTopImagesDataset.__init__, setting annotation_count."""
# Remove all L0 annotations.
banned = conftest.layer(0)
rows = [conftest.HEADER]
rows += [anno for anno in top_image_annotations if anno[0] != banned]
# Add an extra one for L1.
expanded = conftest.layer(1)
rows += [anno for anno in top_image_annotations if anno[0] == expanded]
# Overwrite annotations file with our janky modifications.
with top_images_annotations_csv_file.open('w') as handle:
writer = csv.writer(handle)
writer.writerows(rows)
annotated_top_images_dataset = datasets.AnnotatedTopImagesDataset(
top_images_root,
annotations_csv_file=top_images_annotations_csv_file,
layer_column=conftest.LAYER_COLUMN,
unit_column=conftest.UNIT_COLUMN,
annotation_column=conftest.ANNOTATION_COLUMN,
annotation_count=annotation_count,
display_progress=False)
assert str(top_images_root).endswith(annotated_top_images_dataset.name)
# Yeah, yeah, yeah, this is bad practice, I know...
if annotation_count is None:
assert len(annotated_top_images_dataset.samples) == conftest.N_SAMPLES
actuals = [
sample for sample in annotated_top_images_dataset.samples
if sample.layer == banned
]
assert len(actuals) == conftest.N_UNITS_PER_LAYER
for actual in actuals:
assert actual.annotations == ()
actuals = [
sample for sample in annotated_top_images_dataset.samples
if sample.layer == expanded
]
assert len(actuals) == conftest.N_UNITS_PER_LAYER
for actual in actuals:
assert len(actual.annotations) == 2
else:
actual = len(annotated_top_images_dataset.samples)
expected = (conftest.N_LAYERS - 1) * conftest.N_UNITS_PER_LAYER
assert actual == expected
layers = {
sample.layer for sample in annotated_top_images_dataset.samples
}
assert banned not in layers
assert expanded in layers
lengths = {
len(sample.annotations)
for sample in annotated_top_images_dataset.samples
}
assert lengths == {annotation_count}
def test_annotated_top_images_dataset_getitem(annotated_top_images_dataset,
top_image_tensors,
top_image_masks,
top_image_annotations):
"""Test AnnotatedTopImagesDataset.__getitem__ returns right samples."""
for layer in range(conftest.N_LAYERS):
for unit in range(conftest.N_UNITS_PER_LAYER):
index = layer * conftest.N_UNITS_PER_LAYER + unit
sample = annotated_top_images_dataset[index]
assert sample.layer == conftest.layer(layer)
assert sample.unit == unit
assert sample.images.dtype is torch.float
assert sample.images.allclose(
top_image_tensors[layer][unit].float() / 255, atol=1e-3)
assert sample.masks.dtype is torch.float
assert sample.masks.equal(top_image_masks[layer][unit].float())
assert sample.annotations == (top_image_annotations[index][-1],)
def test_annotated_top_images_dataset_len(annotated_top_images_dataset):
"""Test AnnotatedTopImagesDataset.__len__ returns correct length."""
assert len(annotated_top_images_dataset) == conftest.N_SAMPLES
def test_annotated_top_images_dataset_lookup(annotated_top_images_dataset,
top_image_tensors,
top_image_masks,
top_image_annotations):
"""Test AnnotatedTopImagesDataset.lookup finds correct sample."""
for layer_index in range(conftest.N_LAYERS):
layer = conftest.layer(layer_index)
for unit in range(conftest.N_UNITS_PER_LAYER):
actual = annotated_top_images_dataset.lookup(layer, unit)
assert actual.layer == layer
assert actual.unit == unit
assert actual.images.allclose(
top_image_tensors[layer_index][unit] / 255, atol=1e-3)
assert actual.masks.equal(
top_image_masks[layer_index][unit].float())
index = layer_index * conftest.N_UNITS_PER_LAYER + unit
assert actual.annotations == (top_image_annotations[index][-1],)
def test_annotated_top_images_dataset_lookup_bad_key(
annotated_top_images_dataset):
"""Test AnnotatedTopImagesDataset.lookup dies on bad key."""
bad = ('layer-10000', 0)
with pytest.raises(KeyError, match=f'.*{bad}.*'):
annotated_top_images_dataset.lookup(*bad)
def test_annotated_top_images_dataset_k(annotated_top_images_dataset):
"""Test AnnotatedTopImagesDataset.k returns correct value."""
assert annotated_top_images_dataset.k == conftest.N_TOP_IMAGES_PER_UNIT
|
{"hexsha": "b6e7d767f3f1c1742d5522f257b87c867deebcb9", "size": 15273, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/milannotations/datasets_test.py", "max_stars_repo_name": "ericotjo001/neuron-descriptions", "max_stars_repo_head_hexsha": "744fbf65c6538edd2fa423108eca7e2cd72f8b59", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-02-22T21:58:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T16:19:14.000Z", "max_issues_repo_path": "tests/milannotations/datasets_test.py", "max_issues_repo_name": "ericotjo001/neuron-descriptions", "max_issues_repo_head_hexsha": "744fbf65c6538edd2fa423108eca7e2cd72f8b59", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-02-27T06:43:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T08:30:30.000Z", "max_forks_repo_path": "tests/milannotations/datasets_test.py", "max_forks_repo_name": "ericotjo001/neuron-descriptions", "max_forks_repo_head_hexsha": "744fbf65c6538edd2fa423108eca7e2cd72f8b59", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-27T05:18:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T05:18:30.000Z", "avg_line_length": 39.9816753927, "max_line_length": 79, "alphanum_fraction": 0.6520657369, "include": true, "reason": "import numpy", "num_tokens": 3326}
|
# This code is based on: https://github.com/msmsajjadi/precision-recall-distributions/blob/master/prd_score.py
"""Precision and recall computation based on samples from two distributions.
Given a set of generated samples and samples from the test set, both embedded in some feature space (say, embeddings of
Inception Net), it computes the precision and recall via the algorithm presented in [arxiv.org/abs/1806.00035]."""
from matplotlib import pyplot as plt
import numpy as np
import sklearn.cluster
def compute_prd(eval_dist, ref_dist, num_angles=1001, epsilon=1e-10):
"""Computes the PRD curve for discrete distributions.
This function computes the PRD curve for the discrete distribution [eval_dist] with respect to the reference
    distribution [ref_dist]. This implements the algorithm in [arxiv.org/abs/1806.00035]. The PRD will be computed for
an equiangular grid of [num_angles] values between [0, pi/2].
Args:
eval_dist: 1D NumPy array or list of floats with probabilities of the states under distribution to be evaluated.
ref_dist: 1D NumPy array or list of floats with probabilities of the states under the reference distribution.
    num_angles: Number of angles for which to compute PRD. Must be in [3, 1e6]. The default value is 1001.
epsilon: Angle for PRD computation in the edge cases 0 and pi/2. The PRD will be computed for epsilon and
pi/2-epsilon, respectively. The default value is 1e-10.
Returns:
precision: NumPy array of shape [num_angles] with the precision for the different ratios.
recall: NumPy array of shape [num_angles] with the recall for the different ratios.
Raises:
ValueError: If not 0 < epsilon <= 0.1.
    ValueError: If num_angles < 3 or num_angles > 1e6."""
if not (epsilon > 0 and epsilon < 0.1):
raise ValueError('epsilon must be in (0, 0.1] but is %s.' % str(epsilon))
if not (num_angles >= 3 and num_angles <= 1e6):
raise ValueError('num_angles must be in [3, 1e6] but is %d.' % num_angles)
# Compute slopes for linearly spaced angles between [0, pi/2]
angles = np.linspace(epsilon, np.pi/2 - epsilon, num=num_angles)
slopes = np.tan(angles)
# Broadcast slopes so that second dimension will be states of the distribution
slopes_2d = np.expand_dims(slopes, 1)
# Broadcast distributions so that first dimension represents the angles
ref_dist_2d = np.expand_dims(ref_dist, 0)
eval_dist_2d = np.expand_dims(eval_dist, 0)
# Compute precision and recall for all angles in one step via broadcasting
precision = np.minimum(ref_dist_2d*slopes_2d, eval_dist_2d).sum(axis=1)
recall = precision / slopes
    # Handle numerical instabilities leading to precision/recall just above 1
max_val = max(np.max(precision), np.max(recall))
if max_val > 1.001:
raise ValueError('Detected value > 1.001, this should not happen.')
precision = np.clip(precision, 0, 1)
recall = np.clip(recall, 0, 1)
return precision, recall
def _cluster_into_bins(eval_data, ref_data, num_clusters):
"""Clusters the union of the data points and returns the cluster distribution.
Clusters the union of [eval_data] and [ref_data] into [num_clusters] using minibatch k-means.
Then, for each cluster, it computes the number of points from [eval_data] and [ref_data].
Args:
eval_data: NumPy array of data points from the distribution to be evaluated.
ref_data: NumPy array of data points from the reference distribution.
num_clusters: Number of cluster centers to fit.
Returns:
Two NumPy arrays, each of size [num_clusters], where i-th entry is number of points assigned to i-th cluster."""
cluster_data = np.vstack([eval_data, ref_data])
kmeans = sklearn.cluster.MiniBatchKMeans(n_clusters=num_clusters, n_init=10)
labels = kmeans.fit(cluster_data).labels_
eval_labels = labels[:len(eval_data)]
ref_labels = labels[len(eval_data):]
eval_bins = np.histogram(eval_labels, bins=num_clusters, range=[0, num_clusters], density=True)[0]
ref_bins = np.histogram(ref_labels, bins=num_clusters, range=[0, num_clusters], density=True)[0]
return eval_bins, ref_bins
def compute_prd_from_embedding(eval_data, ref_data, num_clusters=20, num_angles=1001, num_runs=10,enforce_balance=True):
"""Computes PRD data from sample embeddings.
The points from both distributions are mixed and then clustered. This leads to a pair of histograms of discrete
distributions over the cluster centers on which the PRD algorithm is executed.
The number of points in [eval_data] and [ref_data] must be equal since unbalanced distributions bias the clustering
towards the larger dataset. The check can be disabled by setting [enforce_balance] to False (not recommended).
Args:
eval_data: NumPy array of data points from the distribution to be evaluated.
ref_data: NumPy array of data points from the reference distribution.
num_clusters: Number of cluster centers to fit. The default value is 20.
num_angles: Number of angles for which to compute PRD. Must be in [3, 1e6]. The default value is 1001.
num_runs: Number of independent runs over which to average the PRD data.
enforce_balance: If enabled, throws exception if [eval_data] and [ref_data] do not have the same length.
Returns:
precision: NumPy array of shape [num_angles] with the precision for the different ratios.
recall: NumPy array of shape [num_angles] with the recall for the different ratios.
Raises:
ValueError: If len(eval_data) != len(ref_data) and enforce_balance is set to True."""
if enforce_balance and len(eval_data) != len(ref_data):
raise ValueError(
'The number of points in eval_data %d is not equal to the number of points in ref_data %d. To disable this '
'exception, set enforce_balance to False (not recommended).' % (len(eval_data), len(ref_data))
)
eval_data = np.array(eval_data, dtype=np.float64)
ref_data = np.array(ref_data, dtype=np.float64)
precisions = []
recalls = []
for _ in range(num_runs):
eval_dist, ref_dist = _cluster_into_bins(eval_data, ref_data, num_clusters)
precision, recall = compute_prd(eval_dist, ref_dist, num_angles)
precisions.append(precision)
recalls.append(recall)
precision = np.mean(precisions, axis=0)
recall = np.mean(recalls, axis=0)
return precision, recall
#-----------------------------------------------------------------------------------------------------------#
def plot(precision_recall_pairs, labels=None, legend_loc='lower left', dpi=300):
"""Plots precision recall curves for distributions.
Creates the PRD plot for the given data and stores the plot in a given path.
Args:
precision_recall_pairs: List of prd_data to plot. Each item in this list is
a 2D array of precision and recall values for the
same number of ratios.
    labels: Optional list of labels of same length as precision_recall_pairs. The
default value is None.
legend_loc: Location of the legend. The default value is 'lower left'.
    dpi: Dots per inch (DPI) for the figure. The default value is 300.
Raises:
    ValueError: If labels is a list of different length than precision_recall_pairs.
"""
if labels is not None and len(labels) != len(precision_recall_pairs):
raise ValueError(
'Length of labels %d must be identical to length of '
'precision_recall_pairs %d.'
% (len(labels), len(precision_recall_pairs)))
fig = plt.figure(figsize=(3.5, 3.5), dpi=dpi)
plot_handle = fig.add_subplot(111)
plot_handle.tick_params(axis='both', which='major', labelsize=12)
for i in range(len(precision_recall_pairs)):
precision, recall = precision_recall_pairs[i]
label = labels[i] if labels is not None else None
plt.plot(recall, precision, label=label, alpha=0.5, linewidth=3)
if labels is not None:
plt.legend(loc=legend_loc)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('Recall', fontsize=12)
plt.ylabel('Precision', fontsize=12)
# plt.xscale('log')
# plt.yscale('log')
plt.tight_layout()
return fig
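# Illustrative usage sketch -- not part of the original module. It builds two toy
# Gaussian "embeddings" (the sizes, shift and file name below are made up) and plots
# their PRD curve; real usage would pass e.g. Inception embeddings instead.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    ref_embeddings = rng.normal(loc=0.0, scale=1.0, size=(1000, 8))
    gen_embeddings = rng.normal(loc=0.5, scale=1.0, size=(1000, 8))  # shifted distribution
    precision, recall = compute_prd_from_embedding(gen_embeddings, ref_embeddings,
                                                   num_clusters=20, num_runs=2)
    fig = plot([(precision, recall)], labels=['toy generator'])
    fig.savefig('prd_toy_example.png')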
|
{"hexsha": "4c16fe10911b92edfc021251b091775065215eb7", "size": 8297, "ext": "py", "lang": "Python", "max_stars_repo_path": "eval/precision_recall.py", "max_stars_repo_name": "i-supermario/Cifar100_CL", "max_stars_repo_head_hexsha": "6c22151ea2c4c3014a569112fdf8a549331b27c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 164, "max_stars_repo_stars_event_min_datetime": "2020-08-13T08:24:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T07:09:10.000Z", "max_issues_repo_path": "eval/precision_recall.py", "max_issues_repo_name": "i-supermario/Cifar100_CL", "max_issues_repo_head_hexsha": "6c22151ea2c4c3014a569112fdf8a549331b27c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-09-21T11:28:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-17T11:36:13.000Z", "max_forks_repo_path": "eval/precision_recall.py", "max_forks_repo_name": "i-supermario/Cifar100_CL", "max_forks_repo_head_hexsha": "6c22151ea2c4c3014a569112fdf8a549331b27c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 51, "max_forks_repo_forks_event_min_datetime": "2020-08-17T05:40:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T07:09:28.000Z", "avg_line_length": 49.6826347305, "max_line_length": 120, "alphanum_fraction": 0.7056767506, "include": true, "reason": "import numpy", "num_tokens": 2008}
|
using TerminalExtensions
function Base.display(disp::TerminalExtensions.iTerm2.InlineDisplay, p::PredictionFrame)
Base.display(disp, [p])
end
function Base.display(disp::TerminalExtensions.iTerm2.InlineDisplay, frames::Vector{PredictionFrame})
print_frame_table(image_display_callback, frames)
end
function image_display_callback(buf, row_frames)
aspect_ratios = [size(p.img, 1) / size(p.img, 2) for p in row_frames]
# An extra 17/7 which seems to be the default aspect ratio of
# an iterm terminal cell.
max_image_height = maximum(round.(Int, aspect_ratios .* (inner_width/(17/7))))
for i = 1:max_image_height
print(buf, "│")
for i = 1:length(row_frames)
print(buf, CSI, string(inner_width), 'C', "│")
end
println(buf)
end
print(buf, CSI, string(1), 'A')
print(buf, CSI, string(1), 'C')
for p in row_frames
print(buf, CSI, string(max_image_height-1), 'A')
display_img(buf, p.img, height=string(max_image_height), width = string(inner_width))
end
println(buf)
end
function display_img(io::IO, img; kwargs...)
buf = IOBuffer()
show(buf,MIME"image/png"(),img)
TerminalExtensions.iTerm2.display_file(take!(buf); io=io,filename="image",inline=true,preserveAspectRatio=true,kwargs...)
end
|
{"hexsha": "86b8e99c38de449ab8c484a477030a833cc1f820", "size": 1312, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/display/terminal_extensions.jl", "max_stars_repo_name": "UnofficialJuliaMirror/Metalhead.jl-dbeba491-748d-5e0e-a39e-b530a07fa0cc", "max_stars_repo_head_hexsha": "b61ddec642a6e9dfb88961c4ceb76ec64837a3a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-08T04:53:37.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-08T04:53:37.000Z", "max_issues_repo_path": "src/display/terminal_extensions.jl", "max_issues_repo_name": "UnofficialJuliaMirror/Metalhead.jl-dbeba491-748d-5e0e-a39e-b530a07fa0cc", "max_issues_repo_head_hexsha": "b61ddec642a6e9dfb88961c4ceb76ec64837a3a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/display/terminal_extensions.jl", "max_forks_repo_name": "UnofficialJuliaMirror/Metalhead.jl-dbeba491-748d-5e0e-a39e-b530a07fa0cc", "max_forks_repo_head_hexsha": "b61ddec642a6e9dfb88961c4ceb76ec64837a3a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4594594595, "max_line_length": 125, "alphanum_fraction": 0.6875, "num_tokens": 345}
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ndlinear/gsl_multifit_ndlinear.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_sf_laguerre.h>
#include <gsl/gsl_sf_legendre.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_multifit.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_bspline.h>
#include <gsl/gsl_statistics.h>
/* dimension of fit */
#define N_DIM 3
/* number of basis functions for each variable */
#define N_SUM_R 10
#define N_SUM_THETA 10
#define N_SUM_PHI 9
#define R_MAX 3.0
double
psi_real_exact(int k, int l, int m, double r, double theta, double phi)
{
double R, T, P;
R = pow(r, (double) l) *
exp(-r*r) *
gsl_sf_laguerre_n(k, l + 0.5, 2 * r * r);
T = gsl_sf_legendre_sphPlm(l, m, cos(theta));
P = cos(m * phi);
return (R * T * P);
}
/* basis functions for each variable */
int
basis_r(double r, double y[], void *params)
{
gsl_bspline_workspace *bw = params;
gsl_vector_view v = gsl_vector_view_array(y, N_SUM_R);
int s;
/* use B-splines for r dependence */
s = gsl_bspline_eval(r, &v.vector, bw);
return s;
}
int
basis_theta(double theta, double y[], void *params)
{
size_t i;
/* use Legendre polynomials for theta dependence */
for (i = 0; i < N_SUM_THETA; ++i)
y[i] = gsl_sf_legendre_Pl(i, cos(theta));
return GSL_SUCCESS;
}
int
basis_phi(double phi, double y[], void *params)
{
size_t i;
/* use standard Fourier basis (sin/cos) for phi dependence */
for (i = 0; i < N_SUM_PHI; ++i)
{
if ((i % 2) == 0)
y[i] = cos((double)(i/2) * phi);
else
y[i] = sin((double)((i+1)/2) * phi);
}
return GSL_SUCCESS;
}
int
main(int argc, char *argv[])
{
const size_t ndim = N_DIM; /* dimension of fit */
const size_t ndata = 3000; /* number of data points to fit */
size_t N[N_DIM]; /* upper bounds on model sums */
int (*u[N_DIM])(double x, double y[], void *params);
size_t i; /* looping */
int k, l, m; /* quantum numbers */
gsl_rng *rng_p;
gsl_bspline_workspace *bspline_p;
gsl_multifit_linear_workspace *multifit_p;
gsl_multifit_ndlinear_workspace *ndlinear_p;
gsl_vector *data; /* psi data */
gsl_matrix *vars; /* parameters corresponding to psi data */
gsl_matrix *X; /* matrix for least squares fit */
gsl_vector *coeffs; /* fit coefficients */
gsl_matrix *cov; /* covariance matrix */
double chisq; /* chi^2 */
double Rsq; /* R^2 */
size_t ncoeffs; /* total number of fit coefficients */
gsl_rng_env_setup();
k = 5;
l = 4;
m = 2;
N[0] = N_SUM_R;
N[1] = N_SUM_THETA;
N[2] = N_SUM_PHI;
u[0] = &basis_r;
u[1] = &basis_theta;
u[2] = &basis_phi;
rng_p = gsl_rng_alloc(gsl_rng_default);
bspline_p = gsl_bspline_alloc(4, N_SUM_R - 2);
ndlinear_p = gsl_multifit_ndlinear_alloc(ndim, N, u, bspline_p);
ncoeffs = gsl_multifit_ndlinear_ncoeffs(ndlinear_p);
multifit_p = gsl_multifit_linear_alloc(ndata, ncoeffs);
data = gsl_vector_alloc(ndata);
vars = gsl_matrix_alloc(ndata, ndim);
X = gsl_matrix_alloc(ndata, ncoeffs);
coeffs = gsl_vector_alloc(ncoeffs);
cov = gsl_matrix_alloc(ncoeffs, ncoeffs);
gsl_bspline_knots_uniform(0.0, R_MAX, bspline_p);
/* this is the data to be fitted */
for (i = 0; i < ndata; ++i)
{
double r = gsl_rng_uniform(rng_p) * R_MAX;
double theta = gsl_rng_uniform(rng_p) * M_PI;
double phi = gsl_rng_uniform(rng_p) * 2.0 * M_PI;
double psi = psi_real_exact(k, l, m, r, theta, phi);
double dpsi = gsl_ran_gaussian(rng_p, 0.05 * psi);
/* keep track of (r, theta, phi) points */
gsl_matrix_set(vars, i, 0, r);
gsl_matrix_set(vars, i, 1, theta);
gsl_matrix_set(vars, i, 2, phi);
/* fill in RHS data vector */
gsl_vector_set(data, i, psi + dpsi);
}
/* construct the design matrix X */
gsl_multifit_ndlinear_design(vars, X, ndlinear_p);
/* now do the actual least squares fit */
gsl_multifit_linear(X, data, coeffs, cov, &chisq, multifit_p);
/* compute R^2 */
Rsq = 1.0 - chisq / gsl_stats_tss(data->data, 1, data->size);
fprintf(stderr, "chisq = %e, Rsq = %f\n", chisq, Rsq);
/* now print out the model and the exact solution and compute rms error */
{
double eps_rms = 0.0;
double volume = 0.0;
double r, theta, phi;
double dr = 0.05;
double dtheta = 5.0 * M_PI / 180.0;
double dphi = 5.0 * M_PI / 180.0;
double x[N_DIM];
gsl_vector_view xv = gsl_vector_view_array(x, N_DIM);
double psi;
double psi_model;
double err;
for (r = 0.01; r < R_MAX; r += dr)
{
for (theta = 0.0; theta < M_PI; theta += dtheta)
{
for (phi = 0.0; phi < 2.0 * M_PI; phi += dphi)
{
double dV = r * r * sin(theta) * dr * dtheta * dphi;
x[0] = r;
x[1] = theta;
x[2] = phi;
/* compute model value for this (r, theta, phi) */
psi_model = gsl_multifit_ndlinear_calc(&xv.vector,
coeffs,
ndlinear_p);
/* compute exact value for this (r, theta, phi) */
psi = psi_real_exact(k, l, m, r, theta, phi);
err = psi_model - psi;
eps_rms += err * err * dV;
volume += dV;
if (phi == 0.0)
printf("%e %e %e %e\n", r, theta, psi, psi_model);
}
}
printf("\n");
}
eps_rms /= volume;
eps_rms = sqrt(eps_rms);
fprintf(stderr, "rms error over all parameter space = %e\n", eps_rms);
}
gsl_rng_free(rng_p);
gsl_bspline_free(bspline_p);
gsl_multifit_ndlinear_free(ndlinear_p);
gsl_multifit_linear_free(multifit_p);
gsl_vector_free(data);
gsl_matrix_free(vars);
gsl_matrix_free(X);
gsl_vector_free(coeffs);
gsl_matrix_free(cov);
return 0;
} /* main() */
|
{"hexsha": "a4872bf1a77cf2a2be99b4c83307d434f54fb98d", "size": 6164, "ext": "c", "lang": "C", "max_stars_repo_path": "src/BodyComponents/archive/ndlinear-1.0/doc/examples/harmosc.c", "max_stars_repo_name": "rennhak/Keyposes", "max_stars_repo_head_hexsha": "e5ffe4c849b0894f27d58985b41ec8edd3432be1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2016-11-26T07:28:56.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-05T12:45:52.000Z", "max_issues_repo_path": "src/BodyComponents/archive/ndlinear-1.0/doc/examples/harmosc.c", "max_issues_repo_name": "rennhak/Keyposes", "max_issues_repo_head_hexsha": "e5ffe4c849b0894f27d58985b41ec8edd3432be1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/BodyComponents/archive/ndlinear-1.0/doc/examples/harmosc.c", "max_forks_repo_name": "rennhak/Keyposes", "max_forks_repo_head_hexsha": "e5ffe4c849b0894f27d58985b41ec8edd3432be1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9170305677, "max_line_length": 76, "alphanum_fraction": 0.5850097339, "num_tokens": 1866}
|
import unittest
from mltoolkit.mldp.steps.transformers.nlp import WindowSlider
from mltoolkit.mldp.steps.transformers.nlp.helpers import create_new_field_name
from mltoolkit.mldp.utils.tools import DataChunk
import numpy as np
class TestWindowSlider(unittest.TestCase):
def setUp(self):
self.field_name = "dummy"
self.suffix = "window"
self.new_field_name = create_new_field_name(self.field_name,
suffix=self.suffix)
# TODO: more descriptive method names would be nice to have
def test_scenario1(self):
window_size = 2
step_size = 1
only_full_windows = False
input_seqs = np.array([list(range(6)), list(range(2))])
input_chunk = DataChunk(**{self.field_name: input_seqs})
expect_seqs = np.array([
[[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5]],
[[0, 1]]])
expected_output_chunk = DataChunk(**{self.field_name: input_seqs,
self.new_field_name: expect_seqs})
self._test_window_setup(input_chunk, expected_output_chunk,
field_name=self.field_name, suffix=self.suffix,
window_size=window_size, step_size=step_size,
only_full_windows=only_full_windows)
def test_scenario2(self):
window_size = 3
step_size = 3
only_full_windows = False
input_seqs = np.array([list(range(7)), list(range(2))])
input_chunk = DataChunk(**{self.field_name: input_seqs})
expect_seqs = np.array([
[[0, 1, 2], [3, 4, 5], [6]],
[[0, 1]]])
expected_output_chunk = DataChunk(**{self.field_name: input_seqs,
self.new_field_name: expect_seqs})
self._test_window_setup(input_chunk, expected_output_chunk,
field_name=self.field_name, suffix=self.suffix,
window_size=window_size, step_size=step_size,
only_full_windows=only_full_windows)
def test_scenario3(self):
window_size = 3
step_size = 10
only_full_windows = False
input_seqs = np.array([list(range(3)), list(range(2))])
input_chunk = DataChunk(**{self.field_name: input_seqs})
expect_seqs = np.empty(2, dtype="object")
expect_seqs[0] = [[0, 1, 2]]
expect_seqs[1] = [[0, 1]]
expected_output_chunk = DataChunk(**{self.field_name: input_seqs,
self.new_field_name: expect_seqs})
self._test_window_setup(input_chunk, expected_output_chunk,
field_name=self.field_name, suffix=self.suffix,
window_size=window_size, step_size=step_size,
only_full_windows=only_full_windows)
def test_scenario4(self):
window_size = 2
step_size = 1
only_full_windows = True
input_seqs = np.array([list(range(6)), list(range(3)), list(range(1))])
input_chunk = DataChunk(**{self.field_name: input_seqs})
expect_seqs = np.array([
[[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]],
[[0, 1], [1, 2]],
[]
])
expected_output_chunk = DataChunk(**{self.field_name: input_seqs,
self.new_field_name: expect_seqs})
self._test_window_setup(input_chunk, expected_output_chunk,
field_name=self.field_name, suffix=self.suffix,
window_size=window_size, step_size=step_size,
only_full_windows=only_full_windows)
def _test_window_setup(self, input_chunk, expected_output_chunk,
field_name, suffix,
window_size, step_size,
only_full_windows):
window_slider = WindowSlider(field_names=field_name,
window_size=window_size,
step_size=step_size,
new_window_field_name_suffix=suffix,
only_full_windows=only_full_windows)
actual_output_chunk = window_slider(input_chunk)
self.assertTrue(expected_output_chunk == actual_output_chunk)
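# Reference sketch -- not the mltoolkit implementation. It spells out the window
# semantics that the expected outputs in the scenarios above encode: windows of
# window_size items are taken every step_size items, and a trailing partial window
# is kept unless only_full_windows is set.
def naive_windows(seq, window_size, step_size, only_full_windows=False):
    windows = []
    for start in range(0, len(seq), step_size):
        window = seq[start:start + window_size]
        if window and (len(window) == window_size or not only_full_windows):
            windows.append(window)
    return windows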
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "b91c61a752ad81bb3ad2ba5a7e0f42efd643c33d", "size": 4569, "ext": "py", "lang": "Python", "max_stars_repo_path": "mltoolkit/mldp/tests/transformers/test_window_slider.py", "max_stars_repo_name": "mancunian1792/FewSum", "max_stars_repo_head_hexsha": "c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2020-10-12T19:05:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T01:19:29.000Z", "max_issues_repo_path": "mltoolkit/mldp/tests/transformers/test_window_slider.py", "max_issues_repo_name": "mancunian1792/FewSum", "max_issues_repo_head_hexsha": "c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-30T01:52:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-19T08:04:54.000Z", "max_forks_repo_path": "mltoolkit/mldp/tests/transformers/test_window_slider.py", "max_forks_repo_name": "mancunian1792/FewSum", "max_forks_repo_head_hexsha": "c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-10-29T14:01:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T18:33:10.000Z", "avg_line_length": 44.359223301, "max_line_length": 79, "alphanum_fraction": 0.5548260013, "include": true, "reason": "import numpy", "num_tokens": 951}
|
"""This module defines classes of the package."""
# Author: Henri Gérard <hgerard.proy@gmail.com>
# License: MIT
abstract type RegressionModel end
# Define an object to represent a linear regression model
type LinearRegression <: RegressionModel
function LinearRegression()
return new()
end
end
type LogisticRegression <: RegressionModel
function LogisticRegression()
return new()
end
end
abstract type ParallelParam end
type Sequential <: ParallelParam
function Sequential()
return new()
end
end
type Parallel <: ParallelParam
function Parallel()
return new()
end
end
abstract type AlgoParams end
type OptParams <: AlgoParams
itmax::Int
stability::Float64
learning_rate::Float64
verbosity::Int
function OptParams(itmax, stability, learning_rate; verbosity = 0)
return new(itmax, stability, learning_rate, verbosity)
end
end
type ProjParams <: AlgoParams
ITER_MAX::Int
precision::Float64
sample::Int
para_proj::ParallelParam
para_inter::ParallelParam
function ProjParams(ITER_MAX, precision, sample; para_proj=Sequential(), para_inter=Sequential())
return new(ITER_MAX, precision, sample, para_proj, para_inter)
end
end
abstract type AmbiguitySet end
type DivergenceSet <: AmbiguitySet
function DivergenceSet()
return new()
end
end
type WassersteinSet <: AmbiguitySet
function WassersteinSet()
return new()
end
end
abstract type GeneralConstraint end
type PositiveConstraint <: GeneralConstraint
function PositiveConstraint()
return new()
end
end
type DROConstraint <: GeneralConstraint
function DROConstraint()
return new()
end
end
type EntropicConstraint <: GeneralConstraint
function EntropicConstraint()
return new()
end
end
abstract type DivergenceConstraint<:GeneralConstraint end
type KLConstraint <: DivergenceConstraint
function KLConstraint()
return new()
end
end
type RobustModel
descent_direction::Array{Float64,1}
I0::UnitRange{Int64}
name::GeneralConstraint
regressionModel::RegressionModel
function RobustModel(N, nb_features, ϵ, ambiguity, regressionModel)
if ambiguity == "KLdivergence"
descent_direction = [ϵ; 1; zeros(1:nb_features); ones(N)/N]
I0 = 1:N
dim = N+1
name = KLConstraint()
elseif ambiguity == "wasserstein"
descent_direction = [ϵ; zeros(1:nb_features); ones(N)/N]
I0 = 1:N^2
dim = N^2+1
name = DROConstraint()
elseif ambiguity == "entropic"
descent_direction = [1; zeros(1:nb_features); ones(N)/N]
I0 = 1:N
dim = N+1
name = EntropicConstraint()
end
return new(descent_direction, I0, name, regressionModel)
end
end
|
{"hexsha": "e1ebc908db23e84fe6397438d5a1168ad661d0f9", "size": 2879, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "tests/test_objects.jl", "max_stars_repo_name": "Henri-Gerard/robox", "max_stars_repo_head_hexsha": "8ad29add8641e3a1255a22185907124cb8afa050", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_objects.jl", "max_issues_repo_name": "Henri-Gerard/robox", "max_issues_repo_head_hexsha": "8ad29add8641e3a1255a22185907124cb8afa050", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_objects.jl", "max_forks_repo_name": "Henri-Gerard/robox", "max_forks_repo_head_hexsha": "8ad29add8641e3a1255a22185907124cb8afa050", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.8623188406, "max_line_length": 101, "alphanum_fraction": 0.667940257, "num_tokens": 691}
|
\subsection{Complexity Estimate} \label{complexity_estimate}
The Complexity Estimate (CE) is a complexity measure for time series; the authors of \cite{batista2011complexity}
introduced one possible implementation of it. Let $Q = (q_1, q_2, \dots, q_i, \dots, q_l)$ be a time series of length $l$
over the domain set $\mathbb{U}$, and let $d$ be a distance measure function with
$d: \mathbb{U} \times \mathbb{U} \to \mathbb{R}$.
\begin{equation}
CE(Q) = \sqrt[2]{\sum \limits_{i=1}^{l-1} d(q_i, q_{i + 1})^2}
\end{equation}
The CE would be a suitable time series measure for a measure-based filter as mentioned in \ref{sliding_window_filter}.
However, the measure was introduced in \cite{batista2011complexity} under the assumption that the two compared time series
have the same length. A length-normalized version of the CE is therefore a better fit as the underlying time series
measure for a filter. The length-normalized CE (LNCE) can be calculated as follows.
\begin{equation}
LNCE(Q) = \frac{1}{l-1}CE(Q)
\end{equation}
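As a brief illustration (the concrete values here are our own example and are not taken from \cite{batista2011complexity}),
consider the time series $Q = (1, 3, 2, 2)$ with length $l = 4$ and the absolute difference as distance measure $d$. Then
\begin{equation}
CE(Q) = \sqrt[2]{(1-3)^2 + (3-2)^2 + (2-2)^2} = \sqrt[2]{5} \approx 2.24 \qquad \textrm{and} \qquad LNCE(Q) = \frac{1}{3} \sqrt[2]{5} \approx 0.75
\end{equation}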
|
{"hexsha": "9e23a2af7f334bb0e83e00889ff261177e031f36", "size": 1020, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "bachelor-thesis/background_and_notation/complexity_estimate.tex", "max_stars_repo_name": "GordonLesti/SlidingWindowFilter", "max_stars_repo_head_hexsha": "22c11f2912a5c523ae8ad85a849e2d0b123536ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-06-22T09:37:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-14T11:43:53.000Z", "max_issues_repo_path": "bachelor-thesis/background_and_notation/complexity_estimate.tex", "max_issues_repo_name": "GordonLesti/SlidingWindowFilter", "max_issues_repo_head_hexsha": "22c11f2912a5c523ae8ad85a849e2d0b123536ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bachelor-thesis/background_and_notation/complexity_estimate.tex", "max_forks_repo_name": "GordonLesti/SlidingWindowFilter", "max_forks_repo_head_hexsha": "22c11f2912a5c523ae8ad85a849e2d0b123536ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-01-11T23:15:57.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-11T23:15:57.000Z", "avg_line_length": 53.6842105263, "max_line_length": 120, "alphanum_fraction": 0.7470588235, "num_tokens": 299}
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 12 16:34:44 2019
@author: Alexandre
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.control import robotcontrollers
from pyro.dynamic import manipulator
###############################################################################
torque_controlled_robot = manipulator.TwoLinkManipulator()
# Target
q_desired = np.array([0.5,0.5])
r_desired = torque_controlled_robot.forward_kinematic_effector( q_desired )
# effector PID
dof = 2
effector_pid = robotcontrollers.EndEffectorPID( torque_controlled_robot )
effector_pid.rbar = r_desired
effector_pid.kp = np.array([100, 100 ])
effector_pid.kd = np.array([ 0, 0 ])
effector_pid.ki = np.array([ 50, 50 ])
# Closed-loops
robot_with_effector_pid = effector_pid + torque_controlled_robot
# Simulations
tf = 20
robot_with_effector_pid.x0 = np.array([0,0,0,0,0,0])
robot_with_effector_pid.compute_trajectory( tf )
robot_with_effector_pid.plot_trajectory('xu')
robot_with_effector_pid.plot_trajectory_with_internal_states()
robot_with_effector_pid.animate_simulation()
|
{"hexsha": "da658709686ee2f9d2ecac853d71bf02af38f0c5", "size": 1239, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/robot_arms/twolinkrobot_effector_pid_controller.py", "max_stars_repo_name": "gabrielcabana21/pyro", "max_stars_repo_head_hexsha": "a3107d7b676a0fe1afb89a18a5a63d08fe9f0998", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/robot_arms/twolinkrobot_effector_pid_controller.py", "max_issues_repo_name": "gabrielcabana21/pyro", "max_issues_repo_head_hexsha": "a3107d7b676a0fe1afb89a18a5a63d08fe9f0998", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/robot_arms/twolinkrobot_effector_pid_controller.py", "max_forks_repo_name": "gabrielcabana21/pyro", "max_forks_repo_head_hexsha": "a3107d7b676a0fe1afb89a18a5a63d08fe9f0998", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8139534884, "max_line_length": 79, "alphanum_fraction": 0.6182405165, "include": true, "reason": "import numpy", "num_tokens": 291}
|
import numpy as np
from l5kit.data import ChunkedDataset, get_agents_slice_from_frames, get_tl_faces_slice_from_frames
def insert_agent(agent: np.ndarray, frame_idx: int, dataset: ChunkedDataset) -> None:
"""Insert an agent in one frame.
Assumptions:
- the dataset has only 1 scene
- the dataset is in numpy format and not zarr anymore
:param agent: the agent info to be inserted
:param frame_idx: the frame where we want to insert the agent
:param dataset: the single-scene dataset.
"""
if not len(dataset.scenes) == 1:
raise ValueError(f"dataset should have a single scene, got {len(dataset.scenes)}")
if not isinstance(dataset.agents, np.ndarray):
raise ValueError("dataset agents should be an editable np array")
if not isinstance(dataset.frames, np.ndarray):
raise ValueError("dataset frames should be an editable np array")
if not frame_idx < len(dataset.frames):
raise ValueError(f"can't set frame {frame_idx} in dataset with len {len(dataset.frames)}")
frame = dataset.frames[frame_idx]
agents_slice = get_agents_slice_from_frames(frame)
agents_frame = dataset.agents[agents_slice]
idx_set = np.argwhere(agent["track_id"] == agents_frame["track_id"])
assert len(idx_set) in [0, 1]
if len(idx_set):
# CASE 1
# the agent is already there and we can just update it
        # we also set label_probabilities from the current one to ensure it is high enough
idx_set = int(idx_set[0])
agents_frame[idx_set: idx_set + 1] = agent
else:
# CASE 2
# we need to insert the agent and move everything
dataset.agents = np.concatenate(
[dataset.agents[0: agents_slice.stop], agent, dataset.agents[agents_slice.stop:]], 0
)
# move end of the current frame and all other frames start and end
dataset.frames[frame_idx]["agent_index_interval"] += (0, 1)
dataset.frames[frame_idx + 1:]["agent_index_interval"] += 1
def disable_agents(dataset: ChunkedDataset, allowlist: np.ndarray) -> None:
"""Disable all agents in dataset except for the ones in allowlist
Assumptions:
- the dataset has only 1 scene
- the dataset is in numpy format and not zarr anymore
:param dataset: the single-scene dataset
:param allowlist: 1D np array of track_ids to keep
"""
if not len(dataset.scenes) == 1:
raise ValueError(f"dataset should have a single scene, got {len(dataset.scenes)}")
if not isinstance(dataset.agents, np.ndarray):
raise ValueError("dataset agents should be an editable np array")
if not len(allowlist.shape) == 1:
raise ValueError("allow list should be 1D")
agent_track_ids = dataset.agents["track_id"]
mask_disable = ~np.in1d(agent_track_ids, allowlist)
# this will set those agents as invisible
    # we also zero their pose and extent
dataset.agents["centroid"][mask_disable] *= 0
dataset.agents["yaw"][mask_disable] *= 0
dataset.agents["extent"][mask_disable] *= 0
dataset.agents["label_probabilities"][mask_disable] = -1
def get_frames_subset(dataset: ChunkedDataset, frame_start_idx: int, frame_end_idx: int) -> ChunkedDataset:
"""Get a new dataset with frames between start (included) and end (excluded).
Assumptions:
- the dataset has only 1 scene
- the dataset is in numpy format and not zarr anymore
:param dataset: the single-scene dataset.
:param frame_start_idx: first frame to keep.
:param frame_end_idx: where to stop taking frames (excluded).
"""
if not len(dataset.scenes) == 1:
raise ValueError(f"dataset should have a single scene, got {len(dataset.scenes)}")
if not isinstance(dataset.agents, np.ndarray):
raise ValueError("dataset agents should be an editable np array")
if not isinstance(dataset.tl_faces, np.ndarray):
raise ValueError("dataset tls should be an editable np array")
if not isinstance(dataset.frames, np.ndarray):
raise ValueError("dataset frames should be an editable np array")
if frame_start_idx >= len(dataset.frames):
raise ValueError(f"frame start {frame_start_idx} is over the length of the dataset")
if frame_end_idx > len(dataset.frames):
raise ValueError(f"frame end {frame_end_idx} is over the length of the dataset")
if frame_start_idx >= frame_end_idx:
raise ValueError(f"end frame {frame_end_idx} should be higher than start {frame_start_idx}")
if frame_start_idx < 0:
raise ValueError(f"start frame {frame_start_idx} should be positive")
new_dataset = ChunkedDataset("")
new_dataset.scenes = dataset.scenes.copy()
new_dataset.scenes[0]["start_time"] = dataset.frames[frame_start_idx]["timestamp"]
new_dataset.scenes[0]["end_time"] = dataset.frames[frame_end_idx - 1]["timestamp"]
new_dataset.frames = dataset.frames[frame_start_idx:frame_end_idx].copy()
new_dataset.scenes[0]["frame_index_interval"] = (0, len(new_dataset.frames))
agent_slice = get_agents_slice_from_frames(*dataset.frames[[frame_start_idx, frame_end_idx - 1]])
tls_slice = get_tl_faces_slice_from_frames(*dataset.frames[[frame_start_idx, frame_end_idx - 1]])
new_dataset.frames["agent_index_interval"] -= new_dataset.frames["agent_index_interval"][0, 0]
new_dataset.frames["traffic_light_faces_index_interval"] -= new_dataset.frames[
"traffic_light_faces_index_interval"
][0, 0]
new_dataset.agents = dataset.agents[agent_slice].copy()
new_dataset.tl_faces = dataset.tl_faces[tls_slice].copy()
return new_dataset
|
{"hexsha": "3660acac918f0a8864e6a8c083a30724c537cf53", "size": 5635, "ext": "py", "lang": "Python", "max_stars_repo_path": "l5kit/l5kit/simulation/utils.py", "max_stars_repo_name": "cdicle-motional/l5kit", "max_stars_repo_head_hexsha": "4dc4ee5391479bb71f0b373f39c316f9eef5a961", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-04T17:48:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T17:48:53.000Z", "max_issues_repo_path": "l5kit/l5kit/simulation/utils.py", "max_issues_repo_name": "cdicle-motional/l5kit", "max_issues_repo_head_hexsha": "4dc4ee5391479bb71f0b373f39c316f9eef5a961", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "l5kit/l5kit/simulation/utils.py", "max_forks_repo_name": "cdicle-motional/l5kit", "max_forks_repo_head_hexsha": "4dc4ee5391479bb71f0b373f39c316f9eef5a961", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-19T08:13:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-19T08:13:46.000Z", "avg_line_length": 45.4435483871, "max_line_length": 107, "alphanum_fraction": 0.7082519965, "include": true, "reason": "import numpy", "num_tokens": 1325}
|
import pytest
import os
import cv2
import numpy as np
from plantcv.plantcv.transform import (get_color_matrix, get_matrix_m, calc_transformation_matrix, apply_transformation_matrix,
save_matrix, load_matrix, correct_color, create_color_card_mask, quick_color_check,
find_color_card)
from plantcv.plantcv import outputs
def test_get_color_matrix(transform_test_data):
"""Test for PlantCV."""
# load in target_matrix
matrix_compare = transform_test_data.load_npz(transform_test_data.target_matrix_file)
# Read in rgb_img and gray-scale mask
rgb_img = cv2.imread(transform_test_data.target_img)
mask = cv2.imread(transform_test_data.colorcard_mask, -1)
# The result should be a len(np.unique(mask))-1 x 4 matrix
_, matrix = get_color_matrix(rgb_img, mask)
assert np.array_equal(matrix, matrix_compare)
def test_get_color_matrix_img(transform_test_data):
"""Test for PlantCV."""
# Read in two gray-scale images
rgb_img = cv2.imread(transform_test_data.colorcard_mask, -1)
mask = cv2.imread(transform_test_data.colorcard_mask, -1)
# The input for rgb_img needs to be an RGB image
with pytest.raises(RuntimeError):
_, _ = get_color_matrix(rgb_img, mask)
def test_get_color_matrix_mask(transform_test_data):
"""Test for PlantCV."""
    # Read in an RGB image and a mask without the gray-scale flag
    rgb_img = cv2.imread(transform_test_data.target_img)
    mask = cv2.imread(transform_test_data.colorcard_mask)
    # The mask input needs to be a gray-scale image, so this should raise an error
with pytest.raises(RuntimeError):
_, _ = get_color_matrix(rgb_img, mask)
def test_get_matrix_m(transform_test_data):
"""Test for PlantCV."""
# load in comparison matrices
matrix_compare_m = transform_test_data.load_npz(transform_test_data.matrix_m1_file)
matrix_compare_b = transform_test_data.load_npz(transform_test_data.matrix_b1_file)
# read in matrices
t_matrix = transform_test_data.load_npz(transform_test_data.target_matrix_file)
s_matrix = transform_test_data.load_npz(transform_test_data.source1_matrix_file)
# apply matrices to function
_, matrix_m, matrix_b = get_matrix_m(t_matrix, s_matrix)
matrix_compare_m = np.rint(matrix_compare_m)
matrix_compare_b = np.rint(matrix_compare_b)
matrix_m = np.rint(matrix_m)
matrix_b = np.rint(matrix_b)
assert np.array_equal(matrix_m, matrix_compare_m) and np.array_equal(matrix_b, matrix_compare_b)
def test_get_matrix_m_unequal_data(transform_test_data):
"""Test for PlantCV."""
# load in comparison matrices
matrix_compare_m = transform_test_data.load_npz(transform_test_data.matrix_m2_file)
matrix_compare_b = transform_test_data.load_npz(transform_test_data.matrix_b2_file)
# read in matrices
t_matrix = transform_test_data.load_npz(transform_test_data.target_matrix_file)
s_matrix = transform_test_data.load_npz(transform_test_data.source2_matrix_file)
# apply matrices to function
_, matrix_m, matrix_b = get_matrix_m(t_matrix, s_matrix)
matrix_compare_m = np.rint(matrix_compare_m)
matrix_compare_b = np.rint(matrix_compare_b)
matrix_m = np.rint(matrix_m)
matrix_b = np.rint(matrix_b)
assert np.array_equal(matrix_m, matrix_compare_m) and np.array_equal(matrix_b, matrix_compare_b)
def test_calc_transformation_matrix(transform_test_data):
"""Test for PlantCV."""
# load in comparison matrices
matrix_compare = transform_test_data.load_npz(transform_test_data.transformation_matrix_file)
# read in matrices
matrix_m = transform_test_data.load_npz(transform_test_data.matrix_m1_file)
matrix_b = transform_test_data.load_npz(transform_test_data.matrix_b1_file)
# apply to function
_, matrix_t = calc_transformation_matrix(matrix_m, matrix_b)
matrix_t = np.rint(matrix_t)
matrix_compare = np.rint(matrix_compare)
assert np.array_equal(matrix_t, matrix_compare)
def test_calc_transformation_matrix_b_incorrect(transform_test_data):
"""Test for PlantCV."""
# read in matrices
matrix_m = transform_test_data.load_npz(transform_test_data.matrix_m1_file)
matrix_b = transform_test_data.load_npz(transform_test_data.matrix_b1_file)
matrix_b = np.asmatrix(matrix_b, float)
with pytest.raises(RuntimeError):
_, _ = calc_transformation_matrix(matrix_m, matrix_b.T)
def test_calc_transformation_matrix_not_mult(transform_test_data):
"""Test for PlantCV."""
# read in matrices
matrix_m = transform_test_data.load_npz(transform_test_data.matrix_m1_file)
matrix_b = transform_test_data.load_npz(transform_test_data.matrix_b1_file)
with pytest.raises(RuntimeError):
_, _ = calc_transformation_matrix(matrix_m, matrix_b[:3])
def test_calc_transformation_matrix_not_mat(transform_test_data):
"""Test for PlantCV."""
# read in matrices
matrix_m = transform_test_data.load_npz(transform_test_data.matrix_m1_file)
matrix_b = transform_test_data.load_npz(transform_test_data.matrix_b1_file)
with pytest.raises(RuntimeError):
_, _ = calc_transformation_matrix(matrix_m[:, 1], matrix_b[:, 1])
def test_apply_transformation(transform_test_data):
"""Test for PlantCV."""
# load corrected image to compare
corrected_compare = cv2.imread(transform_test_data.source_corrected)
# read in matrices
matrix_t = transform_test_data.load_npz(transform_test_data.transformation_matrix_file)
# read in images
target_img = cv2.imread(transform_test_data.target_img)
source_img = cv2.imread(transform_test_data.source1_img)
corrected_img = apply_transformation_matrix(source_img, target_img, matrix_t)
# assert source and corrected have same shape
assert np.array_equal(corrected_img, corrected_compare)
def test_apply_transformation_incorrect_t(transform_test_data):
"""Test for PlantCV."""
# read in matrices
matrix_t = transform_test_data.load_npz(transform_test_data.matrix_b1_file)
# read in images
target_img = cv2.imread(transform_test_data.target_img)
source_img = cv2.imread(transform_test_data.source1_img)
with pytest.raises(RuntimeError):
_ = apply_transformation_matrix(source_img, target_img, matrix_t)
def test_apply_transformation_incorrect_img(transform_test_data):
"""Test for PlantCV."""
# read in matrices
matrix_t = transform_test_data.load_npz(transform_test_data.transformation_matrix_file)
# read in images
target_img = cv2.imread(transform_test_data.target_img)
source_img = cv2.imread(transform_test_data.colorcard_mask, -1)
with pytest.raises(RuntimeError):
_ = apply_transformation_matrix(source_img, target_img, matrix_t)
def test_save_matrix(transform_test_data, tmpdir):
"""Test for PlantCV."""
# Create a test tmp directory
cache_dir = tmpdir.mkdir("cache")
# read in matrix
matrix_t = transform_test_data.load_npz(transform_test_data.transformation_matrix_file)
# .npz filename
filename = os.path.join(cache_dir, 'test.npz')
save_matrix(matrix_t, filename)
assert os.path.exists(filename) is True
def test_save_matrix_incorrect_filename(transform_test_data):
"""Test for PlantCV."""
# read in matrix
matrix_t = transform_test_data.load_npz(transform_test_data.transformation_matrix_file)
# .npz filename
filename = "test"
with pytest.raises(RuntimeError):
save_matrix(matrix_t, filename)
def test_load_matrix(transform_test_data):
"""Test for PlantCV."""
# read in matrix_t
matrix_t = transform_test_data.load_npz(transform_test_data.transformation_matrix_file)
# test load function with matrix_t
matrix_t_loaded = load_matrix(transform_test_data.transformation_matrix_file)
assert np.array_equal(matrix_t, matrix_t_loaded)
def test_correct_color(transform_test_data, tmpdir):
"""Test for PlantCV."""
# Create a test tmp directory
cache_dir = tmpdir.mkdir("cache")
# load corrected image to compare
corrected_compare = cv2.imread(transform_test_data.source_corrected)
# Read in target, source, and gray-scale mask
target_img = cv2.imread(transform_test_data.target_img)
source_img = cv2.imread(transform_test_data.source1_img)
mask = cv2.imread(transform_test_data.colorcard_mask, -1)
_, _, _, corrected_img = correct_color(target_img, mask, source_img, mask, cache_dir)
# assert source and corrected have same shape
assert all([np.array_equal(corrected_img, corrected_compare),
os.path.exists(os.path.join(cache_dir, "target_matrix.npz")) is True,
os.path.exists(os.path.join(cache_dir, "source_matrix.npz")) is True,
os.path.exists(os.path.join(cache_dir, "transformation_matrix.npz")) is True])
def test_correct_color_output_dne(transform_test_data, tmpdir):
"""Test for PlantCV."""
# Create a test tmp directory
tmp_dir = tmpdir.mkdir("cache")
cache_dir = os.path.join(tmp_dir, "outputs")
# load corrected image to compare
corrected_compare = cv2.imread(transform_test_data.source_corrected)
# Read in target, source, and gray-scale mask
target_img = cv2.imread(transform_test_data.target_img)
source_img = cv2.imread(transform_test_data.source1_img)
mask = cv2.imread(transform_test_data.colorcard_mask, -1)
_, _, _, corrected_img = correct_color(target_img, mask, source_img, mask, cache_dir)
# assert source and corrected have same shape
assert all([np.array_equal(corrected_img, corrected_compare),
os.path.exists(os.path.join(cache_dir, "target_matrix.npz")) is True,
os.path.exists(os.path.join(cache_dir, "source_matrix.npz")) is True,
os.path.exists(os.path.join(cache_dir, "transformation_matrix.npz")) is True])
def test_create_color_card_mask(transform_test_data):
"""Test for PlantCV."""
# Load target image
rgb_img = cv2.imread(transform_test_data.target_img)
mask = create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=(166, 166), spacing=(21, 21), nrows=6, ncols=4,
exclude=[20, 0])
assert all([i == j] for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110,
120, 130, 140, 150, 160, 170, 180, 190, 200, 210,
220], dtype=np.uint8)))
def test_quick_color_check(transform_test_data):
"""Test for PlantCV."""
# Load target image
target_matrix = transform_test_data.load_npz(transform_test_data.target_matrix_file)
source_matrix = transform_test_data.load_npz(transform_test_data.source1_matrix_file)
quick_color_check(target_matrix, source_matrix, num_chips=22)
assert True
def test_find_color_card(transform_test_data):
"""Test for PlantCV."""
# Load rgb image
rgb_img = cv2.imread(transform_test_data.target_img)
_, start, space = find_color_card(rgb_img=rgb_img, threshold_type='adaptgauss', blurry=False, threshvalue=90)
assert start == (210, 212) and space == (8, 8)
def test_find_color_card_optional_parameters(transform_test_data):
"""Test for PlantCV."""
# Clear previous outputs
outputs.clear()
# Load rgb image
rgb_img = cv2.imread(transform_test_data.colorcard_img)
    # Test with threshold_type='normal'
_, _, _ = find_color_card(rgb_img=rgb_img, threshold_type='normal', blurry=True, background='light',
threshvalue=90, label="prefix")
assert int(outputs.observations["prefix"]["color_chip_size"]["value"]) == 15626
def test_find_color_card_otsu(transform_test_data):
"""Test for PlantCV."""
# Clear previous outputs
outputs.clear()
# Load rgb image
rgb_img = cv2.imread(transform_test_data.colorcard_img)
    # Test with threshold_type='otsu'
_, _, _ = find_color_card(rgb_img=rgb_img, threshold_type='otsu', blurry=True, background='light',
threshvalue=90, label="prefix")
assert int(outputs.observations["prefix"]["color_chip_size"]["value"]) == 15132
def test_find_color_card_optional_size_parameters(transform_test_data):
"""Test for PlantCV."""
# Clear previous outputs
outputs.clear()
# Load rgb image
rgb_img = cv2.imread(transform_test_data.colorcard_img)
_, _, _ = find_color_card(rgb_img=rgb_img, record_chip_size="mean")
assert int(outputs.observations["default"]["color_chip_size"]["value"]) == 15515
def test_find_color_card_optional_size_parameters_none(transform_test_data):
"""Test for PlantCV."""
# Clear previous outputs
outputs.clear()
# Load rgb image
rgb_img = cv2.imread(transform_test_data.colorcard_img)
_, _, _ = find_color_card(rgb_img=rgb_img, record_chip_size=None)
assert outputs.observations.get("default") is None
def test_find_color_card_bad_record_chip_size(transform_test_data):
"""Test for PlantCV."""
# Clear previous outputs
outputs.clear()
# Load rgb image
rgb_img = cv2.imread(transform_test_data.target_img)
_, _, _ = find_color_card(rgb_img=rgb_img, record_chip_size='averageeeed')
assert outputs.observations["default"]["color_chip_size"]["value"] is None
def test_find_color_card_bad_thresh_input(transform_test_data):
"""Test for PlantCV."""
# Load rgb image
rgb_img = cv2.imread(transform_test_data.target_img)
with pytest.raises(RuntimeError):
_, _, _ = find_color_card(rgb_img=rgb_img, threshold_type='gaussian')
def test_find_color_card_bad_background_input(transform_test_data):
"""Test for PlantCV."""
# Load rgb image
rgb_img = cv2.imread(transform_test_data.target_img)
with pytest.raises(RuntimeError):
_, _, _ = find_color_card(rgb_img=rgb_img, background='lite')
def test_find_color_card_none_found(transform_test_data):
"""Test for PlantCV."""
# Load rgb image
rgb_img = cv2.imread(transform_test_data.target_img)
with pytest.raises(RuntimeError):
_, _, _ = find_color_card(rgb_img=rgb_img, threshold_type="otsu")
|
{"hexsha": "48a8adca689ae7b1e9f6c39f927c5822193a31cd", "size": 14190, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/plantcv/transform/test_color_correction.py", "max_stars_repo_name": "ygarrot/plantcv", "max_stars_repo_head_hexsha": "e934a891e0d1bf8987ca6a9f982a4ac1f420bfe7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-03T12:08:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T12:08:59.000Z", "max_issues_repo_path": "tests/plantcv/transform/test_color_correction.py", "max_issues_repo_name": "HUISTENCOFFEE/plantcv", "max_issues_repo_head_hexsha": "f38f7de53663522eb770870b70823d5fc46d0c0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/plantcv/transform/test_color_correction.py", "max_forks_repo_name": "HUISTENCOFFEE/plantcv", "max_forks_repo_head_hexsha": "f38f7de53663522eb770870b70823d5fc46d0c0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3944954128, "max_line_length": 127, "alphanum_fraction": 0.736222692, "include": true, "reason": "import numpy", "num_tokens": 3215}
|
[STATEMENT]
lemma master1_automation:
assumes "g \<in> O(MASTER_BOUND'' p')" "1 < (\<Sum>i<k. as ! i * bs ! i powr p')"
"eventually (\<lambda>x. f x > 0) at_top"
shows "f \<in> \<Theta>(MASTER_BOUND p 0 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f \<in> \<Theta>(MASTER_BOUND p 0 0)
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. f \<in> \<Theta>(MASTER_BOUND p 0 0)
[PROOF STEP]
have A: "MASTER_BOUND p 0 0 \<in> \<Theta>(\<lambda>x::nat. x powr p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. MASTER_BOUND p 0 0 \<in> \<Theta>(\<lambda>x. real x powr p)
[PROOF STEP]
unfolding MASTER_BOUND_def[abs_def]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. real x powr p * ln (real x) powr 0 * ln (ln (real x)) powr 0) \<in> \<Theta>(\<lambda>x. real x powr p)
[PROOF STEP]
by (intro landau_real_nat_transfer bigthetaI_cong
eventually_mono[OF eventually_ge_at_top[of "3::real"]]) (auto dest!: ln_1_imp_less_3)
[PROOF STATE]
proof (state)
this:
MASTER_BOUND p 0 0 \<in> \<Theta>(\<lambda>x. real x powr p)
goal (1 subgoal):
1. f \<in> \<Theta>(MASTER_BOUND p 0 0)
[PROOF STEP]
have B: "O(MASTER_BOUND'' p') = O(\<lambda>x::nat. real x powr p')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. O(MASTER_BOUND'' p') = O(\<lambda>x. real x powr p')
[PROOF STEP]
using eventually_ge_at_top[of "2::nat"]
[PROOF STATE]
proof (prove)
using this:
eventually ((\<le>) 2) sequentially
goal (1 subgoal):
1. O(MASTER_BOUND'' p') = O(\<lambda>x. real x powr p')
[PROOF STEP]
by (intro landau_o.big.cong) (auto elim!: eventually_mono simp: MASTER_BOUND''_def)
[PROOF STATE]
proof (state)
this:
O(MASTER_BOUND'' p') = O(\<lambda>x. real x powr p')
goal (1 subgoal):
1. f \<in> \<Theta>(MASTER_BOUND p 0 0)
[PROOF STEP]
from landau_theta.cong_bigtheta[OF A] B assms(1) master1[OF _ assms(2-)]
[PROOF STATE]
proof (chain)
picking this:
\<Theta>(MASTER_BOUND p 0 0) = \<Theta>(\<lambda>x. real x powr p)
O(MASTER_BOUND'' p') = O(\<lambda>x. real x powr p')
g \<in> O(MASTER_BOUND'' p')
g \<in> O(\<lambda>x. real x powr p') \<Longrightarrow> f \<in> \<Theta>(\<lambda>x. real x powr p)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<Theta>(MASTER_BOUND p 0 0) = \<Theta>(\<lambda>x. real x powr p)
O(MASTER_BOUND'' p') = O(\<lambda>x. real x powr p')
g \<in> O(MASTER_BOUND'' p')
g \<in> O(\<lambda>x. real x powr p') \<Longrightarrow> f \<in> \<Theta>(\<lambda>x. real x powr p)
goal (1 subgoal):
1. f \<in> \<Theta>(MASTER_BOUND p 0 0)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
f \<in> \<Theta>(MASTER_BOUND p 0 0)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1242, "file": "Akra_Bazzi_Akra_Bazzi_Method", "length": 11}
|
'''Implementations of uniform distributions.'''
import numpy as np
from astropy import units
from astropy.coordinates import SkyCoord
TWO_PI = 2*np.pi
@units.quantity_input(area=units.sr)
def uniform_around(centre, area, size):
'''Uniform distribution of points around location.
Draws randomly distributed points from a circular region of the given area
around the centre point.
Parameters
----------
centre : `~astropy.coordinates.SkyCoord`
Centre of the sampling region.
area : `~astropy.units.Quantity`
Area of the sampling region as a `~astropy.units.Quantity` in units of
solid angle.
size : int
Number of points to draw.
Returns
-------
coords : `~astropy.coordinates.SkyCoord`
Randomly distributed points around the centre. The coordinates are
returned in the same frame as the input.
Examples
--------
See :ref:`User Documentation <skypy.position.uniform_around>`.
'''
# get cosine of maximum separation from area
cos_theta_max = 1 - area.to_value(units.sr)/TWO_PI
# randomly sample points within separation
theta = np.arccos(np.random.uniform(cos_theta_max, 1, size=size))
phi = np.random.uniform(0, TWO_PI, size=size)
# construct random sky coordinates around centre
return centre.directional_offset_by(phi, theta)
def uniform_in_pixel(nside, ipix, size, nest=False):
'''Uniform distribution of points over healpix pixel.
Draws randomly distributed points from the healpix pixel `ipix` for a map
with a given `nside` parameter.
Parameters
----------
nside : int
Healpix map `nside` parameter.
ipix : int
Healpix map pixel index.
size : int
Number of points to draw.
nest : bool, optional
If True assume ``NESTED`` pixel ordering, otherwise ``RING`` pixel
ordering. Default is ``RING`` pixel ordering.
Returns
-------
coords : `~astropy.coordinates.SkyCoord`
Randomly distributed points over the healpix pixel.
Warnings
--------
This function requires the ``healpy`` package.
Examples
--------
See :ref:`User Documentation <skypy.position.uniform_in_pixel>`.
'''
from healpy import pix2ang, max_pixrad, nside2pixarea, ang2pix
# get the centre of the healpix pixel as a SkyCoord
centre_lon, centre_lat = pix2ang(nside, ipix, nest=nest, lonlat=True)
centre = SkyCoord(centre_lon, centre_lat, unit=units.deg)
# get the maximum radius of a healpix pixel in radian
r = max_pixrad(nside)
# use that radius as the aperture of a spherical area in steradian
area = TWO_PI*(1 - np.cos(r))*units.sr
# oversampling factor = 1/(probability of the draw)
over = area.value/nside2pixarea(nside)
# the array of longitudes and latitudes of the sample
lon, lat = np.empty(0), np.empty(0)
# rejection sampling over irregularly shaped healpix pixels
miss = size
while miss > 0:
# get the coordinates in a circular aperture around centre
sample = uniform_around(centre, area, int(np.ceil(miss*over)))
# get longitude and latitude of the sample
sample_lon, sample_lat = sample.ra.deg, sample.dec.deg
# accept those positions that are inside the correct pixel
accept = ipix == ang2pix(nside, sample_lon, sample_lat, nest=nest, lonlat=True)
# store the new positions
lon = np.append(lon, np.extract(accept, sample_lon))
lat = np.append(lat, np.extract(accept, sample_lat))
miss = size - len(lon)
# construct the coordinates
return SkyCoord(lon[:size], lat[:size], unit=units.deg)
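# --- Hedged usage sketch (not part of skypy's documented examples; the centre
# position, area, nside and pixel index below are illustrative assumptions).
# It only relies on the two functions above and on astropy units; the healpix
# call additionally needs the optional healpy dependency.
if __name__ == '__main__':
    centre = SkyCoord(10.0, -5.0, unit=units.deg)
    # 100 points uniformly distributed over a one-square-degree region
    sample = uniform_around(centre, 1*units.deg**2, size=100)
    print(sample[:3])
    # 50 points uniformly distributed over pixel 42 of an nside=32 healpix map
    pixel_sample = uniform_in_pixel(32, 42, size=50)
    print(pixel_sample[:3])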
|
{"hexsha": "661d15d4e2613694bb21ccad12a3f7ce431621c7", "size": 3703, "ext": "py", "lang": "Python", "max_stars_repo_path": "skypy/position/_uniform.py", "max_stars_repo_name": "ArthurTolley/skypy", "max_stars_repo_head_hexsha": "5621877ada75c667b1af7e665b02a91026f7ef0f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skypy/position/_uniform.py", "max_issues_repo_name": "ArthurTolley/skypy", "max_issues_repo_head_hexsha": "5621877ada75c667b1af7e665b02a91026f7ef0f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skypy/position/_uniform.py", "max_forks_repo_name": "ArthurTolley/skypy", "max_forks_repo_head_hexsha": "5621877ada75c667b1af7e665b02a91026f7ef0f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8583333333, "max_line_length": 87, "alphanum_fraction": 0.6718876587, "include": true, "reason": "import numpy,from astropy", "num_tokens": 892}
|
ouRevolution is a current publication of AS Papers which caters to the interests of African American students at UC Davis. Its Chief Editor is Alyssa Munson.
Mission Statement
ouRevolution Our Voice is an AS PAPERS publication. We place special emphasis on the unique needs of the African American and African community and commit to display the talents, opinions, and concerns thereof. We recognize that there are many injustices and inequalities which manifest themselves in the daily lives of the Black Student. We also recognize that history has proven that those with resources place themselves in a position to strengthen their society. OROV strives to replace ignorance with knowledge, racism with acceptance, and limits with opportunity. Through the written word we seek to empower the existence of Black students at UCD, and share with the greater community our experience.
|
{"hexsha": "43561e430736c1f2ccab254379f45d0ef47d6c62", "size": 907, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/ouRevolution.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/ouRevolution.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/ouRevolution.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 113.375, "max_line_length": 709, "alphanum_fraction": 0.8235942668, "num_tokens": 171}
|
[STATEMENT]
lemma eval_fps_0 [simp]:
"eval_fps (0 :: 'a :: {banach, real_normed_div_algebra} fps) z = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. eval_fps 0 z = (0::'a)
[PROOF STEP]
by (simp only: fps_const_0_eq_0 [symmetric] eval_fps_const)
|
{"llama_tokens": 122, "file": null, "length": 1}
|
"""
Get candidate object boxes (both GT and candidates) to provide to detectron to compute appearance/object scores
The way we have implemented this:
1. Run the object detector to get candidate objects (we used Detectron)
2. Create a database object merging candidate detections and ground truth
3. Forward the bounding boxes into the object detector to get appearance features for both detections and GT (used at training)
This script builds the database object and creates a "proposals.pkl" file that you can use as candidate proposals.
"""
import _init_
import numpy as np
import cPickle as pickle
import os.path as osp
DATA_PATH = '/sequoia/data2/jpeyre/iccv19_final/datasets'
data_name = 'hico'
if data_name=='hico':
from datasets.hico_api import Hico as Dataset
splits = ['trainval','train','val','test']
elif data_name=='hicoforcocoa':
from datasets.hico_api import Hico as Dataset
splits = ['trainval','test']
elif data_name=='cocoa':
from datasets.cocoa_api import Cocoa as Dataset
splits = ['all']
data_path = osp.join(DATA_PATH, data_name)
image_path = osp.join(data_path, 'images')
cand_dir = osp.join(data_path, 'detections')
proposals = {}
for split in splits:
dataset = Dataset(data_path, image_path, split, cand_dir=cand_dir,\
thresh_file='', use_gt=False, add_gt=True, train_mode=False, jittering=False, store_ram=[])
for im_id in dataset.image_ids:
cand_boxes = dataset.get_boxes(im_id)
obj_id = dataset.get_obj_id(im_id)
proposals[im_id] = np.hstack((obj_id[:,None], cand_boxes))
assert len(np.unique(obj_id))==len(obj_id), 'Careful duplicate obj_id'
pickle.dump(proposals, open(osp.join(cand_dir, split + '_proposals.pkl'),'wb'))
|
{"hexsha": "d464943e00cc7b0c6c797bb743cb55cee46a0682", "size": 1788, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/compute_candidate_boxes.py", "max_stars_repo_name": "doulemint/analogy", "max_stars_repo_head_hexsha": "75d17812080fde74c9032fb338ef0c6ab2667f44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/compute_candidate_boxes.py", "max_issues_repo_name": "doulemint/analogy", "max_issues_repo_head_hexsha": "75d17812080fde74c9032fb338ef0c6ab2667f44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/compute_candidate_boxes.py", "max_forks_repo_name": "doulemint/analogy", "max_forks_repo_head_hexsha": "75d17812080fde74c9032fb338ef0c6ab2667f44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5090909091, "max_line_length": 134, "alphanum_fraction": 0.7220357942, "include": true, "reason": "import numpy", "num_tokens": 430}
|
# Generation of the "Linea de Muerte" (baseline) for UBA, year 2020
# It only needs 32GB of RAM, 8 vCPU and one hour to run
# clean up memory
rm( list=ls() ) #remove all objects
gc() #garbage collection
require("data.table")
require("lightgbm")
require("DiceKriging")
require("mlrMBO")
# the result of the optimization is stored in this file
kbayesiana <- paste0("~/buckets/b2/opt_bayesiana_lgbm/linea_de_muerte.RDATA")
kBO_iter <- 10 #number of iterations of the Bayesian Optimization
#------------------------------------------------------------------------------
#this is the gain (ganancia) function that we want to optimize
#it is used internally by LightGBM
#it internally computes the best gain over all possible cutoff points
fganancia_logistic_lightgbm <- function(probs, data)
{
vlabels <- getinfo(data, "label")
tbl <- as.data.table( list( "prob"=probs, "gan"= ifelse( vlabels==1, 29250, -750 ) ) )
setorder( tbl, -prob )
tbl[ , gan_acum := cumsum( gan ) ]
gan <- max( tbl$gan_acum )
return( list( name= "ganancia", value= gan, higher_better= TRUE ) )
}
#------------------------------------------------------------------------------
#function that the Bayesian Optimization will optimize
estimar_lightgbm <- function( x )
{
modelo <- lgb.train(data= dBO_train,
objective= "binary", #la clase es binaria
eval= fganancia_logistic_lightgbm, #esta es la fuciona optimizar
valids= list( valid1= dBO_test1 ),
first_metric_only= TRUE,
metric= "custom", #ATENCION tremendamente importante
num_iterations= 999999, #un numero muy grande
early_stopping_rounds= 200,
min_data_in_leaf= as.integer( x$pmin_data_in_leaf ),
feature_fraction= 0.25,
learning_rate= 0.02,
feature_pre_filter= FALSE,
verbose= -1,
seed= 102191
)
ganancia1 <- unlist(modelo$record_evals$valid1$ganancia$eval)[ modelo$best_iter ]
  #this is the way to return an extra parameter
attr(ganancia1 ,"extras" ) <- list("pnum_iterations"= modelo$best_iter )
cat( modelo$best_iter, ganancia1, "\n" )
return( ganancia1 )
}
#------------------------------------------------------------------------------
#Here the main program begins
dataset <- fread("~/buckets/b1/datasets/fe_exthist.txt.gz")
campos_lags <- setdiff( colnames(dataset) , c("clase_ternaria","clase01", "numero_de_cliente","foto_mes") )
#add the lag-1 features
setorderv( dataset, c("numero_de_cliente","foto_mes") )
dataset[, paste0( campos_lags, "_lag1") :=shift(.SD, 1, NA, "lag"), by=numero_de_cliente, .SDcols= campos_lags]
#add the deltas of the lags, in a not very elegant way
for( vcol in campos_lags )
{
dataset[, paste0(vcol, "_delta1") := get( vcol) - get(paste0( vcol, "_lag1"))]
}
#convert the class to binary, taking integer values {0,1}
dataset[ , clase01 := ifelse( clase_ternaria=="BAJA+2", 1L, 0L) ]
#the fields to be used; numero_de_cliente is intentionally excluded
campos_buenos <- setdiff( colnames(dataset) , c("clase_ternaria","clase01","numero_de_cliente") )
#undersample the negatives
#keep ALL the positives, but only 5% of the negatives
set.seed(102191)
dataset[ , azar:= runif( nrow(dataset) ) ]
dataset[ ( foto_mes>=201701 & foto_mes<=202003 & foto_mes!=201912 & ( clase01==1 | azar<=0.05) ), BO_train := 1L]
#Test on 201912, the month excluded from the training period above
dataset[ foto_mes==201912, BO_test1 := 1L]
#put the data in the format LightGBM needs
dBO_train <- lgb.Dataset( data = data.matrix( dataset[ BO_train==1, campos_buenos, with=FALSE]),
label = dataset[ BO_train==1, clase01],
free_raw_data = TRUE
)
dBO_test1 <- lgb.Dataset( data = data.matrix( dataset[ BO_test1==1, campos_buenos, with=FALSE]),
label = dataset[ BO_test1==1, clase01],
free_raw_data = TRUE
)
dataset_aplicacion <- copy( dataset[ foto_mes==202005, ] )
#free memory by deleting the dataset
rm(dataset)
gc()
#Here the Bayesian Optimization configuration begins
configureMlr(show.learner.output = FALSE)
#configure the Bayesian search and the hyperparameters to be optimized
#please do not despair over the complexity
obj.fun <- makeSingleObjectiveFunction(
name = "OptimBayesiana", #un nombre que no tiene importancia
fn = estimar_lightgbm, #aqui va la funcion que quiero optimizar
minimize= FALSE, #quiero maximizar la ganancia
par.set = makeParamSet(
makeNumericParam("pmin_data_in_leaf", lower= 10, upper= 30000 )
),
              has.simple.signature = FALSE, #because the parameters are passed with makeParamSet
noisy= TRUE
)
ctrl <- makeMBOControl( save.on.disk.at.time = 600, save.file.path = kbayesiana )
ctrl <- setMBOControlTermination(ctrl, iters = kBO_iter )
ctrl <- setMBOControlInfill(ctrl, crit = makeMBOInfillCritEI())
surr.km <- makeLearner("regr.km", predict.type= "se", covtype= "matern3_2", control = list(trace = FALSE))
if(!file.exists(kbayesiana))
{
  #launch the Bayesian search
run <- mbo(obj.fun, learner = surr.km, control = ctrl)
} else {
  #resume processing where it left off
run <- mboContinue( kbayesiana )
}
#The optimum is left in run$x$pmin_data_in_leaf
#------------------------------------------------------------------------------
#compute the final model
modelo_final <- lgb.train(data= dBO_train,
objective= "binary",
eval= fganancia_logistic_lightgbm,
valids= list( valid1= dBO_test1 ),
first_metric_only= TRUE,
metric= "custom",
num_iterations= 999999,
early_stopping_rounds= 200,
                          learning_rate= 0.02, #ATTENTION, this is the value that gets changed
min_data_in_leaf= as.integer( run$x$pmin_data_in_leaf ),
feature_pre_filter= FALSE,
feature_fraction= 0.25,
verbose= -1,
seed= 102191
)
#Generate the files to be tested against the Public Leaderboard, keeping the best one
prediccion_202005 <- predict( modelo_final, data.matrix( dataset_aplicacion[ , campos_buenos, with=FALSE]))
#Generate possible cutoff probabilities to consider
for( vprob_corte in (25:25)/100 ) #from 0.15 to 0.35 (here fixed at 0.25)
{
entrega <- as.data.table( list( "numero_de_cliente"= dataset_aplicacion[ , numero_de_cliente],
"estimulo"= (prediccion_202005> vprob_corte) ) )
  #generate the output file
fwrite( entrega, logical01=TRUE, sep=",", file= paste0("~/buckets/b1/work/BORRADOR_lineademuerte_04_", vprob_corte*100, ".csv") )
data = data.table('numero_de_cliente' = dataset_aplicacion[, numero_de_cliente], 'prob' = prediccion_202005)
fwrite(data, sep = ',', file = paste0("~/buckets/b1/work/BORRADOR_lineademuerte_04.probs"))
}
#The outputs should be tested against the public leaderboard using the binary-search method, keeping the best one
|
{"hexsha": "409183875a6811d8f62be8931ac1a03143c2d3b0", "size": 7561, "ext": "r", "lang": "R", "max_stars_repo_path": "scripts/lineademuerte.r", "max_stars_repo_name": "miglesias91/dmeyf", "max_stars_repo_head_hexsha": "6b73adacd2f23644b8a14efd784d038c5ec79157", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/lineademuerte.r", "max_issues_repo_name": "miglesias91/dmeyf", "max_issues_repo_head_hexsha": "6b73adacd2f23644b8a14efd784d038c5ec79157", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/lineademuerte.r", "max_forks_repo_name": "miglesias91/dmeyf", "max_forks_repo_head_hexsha": "6b73adacd2f23644b8a14efd784d038c5ec79157", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0923913043, "max_line_length": 132, "alphanum_fraction": 0.6078561037, "num_tokens": 2040}
|
# from sklearn.tree import DecisionTreeClassifier
# import pickle
# import numpy as np
# def resume_clf(train_object):
# """
# :param train_object: List-like object with training data as values
# :return: 1 in case of any error, 0 otherwise.
# """
# try:
# X_train = np.array(train_object.keys())
# y_train = np.array([train_object[key] for key in train_object.keys()])
# clf = pickle.load('classifier.dat')
# clf.fit(X_train, y_train)
# pickle.dump(clf, 'classifier.dat')
# return 0
# except Exception as err:
# return 1
# Code to sync commits with deployment. Doing the needful.
|
{"hexsha": "d75189628cc3dba411a8c7341151f3b0bca0198b", "size": 682, "ext": "py", "lang": "Python", "max_stars_repo_path": "api/lib/algorithms.py", "max_stars_repo_name": "roshnet/peoplestat-api", "max_stars_repo_head_hexsha": "cf17a40def4dc2f094239870a09086c3f3a9eea5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "api/lib/algorithms.py", "max_issues_repo_name": "roshnet/peoplestat-api", "max_issues_repo_head_hexsha": "cf17a40def4dc2f094239870a09086c3f3a9eea5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "api/lib/algorithms.py", "max_forks_repo_name": "roshnet/peoplestat-api", "max_forks_repo_head_hexsha": "cf17a40def4dc2f094239870a09086c3f3a9eea5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0, "max_line_length": 80, "alphanum_fraction": 0.6173020528, "include": true, "reason": "import numpy", "num_tokens": 165}
|
module Issue252 where
data I : Set where
zero : I
data D : I → Set where
c : ∀ i → D i → D i
id : I → I
id i = i
index : ∀ i → D i → I
index i _ = i
foo : ∀ i → D i → D zero
foo .i (c i d) with id i
foo ._ (c i d) | zero = d
bar : ∀ i → D i → D zero
bar .i (c i d) with index i d
bar ._ (c i d) | zero = d
-- In the context of the first goal d has type D i′, in the second it
-- has type D i. Well, not any more.
|
{"hexsha": "23f0bf5417b640cc0809fe15968f0279e8acb970", "size": 424, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/succeed/Issue252.agda", "max_stars_repo_name": "larrytheliquid/agda", "max_stars_repo_head_hexsha": "477c8c37f948e6038b773409358fd8f38395f827", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-10-10T17:08:44.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-10T17:08:44.000Z", "max_issues_repo_path": "test/succeed/Issue252.agda", "max_issues_repo_name": "masondesu/agda", "max_issues_repo_head_hexsha": "70c8a575c46f6a568c7518150a1a64fcd03aa437", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/succeed/Issue252.agda", "max_forks_repo_name": "masondesu/agda", "max_forks_repo_head_hexsha": "70c8a575c46f6a568c7518150a1a64fcd03aa437", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-12T11:35:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T11:35:18.000Z", "avg_line_length": 16.96, "max_line_length": 69, "alphanum_fraction": 0.5471698113, "num_tokens": 166}
|
__author__ = 'INVESTIGACION'
import numpy as np
from copy import deepcopy
import math
def getHeuristic(matrix, pesos):
"""
    We will use Cj/Pj, where Pj is the number of rows covered by column j
    :param matrix: binary coverage matrix (rows x columns)
    :param pesos: column costs
    :return: array of (column index, cost/coverage) pairs sorted by the heuristic value
"""
    lHeuristic = np.zeros((len(pesos),2)) # Two columns: the first holds the column index, the second the heuristic value
for i in range(0,len(pesos)):
lHeuristic[i,0] = int(i)
#print i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i]))
lHeuristic[i,1] = float(pesos[i]/sum(matrix[:,i]))
#lHeuristic[lHeuristic[:,1].argsort()]
return lHeuristic[lHeuristic[:,1].argsort()]
def getRowHeuristics(matrix):
"""
    For each row we compute how often it is covered and take 1/coverage: the less a row is covered, the more important it is
    :param matrix: binary coverage matrix (rows x columns)
    :return: array of (row index, 1/coverage) pairs sorted by the heuristic value
"""
row, col = matrix.shape
    rHeuristic = np.zeros((row,2)) # Two columns: the first holds the row index, the second the heuristic value
for i in range(0,row):
rHeuristic[i,0] = int(i)
#print (i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i])))
rHeuristic[i,1] = 1/sum(matrix[i,:])
return rHeuristic[rHeuristic[:,1].argsort()]
def getRowColumn(matrix):
    # Builds a dictionary with the columns associated with each row
nrow, ncol = matrix.shape
dict = {}
for i in range(0,nrow):
list = []
for j in range(0,ncol):
if matrix[i,j]==1:
list.append(j)
dict[i] = deepcopy(list)
return dict
def getColumnRow(matrix):
    # Builds a dictionary with the rows associated with each column
nrow, ncol = matrix.shape
dictCol = {}
for j in range(0,ncol):
list = []
for i in range(0,nrow):
if matrix[i,j]==1:
list.append(i)
dictCol[j] = deepcopy(list)
return dictCol
def getProposedRows(uRows,rHeuristic,lparam):
"""
:param uRows: Uncovered rows
:param rHeuristic: Rows Heuristic
:param lparam: Number of rows proposed
:return: pRows proposed rows
"""
pRows = []
contador = 1
if len(uRows) < lparam:
pRows = uRows
else:
while len(pRows) < lparam:
if rHeuristic[len(rHeuristic)-contador,0] in uRows:
pRows.append(rHeuristic[len(rHeuristic)-contador,0])
contador = contador + 1
if contador > len(rHeuristic):
break
return pRows
def getProposedColumns(uColumns, cHeuristic,lparam):
"""
:param uRows: Uncovered rows
:param rHeuristic: Rows Heuristic
:param lparam: Number of rows proposed
:return: pRows proposed rows
"""
pColumns = []
contador = 0
#print 'Cuantas columnas propuestas', len(uColumns)
while len(pColumns) < lparam:
#print uColumns
if cHeuristic[contador,0] in uColumns:
pColumns.append(cHeuristic[contador,0])
if contador == len(cHeuristic)-1:
break
contador = contador + 1
return pColumns
def getProposedColumnsNew(uColumns, dictcHeuristics ,lparam):
"""
:param uRows: Uncovered rows
:param rHeuristic: Rows Heuristic
:param lparam: Number of rows proposed
:return: pRows proposed rows
"""
pColumns = []
tColumns = np.zeros((len(uColumns),2))
contador = 0
#print 'Cuantas columnas propuestas', len(uColumns)
for i in range(0,len(uColumns)):
tColumns[i,0] = uColumns[i]
tColumns[i,1] = dictcHeuristics[uColumns[i]]
return tColumns[tColumns[:,1].argsort()][0:lparam,0]
def getProposedColumnsDict(uColumns,dictcHeuristics,lparam):
pColumns = []
tColumns = np.zeros((len(uColumns),2))
for i in range(0,len(uColumns)):
tColumns[i,0] = uColumns[i]
tColumns[i,1] = dictcHeuristics[uColumns[i]]
tColumns = tColumns[tColumns[:,1].argsort()]
largo = min(lparam, len(tColumns[:,0]))
for i in range(0,largo):
pColumns.append(tColumns[i,0])
return pColumns
def getColumnsDict(cHeuristic):
dictcHeuristics = {}
for i in range(0,len(cHeuristic)):
dictcHeuristics[cHeuristic[i,0]] = cHeuristic[i,1]
return dictcHeuristics
def diff(A,B):
C = set(A) -set(B)
return list(C)
def Calcula_Measure_j(Option, Pesos,j, K_j):
"""
:param Option: Identify the Measure 0 Cost, 1 Normalize Cost,
:param Pesos: Is a variable in the measure calculus
:param Matrix: Column by row information
:param j: Column used for the calculus
:return: The measure
"""
if Option==0:
Measure = Pesos[j]
elif Option==1:
Measure = Pesos[j]/K_j
elif Option==2:
Measure = (Pesos[j]/math.log(K_j,2))
return Measure
def SeleccionaColumna(Matrix,S,cHeuristic):
row, col = Matrix.shape
columnTot = range(0,col)
columnComplement = diff(columnTot,S)
estado = 0
i = 0
while estado == 0:
if cHeuristic[i,0] in columnComplement:
column = cHeuristic[i,0]
estado = 1
i = i + 1
return column
def SeleccionaColumna1(S,cHeuristic):
estado = 0
i = 0
while estado == 0:
if cHeuristic[i,0] not in S:
column = cHeuristic[i,0]
estado = 1
i = i + 1
return column
def SeleccionaColumna6(Pesos, Matrix, R, S):
    """
    :param Pesos: Column weights (costs) used in the measure calculation
    :param Matrix: Column by row information
    :param R: Uncovered rows
    :param S: Columns in the current solution
    :return: index of the selected column
    """
    NumberCalculus = 2
    T = 1 # start choice
    Option1 = np.random.randint(0,9)
    #Option = np.random.randint(2)
    Option = 1
    #Choice = np.random.randint(0,T)
    rows, cols = Matrix.shape
    compl = range(0,cols)
    columnComplement = list(set(compl)-set(S))
    Matrix_F = Matrix[R,:]
    Matrix_F = Matrix_F[:,columnComplement]
    rowF, colF = Matrix_F.shape
    #print rowF, colF
    ColumnWeight = np.zeros((colF,NumberCalculus))
    Cont = 0
    for i in range(0,colF):
        ColumnWeight[Cont,0] = columnComplement[i]
        K_i = np.sum(Matrix_F[:,i])
        if K_i > 0:
            ColumnWeight[Cont,1] = Calcula_Measure_j(Option,Pesos,columnComplement[i],K_i)
        else:
            ColumnWeight[Cont,1] = Pesos[columnComplement[i]]*100
        Cont = Cont + 1
    ColumnWeight = ColumnWeight[ColumnWeight[:,1].argsort()]
    # We need to get the S complement
    if Option1 == 0:
        #print tam, Option1, len(ColumnWeight)
        tam = min(len(ColumnWeight),10)
        #print 'The length', len(ColumnWeight)
        if tam == 1:
            column = int(ColumnWeight[0,0])
        else:
            column = int(ColumnWeight[np.random.randint(1,tam),0])
    else:
        column = int(ColumnWeight[0,0])
    #print 'The column', column
    return column
def SeleccionaColumnaNueva(Pesos, Matrix, pRows, pColumns):
    """
    :param Pesos: Column weights (costs) used in the measure calculation
    :param Matrix: Column by row information
    :param pRows: Proposed (uncovered) rows
    :param pColumns: Proposed candidate columns
    :return: index of the selected column
    """
    NumberCalculus = 2
    T = 1 # start choice
    Option = np.random.randint(2)
    #Choice = np.random.randint(0,T)
    row, col = Matrix.shape
    #print 'Number of columns before', len(pColumns)
    columnComplement = list(set(pColumns).intersection(range(0,col)))
    #print 'Number of columns', len(columnComplement), pColumns
    Matrix_F = Matrix[pRows,:]
    Matrix_F = Matrix_F[:,columnComplement]
    rowF, colF = Matrix_F.shape
    ColumnWeight = np.zeros((colF,NumberCalculus))
    Cont = 0
    for i in range(0,colF):
        ColumnWeight[Cont,0] = columnComplement[i]
        K_i = np.sum(Matrix_F[:,i])
        if K_i > 0:
            ColumnWeight[Cont,1] = Calcula_Measure_j(Option,Pesos,columnComplement[i],K_i)
        else:
            ColumnWeight[Cont,1] = Pesos[columnComplement[i]]*100
        Cont = Cont + 1
    ColumnWeight = ColumnWeight[ColumnWeight[:,1].argsort()]
    # We need to get the S complement
    #tam = min(len(ColumnWeight)-1,9)
    Option1 = np.random.randint(0,5)
    if Option1 == 0:
        #print tam, Option1, len(ColumnWeight)
        tam = min(len(ColumnWeight),10)
        #print 'The length', len(ColumnWeight)
        #print tam
        if tam == 1:
            column = int(ColumnWeight[0,0])
        else:
            column = int(ColumnWeight[np.random.randint(1,tam),0])
    else:
        #print len(ColumnWeight), len(pRows), len(columnComplement)
        column = int(ColumnWeight[0,0])
    #print 'The computed column', column
    return column
def heuristByCols(pesos, uRows, pCols, dictCols):
    ColumnWeight = np.zeros((len(pCols),2))
    #print('pcols',len(pCols))
    for i in range(0,len(pCols)):
        lRows = dictCols[pCols[i]]
        ColumnWeight[i,0] = pCols[i]
        ColumnWeight[i,1] = float(pesos[pCols[i]])/len(list(set(lRows).intersection(set(uRows))))
    ColumnWeight = ColumnWeight[ColumnWeight[:,1].argsort()]
    Option1 = np.random.randint(0,5)
    if Option1 == 0:
        #print tam, Option1, len(ColumnWeight)
        tam = min(len(ColumnWeight),10)
        #print 'The length', len(ColumnWeight)
        #print tam
        if tam == 1:
            #print('Element value', ColumnWeight[0,0])
            column = int(ColumnWeight[0,0])
        else:
            #print('Element value', ColumnWeight[0,0])
            column = int(ColumnWeight[np.random.randint(1,tam),0])
    else:
        #print len(ColumnWeight), len(pRows), len(columnComplement)
        #print('Element value', ColumnWeight[0,0])
        column = int(ColumnWeight[0,0])
    #print 'The computed column', column
    return column
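# Usage sketch (illustrative data, not from the original experiments): `dictCols`
# maps a column index to the rows it covers, `pesos` holds the column costs, and
# the heuristic ranks candidates by cost per newly covered row, occasionally
# picking a random column among the best ten instead of the single best one.
#   pesos = {0: 2.0, 1: 3.0}
#   dictCols = {0: [0, 1], 1: [0, 1, 2]}
#   col = heuristByCols(pesos, uRows=[1, 2], pCols=[0, 1], dictCols=dictCols)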
|
{"hexsha": "18381a125939e0115bbe4161a548efe8a644addc", "size": 9815, "ext": "py", "lang": "Python", "max_stars_repo_path": "problems/repairs/heuristic.py", "max_stars_repo_name": "m-arnao-molina/SCA-QL-SARSA", "max_stars_repo_head_hexsha": "65f859fce96bb8c11509238c2f7a5d8dd2ad042a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "problems/repairs/heuristic.py", "max_issues_repo_name": "m-arnao-molina/SCA-QL-SARSA", "max_issues_repo_head_hexsha": "65f859fce96bb8c11509238c2f7a5d8dd2ad042a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "problems/repairs/heuristic.py", "max_forks_repo_name": "m-arnao-molina/SCA-QL-SARSA", "max_forks_repo_head_hexsha": "65f859fce96bb8c11509238c2f7a5d8dd2ad042a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2932098765, "max_line_length": 122, "alphanum_fraction": 0.6137544575, "include": true, "reason": "import numpy", "num_tokens": 2794}
|
Dali Place is a residential cul-de-sac in the Wildhorse section of East Davis.
Intersecting Streets
Hepworth Drive
|
{"hexsha": "ffe4c79ad184183224f11917c2d9b95789a81344", "size": 139, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Dali_Place.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Dali_Place.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Dali_Place.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.8571428571, "max_line_length": 95, "alphanum_fraction": 0.7985611511, "num_tokens": 33}
|
(*
This file is generated by Cogent
*)
theory U8rec_correctness_uabsfunsdeclfix
imports "build_u8rec/U8rec_uabsfunsdeclfix_AllRefine"
Cogent.ValueSemantics
begin
lemmas type_simps = U8rec_uabsfunsdeclfix_TypeProof.main_type_def
U8rec_uabsfunsdeclfix_TypeProof.abbreviatedType1_def
lemmas \<Xi>_simps = \<Xi>_def assoc_lookup.simps type_simps
overloading \<xi>0 \<equiv> \<xi>_0
begin
definition \<xi>0 :: "(funtyp, abstyp, ptrtyp) uabsfuns"
where
"\<xi>0 f x y = False"
end
definition val_abs_typing where "val_abs_typing \<equiv> \<lambda> _ _ _ _. False"
definition upd_abs_typing where "upd_abs_typing \<equiv> \<lambda> _ _ _ _ _ _ _ _. False"
definition abs_upd_val where "abs_upd_val \<equiv> \<lambda> _ _ _ _ _ _ _ _ _. False"
definition \<xi>\<^sub>m where "\<xi>\<^sub>m \<equiv> \<lambda> _ _ _. False"
definition \<xi>\<^sub>p where "\<xi>\<^sub>p \<equiv> \<lambda> _ _ _. False"
definition abs_repr where "abs_repr \<equiv> \<lambda> _. ([],[])"
lemmas abs_defs = val_abs_typing_def upd_abs_typing_def abs_upd_val_def \<xi>\<^sub>m_def \<xi>0_def
locale Abstract begin
end
sublocale Abstract \<subseteq> update_sem upd_abs_typing abs_repr
by(simp add:abs_defs;unfold_locales;simp)
sublocale Abstract \<subseteq> update_sem_init upd_abs_typing abs_repr
by (unfold_locales)
sublocale Abstract \<subseteq> value_sem val_abs_typing
by(simp add:abs_defs;unfold_locales;simp)
sublocale Abstract \<subseteq> U8rec_uabsfunsdeclfix _ upd_abs_typing abs_repr
by (unfold_locales)
sublocale Abstract \<subseteq> correspondence abs_repr val_abs_typing upd_abs_typing abs_upd_val
by (simp add:abs_defs;unfold_locales;simp)
sublocale Abstract \<subseteq> correspondence_init abs_repr val_abs_typing upd_abs_typing abs_upd_val
by (unfold_locales)
sublocale Abstract \<subseteq> shallow val_abs_typing
by (unfold_locales)
sublocale Abstract \<subseteq> U8rec_uabsfunsdeclfix_cogent_shallow _ abs_repr val_abs_typing upd_abs_typing abs_upd_val
by (unfold_locales)
context Abstract begin
lemma abs_stuff :
"rename_mono_prog rename \<Xi> \<xi>\<^sub>m \<xi>\<^sub>p"
"proc_env_matches \<xi>\<^sub>m \<Xi>"
"proc_ctx_wellformed \<Xi>"
"proc_env_u_v_matches \<xi>_0 \<xi>\<^sub>m \<Xi>"
"proc_env_matches_ptrs \<xi>_0 \<Xi>"
apply(subst rename_mono_prog_def, simp add:abs_defs)
apply(subst proc_env_matches_def, simp add: abs_defs)
apply(clarsimp simp add:proc_ctx_wellformed_def \<Xi>_simps)
apply(subst proc_env_u_v_matches_def)
apply(clarsimp simp add: \<Xi>_simps abs_defs )
apply(subst proc_env_matches_ptrs_def)
apply(clarsimp simp add: \<Xi>_simps abs_defs )
done
end
context Abstract begin
lemma assumes
"is_valid st p"
and eqp': "(p', st') \<in> fst (main' p st)"
shows
"heap st' p' = heap st p"
proof -
let ?vc = "heap st p"
let ?a = "a_C ?vc"
let ?vs = "\<lparr> a\<^sub>f = ?a \<rparr>"
let ?pu = "UPtr (ptr_val p) (RRecord [RPrim (Num U8)])"
let ?vu = "URecord [(UPrim (LU8 ?a), RPrim (Num U8))]"
let ?vv = "VRecord [VPrim (LU8 ?a)]"
let ?\<sigma> = "\<lambda> q. if q = ptr_val p then Some ?vu else None"
let ?typ = "TRecord [(''a'', TPrim (Num U8), Present)] (Boxed Writable undefined)"
have vv_typ: " vval_typing \<Xi> ?vv ?typ"
by (intro vval_typing_vval_typing_record.intros v_t_prim';simp)+
have uv_rel : " upd_val_rel \<Xi> ?\<sigma> ?pu ?vv ?typ {} {ptr_val p}"
apply(intro u_v_p_rec_w'[where w="{}" , simplified])
apply simp_all
apply(intro u_v_r_cons1[where r="{}" and r'="{}" and w="{}" and w'="{}", simplified])
apply(intro u_v_prim';simp)
apply(intro u_v_r_empty)
apply simp
done
have uv_matches : "(u_v_matches \<Xi> ?\<sigma>
[?pu] [?vv]
[Some ?typ] {} {ptr_val p})"
apply(intro u_v_matches_some[where r="{}" and r'="{}" and w="{ ptr_val p }" and w'="{}", simplified])
apply(rule uv_rel)
apply(intro u_v_matches.u_v_matches_empty)
done
have various_stuff:
"matches \<Xi> [?vv] [Some ?typ]"
" (?\<sigma>, st) \<in> state_rel"
" val_rel_shallow_C rename ?vs p ?vv ?pu \<xi>\<^sub>p ?\<sigma> \<Xi>"
"matches_ptrs \<Xi> ?\<sigma> [?pu] [Some ?typ] {} { ptr_val p } "
apply(subst matches_def,simp add:type_simps vv_typ)
apply(simp add:state_rel_def heap_rel_def All_def heap_rel_ptr_def assms)
apply(rule ext)
apply (clarsimp simp add:TypeRelSimp ValRelSimp)
apply blast
apply(simp add:val_rel_shallow_C_def)
apply(simp add:valRel_T0 ValRelSimp)
apply(intro exI[where x = ?typ])
apply(intro exI)
apply(rule uv_rel)
apply(rule u_v_matches_to_matches_ptrs )
using uv_matches
by blast
(* correspondence lemma from AllRefine *)
have cor: "corres_shallow_C rename state_rel
(U8rec_uabsfunsdeclfix_Shallow_Desugar.main ?vs) U8rec_uabsfunsdeclfix_TypeProof.main (main' p) \<xi>_0 \<xi>\<^sub>m \<xi>\<^sub>p
[?pu] [?vv] \<Xi>
[Some (fst (snd U8rec_uabsfunsdeclfix_TypeProof.main_type))]
?\<sigma> st"
apply(rule corres_shallow_C_main[where
vv\<^sub>s = ?vs and uv\<^sub>C = p and \<xi>\<^sub>m = \<xi>\<^sub>m and \<xi>\<^sub>p =\<xi>\<^sub>p
and uv\<^sub>m = ?pu and vv\<^sub>m = ?vv and ?vv\<^sub>p = ?vv and s = st and \<sigma> = ?\<sigma>]
)
apply(simp_all add:various_stuff abs_stuff type_simps)
apply(unfold_locales; simp add:various_stuff abs_stuff)
done
(* the meat: this block is where I need help. *)
{
fix \<sigma>' pu' vv'
assume u_eval:"
\<xi>_0, [?pu] \<turnstile> (\<lambda>q. ?\<sigma> q,
U8rec_uabsfunsdeclfix_TypeProof.main) \<Down>! (\<sigma>', pu')"
and v_eval: " \<xi>\<^sub>m , [?vv] \<turnstile> U8rec_uabsfunsdeclfix_TypeProof.main \<Down> rename_val rename (monoval vv')"
and st'_rel: " (\<sigma>', st') \<in> state_rel"
and v_cor: " val_rel_shallow_C rename
(U8rec_uabsfunsdeclfix_Shallow_Desugar.main ?vs) p' vv' pu' \<xi>\<^sub>p \<sigma>' \<Xi>"
(* I am forced to deconstruct the evaluation relation in the update semantics *)
have eqp\<sigma>: "pu' = ?pu \<and> \<sigma>' = ?\<sigma>"
using u_eval
apply(unfold U8rec_uabsfunsdeclfix_TypeProof.main_def)
apply(ind_cases "_, _ \<turnstile> (_, expr.Let _ _) \<Down>! (_, _)")
apply(ind_cases "_, _ \<turnstile> (_, Var 0) \<Down>! (_, _)")+
by simp
(* unfolding v_cor *)
obtain \<tau> r w repr where
eq': "vv' = VRecord [VPrim (LU8 (a\<^sub>f (U8rec_uabsfunsdeclfix_Shallow_Desugar.main ?vs)))]"
"pu' = UPtr (ptr_val p') repr"
and uv_rel': "upd_val_rel \<Xi> \<sigma>' pu' (rename_val rename (monoval vv')) \<tau> r w"
using v_cor
apply(simp add:val_rel_shallow_C_def valRel_T0 ValRelSimp)
apply(elim exE conjE)
by simp
have eqp: "p' = p"
using eq'
by(simp add:eqp\<sigma>)
(* the update value evaluation preserves typing *)
obtain w'
where "uval_typing \<Xi> \<sigma>' pu' ?typ {} w'"
and \<sigma>'f: "frame ?\<sigma> {ptr_val p} \<sigma>' w'"
using u_eval
apply -
apply(drule preservation_mono[rotated 3])
apply(rule U8rec_uabsfunsdeclfix_AllRefine.main_typecorrect'[simplified type_simps]; simp)
using preservation_mono abs_stuff various_stuff
by force+
(* can I show this without evaluating main in the value/update semantics? *)
have "heap st' p' = heap st p"
(* Help! *)
using st'_rel
apply(simp add:state_rel_def heap_rel_def heap_rel_ptr_meta)
apply(drule all_heap_rel_ptrD[where \<sigma> = \<sigma>' and p = p'])
apply(simp add:eqp\<sigma> eqp)
apply(simp add:TypeRelSimp)
apply (simp add:ValRelSimp)
by (metis t1_C_idupdates(1))
}
note meat = this
show ?thesis
using cor
apply -
apply(subst (asm) corres_shallow_C_def)
apply (elim impE)
apply(rule various_stuff abs_stuff )+
apply(fastforce intro:uv_matches simp add:type_simps)
apply(erule conjE)
apply(thin_tac _)
apply (elim allE)
apply(erule impE)
apply(rule eqp')
apply(elim exE conjE)
using meat
apply blast
done
qed
end
end
|
{"author": "amblafont", "repo": "dargent-examples", "sha": "dbcfdd6573c088f65d4dade1b351b3bb2bc073e7", "save_path": "github-repos/isabelle/amblafont-dargent-examples", "path": "github-repos/isabelle/amblafont-dargent-examples/dargent-examples-dbcfdd6573c088f65d4dade1b351b3bb2bc073e7/correctness/U8rec_correctness_uabsfunsdeclfix.thy"}
|
module SmoothDeltaDirectSumModule
use NumberKindsModule
use LoggerModule
use ParticlesModule
use EdgesModule, only : MaxEdgeLength
use PolyMesh2dModule
use FieldModule
use MPISetupModule
use SphereGeomModule, only : ChordDistance, SphereDistance, SphereProjection
implicit none
include 'mpif.h'
private
public SphereDelta, New, Delete
public InterpolateScalar
type SphereDelta
real(kreal) :: eps
end type
interface New
module procedure newPrivate
end interface
interface Delete
module procedure deletePrivate
end interface
interface InterpolateScalar
module procedure interpolateScalarPrivate
end interface
!
!----------------
! Logging
!----------------
!
logical(klog), save :: logInit = .FALSE.
type(Logger) :: log
character(len=28), save :: logKey = 'SphereDelta'
integer(kint), parameter :: logLevel = DEBUG_LOGGING_LEVEL
contains
!----------------
!
! Public functions
!
!----------------
subroutine newPrivate(self, sphereMesh, smoothRadiusMultplier)
type(SphereDelta), intent(out) :: self
type(PolyMesh2D), intent(in) :: sphereMesh
real(kreal), intent(in), optional :: smoothRadiusMultplier
real(kreal) :: mult
if ( .NOT. logInit ) call InitLogger(log, procRank)
if ( present(smoothRadiusMultplier) ) then
mult = smoothRadiusMultplier
else
mult = 2.0_kreal
endif
self%eps = mult * MaxEdgeLength( sphereMesh%edges, sphereMesh%particles)
end subroutine
subroutine deletePrivate(self)
type(SphereDelta), intent(inout) :: self
end subroutine
function interpolateScalarPrivate(self, sphereMesh, scalarField, interpLoc )
real(kreal) :: interpolateScalarPrivate
type(SphereDelta), intent(in) :: self
type(PolyMesh2D), intent(in) :: sphereMesh
type(Field), intent(in) :: scalarField
real(kreal), dimension(3), intent(in) :: interpLoc
!
integer(kint) :: j
real(kreal) :: dotProd
interpolateScalarPrivate = 0.0_kreal
do j = 1, sphereMesh%particles%N
if ( sphereMesh%particles%isActive(j) ) then
dotProd = sphereMesh%particles%x(j) * interpLoc(1) + sphereMesh%particles%y(j) * interpLoc(2) + &
sphereMesh%particles%z(j) * interpLoc(3)
interpolateScalarPrivate = interpolateScalarPrivate + scalarField%scalar(j) * sphereMesh%particles%area(j) * &
(-(1.0_kreal - dotProd)*(1.0_kreal - dotProd) + 2.0_kreal * self%eps*self%eps* dotProd) / &
( 4.0_kreal * PI * ( 1.0_kreal - dotProd + self%eps*self%eps) * (1.0_kreal - dotProd + self%eps*self%eps))
endif
enddo
end function
subroutine interpolateScalarParallel(self, sphereMesh, scalarField, scalarInterp, xPts, yPts, zPts, interpMPI)
type(SphereDelta), intent(in) :: self
type(PolyMesh2d), intent(in) :: sphereMesh
type(Field), intent(in) :: scalarField
real(kreal), dimension(:), intent(out) :: scalarInterp
real(kreal), dimension(:), intent(in) :: xPts, yPts, zPts
type(MPISetup), intent(in) :: interpMPI
!
integer(kint) :: i, j, mpiErrCode
real(kreal) :: dotProd, avg
avg = ScalarAverage(scalarField, sphereMesh%particles)
do i = interpMPI%indexStart(procRank), interpMPI%indexEnd(procRank)
scalarInterp(i) = 0.0_kreal
do j = 1, sphereMesh%particles%N
if ( sphereMesh%particles%isActive(j) ) then
dotProd = xPts(i) * sphereMesh%particles%x(j) + yPts(i) * sphereMesh%particles%y(j) + &
zPts(i) * sphereMesh%particles%z(j)
scalarInterp(i) = scalarInterp(i) + scalarField%scalar(j) * sphereMesh%particles%area(j) * &
(-(1.0_kreal - dotProd)*(1.0_kreal - dotProd) + 2.0_kreal * self%eps * self%eps * dotProd ) / &
( 4.0_kreal * PI * (1.0_kreal - dotProd + self%eps*self%eps)*(1.0_kreal - dotProd + self%eps*self%eps))
endif
enddo
scalarInterp(i) = scalarInterp(i) - avg
enddo
do i = 0, numProcs - 1
call MPI_BCAST(scalarInterp(interpMPI%indexStart(i):interpMPI%indexEnd(i)), interpMPI%messageLength(i), &
MPI_DOUBLE_PRECISION, i, MPI_COMM_WORLD, mpiErrCode)
enddo
end subroutine
subroutine interpolateScalarToLatLonParallel(self, sphereMesh, scalarField, scalarInterp, lons, lats, interpMPI )
type(SphereDelta), intent(in) :: self
type(PolyMesh2d), intent(in) :: sphereMesh
type(Field), intent(in) :: scalarField
real(kreal), dimension(:,:), intent(out) :: scalarInterp
real(kreal), dimension(:), intent(in) :: lons
real(kreal), dimension(:), intent(in) :: lats
type(MPISetup), intent(in) :: interpMPI
!
integer(kint) :: i, j, k, mpiErrCode
real(kreal) :: dotProd, avg
avg = ScalarAverage(scalarField, sphereMesh%particles)
do j = interpMPI%indexStart(procRank), interpMPI%indexEnd(procRank)
do i = 1, size(lats)
scalarInterp(i,j) = 0.0_kreal
do k = 1, sphereMesh%particles%N
if ( sphereMesh%particles%isActive(k)) then
dotProd = cos(lons(j))*cos(lats(i)) * sphereMesh%particles%x(k) + &
sin(lons(j))*cos(lats(i)) * sphereMesh%particles%y(k) + &
sin(lats(i)) * sphereMesh%particles%z(k)
scalarInterp(i,j) = scalarInterp(i,j) + scalarField%scalar(k) * sphereMesh%particles%area(k) * &
(-(1.0_kreal - dotProd)*(1.0_kreal - dotProd) + 2.0_kreal * self%eps * self%eps * dotProd ) / &
( 4.0_kreal * PI * (1.0_kreal - dotProd + self%eps*self%eps)*(1.0_kreal - dotProd + self%eps*self%eps))
endif
enddo
scalarInterp(i,j) = scalarInterp(i,j) - avg
enddo
enddo
do i = 0, numProcs - 1
call MPI_BCAST( scalarInterp(:,interpMPI%indexStart(i):interpMPI%indexEnd(i)), size(lats)*interpMPI%messageLength(i), &
MPI_DOUBLE_PRECISION, i, MPI_COMM_WORLD, mpiErrCode)
enddo
end subroutine
!----------------
!
! Private functions
!
!----------------
pure function deltaKernel( x, y, z, xt, yt, zt, eps )
real(kreal) :: deltaKernel
real(kreal), intent(in) :: x, y, z
real(kreal), intent(in) :: xt, yt, zt
real(kreal), intent(in) :: eps
!
real(kreal) :: dotProd
dotProd = x * xt + y * yt + z * zt
deltaKernel = ( -(1.0_kreal - dotProd) * (1.0_kreal - dotProd) + 2.0_kreal * eps * dotProd ) / &
( 4.0_kreal * PI * ( 1.0_kreal - dotProd + eps*eps) * (1.0_kreal - dotProd + eps*eps))
end function
subroutine InitLogger(aLog,rank)
! Initialize a logger for this module and processor
type(Logger), intent(out) :: aLog
integer(kint), intent(in) :: rank
write(logKey,'(A,A,I0.2,A)') trim(logKey),'_',rank,' : '
if ( rank == 0 ) then
call New(aLog,logLevel)
else
call New(aLog,ERROR_LOGGING_LEVEL)
endif
logInit = .TRUE.
end subroutine
end module
|
{"hexsha": "07c0eb3ad1f5509f47af56a3a7f4202162a0e208", "size": 6316, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "SmoothDeltaDirectSum.f90", "max_stars_repo_name": "pbosler/LPPM", "max_stars_repo_head_hexsha": "33b9572120ceca28ee56630a1af54f3befbda672", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SmoothDeltaDirectSum.f90", "max_issues_repo_name": "pbosler/LPPM", "max_issues_repo_head_hexsha": "33b9572120ceca28ee56630a1af54f3befbda672", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2015-01-21T21:50:20.000Z", "max_issues_repo_issues_event_max_datetime": "2015-01-21T21:54:31.000Z", "max_forks_repo_path": "development/SmoothDeltaDirectSum.f90", "max_forks_repo_name": "pbosler/LPPM", "max_forks_repo_head_hexsha": "33b9572120ceca28ee56630a1af54f3befbda672", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4228855721, "max_line_length": 121, "alphanum_fraction": 0.6949018366, "num_tokens": 1969}
|
"""Module to read data analog data from NI-DAQ device
A simple high-level wrapper for NI-DAQmx functions
Requires: PyDAQmx, Numpy
See COPYING file distributed along with the pyForceDAQ copyright and license terms.
"""
__author__ = 'Oliver Lindemann'
import ctypes as ct
import numpy as np
import PyDAQmx
from ._config import NUM_SAMPS_PER_CHAN, TIMEOUT, NI_DAQ_BUFFER_SIZE
class DAQReadAnalog(PyDAQmx.Task):
NUM_SAMPS_PER_CHAN = NUM_SAMPS_PER_CHAN
TIMEOUT = TIMEOUT
NI_DAQ_BUFFER_SIZE = NI_DAQ_BUFFER_SIZE
DAQ_TYPE = "PyDAQmx"
def __init__(self, configuration, read_array_size_in_samples):
""" DOC
read_array_size_in_samples for ReadAnalogF64 call
"""
# print('init')
PyDAQmx.Task.__init__(self)
# CreateAIVoltageChan
self.CreateAIVoltageChan(configuration.physicalChannel,
# physicalChannel
"", # nameToAssignToChannel,
PyDAQmx.DAQmx_Val_Diff, # terminalConfig
configuration.minVal, configuration.maxVal,
# min max Val
PyDAQmx.DAQmx_Val_Volts, # units
None # customScaleName
)
# CfgSampClkTiming
self.CfgSampClkTiming("", # source
configuration.rate, # rate
PyDAQmx.DAQmx_Val_Rising, # activeEdge
PyDAQmx.DAQmx_Val_ContSamps, # sampleMode
ct.c_uint64(DAQReadAnalog.NI_DAQ_BUFFER_SIZE)
# sampsPerChanToAcquire, i.e. buffer size
)
self._task_is_started = False
self.read_array_size_in_samples = ct.c_uint32(
read_array_size_in_samples)
# print(self.read_array_size_in_samples )
@property
def is_acquiring_data(self):
return self._task_is_started
def start_data_acquisition(self):
"""Start data acquisition of the NI device
call always before polling
"""
if not self._task_is_started:
self.StartTask()
self._task_is_started = True
def stop_data_acquisition(self):
""" Stop data acquisition of the NI device
"""
if self._task_is_started:
self.StopTask()
self._task_is_started = False
def read_analog(self):
"""Polling data
Reading data from NI device
Parameter
---------
array_size_in_samps : int
the array size in number of samples
Returns
-------
read_buffer : numpy array
the read data
read_samples : int
the number of read samples
"""
# fill in data
read_samples = ct.c_int32()
read_buffer = np.zeros((self.read_array_size_in_samples.value,),
dtype=np.float64)
error = self.ReadAnalogF64(DAQReadAnalog.NUM_SAMPS_PER_CHAN,
DAQReadAnalog.TIMEOUT,
PyDAQmx.DAQmx_Val_GroupByScanNumber,
# fillMode
read_buffer,
self.read_array_size_in_samples,
ct.byref(read_samples),
None)
return read_buffer, read_samples.value
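# Minimal usage sketch (illustrative; `my_config` stands for whatever settings
# object the caller builds, carrying the physicalChannel, minVal, maxVal and
# rate attributes read by __init__ above):
#   task = DAQReadAnalog(my_config, read_array_size_in_samples=600)
#   task.start_data_acquisition()
#   data, n_samples = task.read_analog()   # poll one buffer of samples
#   task.stop_data_acquisition()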
|
{"hexsha": "692c6c0836c43ccbe785e9023681afb7be1afd97", "size": 3580, "ext": "py", "lang": "Python", "max_stars_repo_path": "forceDAQ/daq/_daq_read_Analog_pydaqmx.py", "max_stars_repo_name": "raunaqbhirangi/pyForceDAQ", "max_stars_repo_head_hexsha": "a2a41cd7a4a4f0afd178bc5555ba4e0540902d30", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2016-06-27T12:07:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T08:59:44.000Z", "max_issues_repo_path": "forceDAQ/daq/_daq_read_Analog_pydaqmx.py", "max_issues_repo_name": "raunaqbhirangi/pyForceDAQ", "max_issues_repo_head_hexsha": "a2a41cd7a4a4f0afd178bc5555ba4e0540902d30", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-01-15T20:29:53.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-31T17:35:34.000Z", "max_forks_repo_path": "forceDAQ/daq/_daq_read_Analog_pydaqmx.py", "max_forks_repo_name": "raunaqbhirangi/pyForceDAQ", "max_forks_repo_head_hexsha": "a2a41cd7a4a4f0afd178bc5555ba4e0540902d30", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-01-14T18:31:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T05:56:40.000Z", "avg_line_length": 31.6814159292, "max_line_length": 83, "alphanum_fraction": 0.5466480447, "include": true, "reason": "import numpy", "num_tokens": 734}
|
import numpy as np
import matplotlib.pyplot as plt
import torch, torchvision
from torch.utils.tensorboard import SummaryWriter
import os, argparse
from tqdm import tqdm
from degmo.gan.run_utils import config_model_test, generation, manifold, interpolation, helix_interpolation
from degmo.utils import setup_seed, select_gpus, nats2bits, config_dataset, load_config
from degmo import LOGDIR, MODELDIR, VERSION, CONFIG_PATH
CONFIG = os.path.join(CONFIG_PATH, 'gan.json')
LOGDIR = os.path.join(LOGDIR, 'gan')
MODELDIR = os.path.join(MODELDIR, 'gan')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, help='file name of the stored model')
parser.add_argument('--mode', type=str, default='generation',
help='test mode, select from generation, linear_manifold, circular_manifold, interpolation')
parser.add_argument('--truncation', type=float, default=1.0,
help='truncation trick for generation')
parser.add_argument('--gpu', type=str, default='0')
args = parser.parse_args()
# config gpu
select_gpus(args.gpu)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
checkpoint = torch.load(os.path.join(MODELDIR, args.model + '.pt'), map_location='cpu')
config = checkpoint['config']
print('load model: {}'.format(config['model']))
print('seed: {}'.format(checkpoint['seed']))
print('model parameters: {}'.format(checkpoint['model_parameters']))
# config model
generator, latent_dim = config_model_test(checkpoint)
generator = generator.to(device)
if args.mode == 'generation':
generation(generator, latent_dim, args.truncation)
elif args.mode == 'linear_manifold':
manifold(generator, latent_dim, mode='linear')
elif args.mode == 'circular_manifold':
manifold(generator, latent_dim, mode='circular')
elif args.mode == 'interpolation':
writer = SummaryWriter(os.path.join(LOGDIR, 'GIF', args.model))
interpolation(generator, latent_dim, writer, args.truncation)
elif args.mode == 'helix_interpolation':
writer = SummaryWriter(os.path.join(LOGDIR, 'GIF', args.model))
helix_interpolation(generator, latent_dim, writer, args.truncation)
else:
raise ValueError('Mode {} is not supported!'.format(args.mode))
# # open log
# writer = SummaryWriter(LOGDIR + '{}'.format(args.model))
|
{"hexsha": "c900c6e8f0ecca4bae7a44822b8d4fe299419990", "size": 2476, "ext": "py", "lang": "Python", "max_stars_repo_path": "degmo/test_gan.py", "max_stars_repo_name": "IcarusWizard/Deep-Generative-Models", "max_stars_repo_head_hexsha": "4117c11ad944bdeff106a80adbb3642a076af64e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-21T15:50:59.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-17T02:44:19.000Z", "max_issues_repo_path": "degmo/test_gan.py", "max_issues_repo_name": "IcarusWizard/Deep-Generative-Models", "max_issues_repo_head_hexsha": "4117c11ad944bdeff106a80adbb3642a076af64e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "degmo/test_gan.py", "max_forks_repo_name": "IcarusWizard/Deep-Generative-Models", "max_forks_repo_head_hexsha": "4117c11ad944bdeff106a80adbb3642a076af64e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-02T05:49:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-02T05:49:29.000Z", "avg_line_length": 41.9661016949, "max_line_length": 116, "alphanum_fraction": 0.6942649435, "include": true, "reason": "import numpy", "num_tokens": 568}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayantjain1992@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Utilities for creating 2-D visualizations of Poincare models and Poincare distance heatmaps.
"""
import logging
from collections import Counter
import numpy as np
import plotly.graph_objs as go
from gensim.models.poincare import PoincareKeyedVectors
logger = logging.getLogger(__name__)
def poincare_2d_visualization(model, tree, figure_title, num_nodes=50, show_node_labels=()):
"""Create a 2-d plot of the nodes and edges of a 2-d poincare embedding.
Parameters
----------
model : :class:`~gensim.models.poincare.PoincareModel`
The model to visualize, model size must be 2.
tree : set
Set of tuples containing the direct edges present in the original dataset.
figure_title : str
Title of the plotted figure.
num_nodes : int or None
Number of nodes for which edges are to be plotted.
If `None`, all edges are plotted.
Helpful to limit this in case the data is too large to avoid a messy plot.
show_node_labels : iterable
Iterable of nodes for which to show labels by default.
Returns
-------
:class:`plotly.graph_objs.Figure`
Plotly figure that contains plot.
"""
vectors = model.kv.syn0
if vectors.shape[1] != 2:
raise ValueError('Can only plot 2-D vectors')
node_labels = model.kv.index2word
nodes_x = list(vectors[:, 0])
nodes_y = list(vectors[:, 1])
nodes = go.Scatter(
x=nodes_x, y=nodes_y,
mode='markers',
marker=dict(color='rgb(30, 100, 200)'),
text=node_labels,
textposition='bottom center'
)
nodes_x, nodes_y, node_labels = [], [], []
for node in show_node_labels:
vector = model.kv[node]
nodes_x.append(vector[0])
nodes_y.append(vector[1])
node_labels.append(node)
nodes_with_labels = go.Scatter(
x=nodes_x, y=nodes_y,
mode='markers+text',
marker=dict(color='rgb(200, 100, 200)'),
text=node_labels,
textposition='bottom center'
)
node_out_degrees = Counter(hypernym_pair[1] for hypernym_pair in tree)
if num_nodes is None:
chosen_nodes = list(node_out_degrees.keys())
else:
chosen_nodes = list(sorted(node_out_degrees.keys(), key=lambda k: -node_out_degrees[k]))[:num_nodes]
edges_x = []
edges_y = []
for u, v in tree:
if not(u in chosen_nodes or v in chosen_nodes):
continue
vector_u = model.kv[u]
vector_v = model.kv[v]
edges_x += [vector_u[0], vector_v[0], None]
edges_y += [vector_u[1], vector_v[1], None]
edges = go.Scatter(
x=edges_x, y=edges_y, mode="lines", hoverinfo='none',
line=dict(color='rgb(50,50,50)', width=1))
layout = go.Layout(
title=figure_title, showlegend=False, hovermode='closest', width=800, height=800)
return go.Figure(data=[edges, nodes, nodes_with_labels], layout=layout)
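# Usage sketch (illustrative only, not part of gensim): train a tiny 2-D Poincare
# model on made-up relations and plot it with the helper above.
#   from gensim.models.poincare import PoincareModel
#   relations = [('kangaroo', 'marsupial'), ('marsupial', 'mammal'), ('cat', 'mammal')]
#   model = PoincareModel(relations, size=2, negative=2)
#   model.train(epochs=50)
#   fig = poincare_2d_visualization(model, set(relations), 'Poincare hierarchy')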
def poincare_distance_heatmap(origin_point, x_range=(-1.0, 1.0), y_range=(-1.0, 1.0), num_points=100):
"""Create a heatmap of Poincare distances from `origin_point` for each point (x, y),
where x and y lie in `x_range` and `y_range` respectively, with `num_points` points chosen uniformly in both ranges.
Parameters
----------
origin_point : tuple (int, int)
(x, y) from which distances are to be measured and plotted.
x_range : tuple (int, int)
Range for x-axis from which to choose `num_points` points.
y_range : tuple (int, int)
Range for y-axis from which to choose `num_points` points.
num_points : int
Number of points to choose from `x_range` and `y_range`.
Notes
-----
Points outside the unit circle are ignored, since the Poincare distance is defined
only for points inside the circle boundaries (exclusive of the boundary).
Returns
-------
:class:`plotly.graph_objs.Figure`
Plotly figure that contains plot
"""
epsilon = 1e-8 # Can't choose (-1.0, -1.0) or (1.0, 1.0), distance undefined
x_range, y_range = list(x_range), list(y_range)
if x_range[0] == -1.0 and y_range[0] == -1.0:
x_range[0] += epsilon
y_range[0] += epsilon
if x_range[0] == 1.0 and y_range[0] == 1.0:
x_range[0] -= epsilon
y_range[0] -= epsilon
x_axis_values = np.linspace(x_range[0], x_range[1], num=num_points)
y_axis_values = np.linspace(x_range[0], x_range[1], num=num_points)
x, y = np.meshgrid(x_axis_values, y_axis_values)
all_points = np.dstack((x, y)).swapaxes(1, 2).swapaxes(0, 1).reshape(2, num_points ** 2).T
norms = np.linalg.norm(all_points, axis=1)
all_points = all_points[norms < 1]
origin_point = np.array(origin_point)
all_distances = PoincareKeyedVectors.poincare_dists(origin_point, all_points)
distances = go.Scatter(
x=all_points[:, 0],
y=all_points[:, 1],
mode='markers',
marker=dict(
size='9',
color=all_distances,
colorscale='Viridis',
showscale=True,
colorbar=go.ColorBar(
title='Poincare Distance'
),
),
text=[
'Distance from (%.2f, %.2f): %.2f' % (origin_point[0], origin_point[1], d)
for d in all_distances],
name='', # To avoid the default 'trace 0'
)
origin = go.Scatter(
x=[origin_point[0]],
y=[origin_point[1]],
name='Distance from (%.2f, %.2f)' % (origin_point[0], origin_point[1]),
mode='markers+text',
marker=dict(
size='10',
color='rgb(200, 50, 50)'
)
)
layout = go.Layout(
width=900,
height=800,
showlegend=False,
title='Poincare Distances from (%.2f, %.2f)' % (origin_point[0], origin_point[1]),
hovermode='closest',
)
return go.Figure(data=[distances, origin], layout=layout)
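# Usage sketch (illustrative): heatmap of Poincare distances measured from (0.3, 0.4).
#   fig = poincare_distance_heatmap((0.3, 0.4), num_points=200)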
|
{"hexsha": "f20fd8ab2d1e45324895848ed8432cd1bfe110f1", "size": 6168, "ext": "py", "lang": "Python", "max_stars_repo_path": "bobo/Lib/site-packages/gensim/viz/poincare.py", "max_stars_repo_name": "nehiridil/MLDays_nlp", "max_stars_repo_head_hexsha": "20d29d01836c82361cb1b656f2e98d7435a93622", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-08T14:05:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-19T02:56:48.000Z", "max_issues_repo_path": "bobo/Lib/site-packages/gensim/viz/poincare.py", "max_issues_repo_name": "nehiridil/MLDays_nlp", "max_issues_repo_head_hexsha": "20d29d01836c82361cb1b656f2e98d7435a93622", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-02-13T15:21:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T06:12:44.000Z", "max_forks_repo_path": "bobo/Lib/site-packages/gensim/viz/poincare.py", "max_forks_repo_name": "nehiridil/MLDays_nlp", "max_forks_repo_head_hexsha": "20d29d01836c82361cb1b656f2e98d7435a93622", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-26T01:47:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-26T01:47:15.000Z", "avg_line_length": 32.9839572193, "max_line_length": 120, "alphanum_fraction": 0.623540856, "include": true, "reason": "import numpy", "num_tokens": 1657}
|
# Import Python libraries
import cv2 # OpenCV
import numpy as np
# Mask for green objects in image using LAB (t = 115)
def colorMaskLAB(img: np.ndarray) -> np.ndarray:
# Convert image to LAB
    LAB = cv2.cvtColor(img, cv2.COLOR_BGR2LAB) # BGR image to LAB color space
    LAB = LAB[:, :, 1] # Extract the second channel from LAB (A-channel)
# cv2.imshow('LAB channel 1', LAB)
# Threshold image
threshold = 115 # Initialize of intensity threshold
LAB[LAB < threshold] = 0 # Assign pixels less than threshold to 0
LAB[LAB > threshold] = 255 # Assign pixels above threshold to 255
LAB = cv2.bitwise_not(LAB) # Convert img to binary
# Remove noise and holes in objects
LAB = cv2.erode(LAB, None, iterations=2) # Erode to remove noise
LAB = cv2.dilate(LAB, None, iterations=5) # Dilate to remove holes
# cv2.imshow('Color masking masking - LAB', LAB)
return LAB # Return binary img np.array
# Will only be called when running this file
# Used for testing functions in file
if __name__ == "__main__":
img = cv2.imread("TestImages/Gloves1.png") # Get image
maskedImg = colorMaskLAB(img) # Mask out green objects
cv2.imshow('Color masking masking - LAB', maskedImg) # Show masked img
cv2.waitKey(0) # Stop program to see image(s)
|
{"hexsha": "d83f82e3edd86cc767a98b0be18b89885aa7c685", "size": 1302, "ext": "py", "lang": "Python", "max_stars_repo_path": "softwareProgram/ColorMask.py", "max_stars_repo_name": "Sebastian-Whitehead/Medialogi-P3-02", "max_stars_repo_head_hexsha": "8fb144c17a10417aa2f5a01fcbc71b4d562d4d27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "softwareProgram/ColorMask.py", "max_issues_repo_name": "Sebastian-Whitehead/Medialogi-P3-02", "max_issues_repo_head_hexsha": "8fb144c17a10417aa2f5a01fcbc71b4d562d4d27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "softwareProgram/ColorMask.py", "max_forks_repo_name": "Sebastian-Whitehead/Medialogi-P3-02", "max_forks_repo_head_hexsha": "8fb144c17a10417aa2f5a01fcbc71b4d562d4d27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1666666667, "max_line_length": 78, "alphanum_fraction": 0.6827956989, "include": true, "reason": "import numpy", "num_tokens": 370}
|
%Terminals
DollarSign ::= '$'
Percent ::= '%'
_
a b c d e f g h i j k l m n o p q r s t u v w x y z
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
%End
%Headers
/.
static readonly int[] tokenKind = new int[128];
static bool __b_init = init_block();
static bool init_block()
{
tokenKind['$'] = $sym_type.$prefix$DollarSign$suffix$;
tokenKind['%'] = $sym_type.$prefix$Percent$suffix$;
tokenKind['_'] = $sym_type.$prefix$_$suffix$;
tokenKind['a'] = $sym_type.$prefix$a$suffix$;
tokenKind['b'] = $sym_type.$prefix$b$suffix$;
tokenKind['c'] = $sym_type.$prefix$c$suffix$;
tokenKind['d'] = $sym_type.$prefix$d$suffix$;
tokenKind['e'] = $sym_type.$prefix$e$suffix$;
tokenKind['f'] = $sym_type.$prefix$f$suffix$;
tokenKind['g'] = $sym_type.$prefix$g$suffix$;
tokenKind['h'] = $sym_type.$prefix$h$suffix$;
tokenKind['i'] = $sym_type.$prefix$i$suffix$;
tokenKind['j'] = $sym_type.$prefix$j$suffix$;
tokenKind['k'] = $sym_type.$prefix$k$suffix$;
tokenKind['l'] = $sym_type.$prefix$l$suffix$;
tokenKind['m'] = $sym_type.$prefix$m$suffix$;
tokenKind['n'] = $sym_type.$prefix$n$suffix$;
tokenKind['o'] = $sym_type.$prefix$o$suffix$;
tokenKind['p'] = $sym_type.$prefix$p$suffix$;
tokenKind['q'] = $sym_type.$prefix$q$suffix$;
tokenKind['r'] = $sym_type.$prefix$r$suffix$;
tokenKind['s'] = $sym_type.$prefix$s$suffix$;
tokenKind['t'] = $sym_type.$prefix$t$suffix$;
tokenKind['u'] = $sym_type.$prefix$u$suffix$;
tokenKind['v'] = $sym_type.$prefix$v$suffix$;
tokenKind['w'] = $sym_type.$prefix$w$suffix$;
tokenKind['x'] = $sym_type.$prefix$x$suffix$;
tokenKind['y'] = $sym_type.$prefix$y$suffix$;
tokenKind['z'] = $sym_type.$prefix$z$suffix$;
tokenKind['A'] = $sym_type.$prefix$A$suffix$;
tokenKind['B'] = $sym_type.$prefix$B$suffix$;
tokenKind['C'] = $sym_type.$prefix$C$suffix$;
tokenKind['D'] = $sym_type.$prefix$D$suffix$;
tokenKind['E'] = $sym_type.$prefix$E$suffix$;
tokenKind['F'] = $sym_type.$prefix$F$suffix$;
tokenKind['G'] = $sym_type.$prefix$G$suffix$;
tokenKind['H'] = $sym_type.$prefix$H$suffix$;
tokenKind['I'] = $sym_type.$prefix$I$suffix$;
tokenKind['J'] = $sym_type.$prefix$J$suffix$;
tokenKind['K'] = $sym_type.$prefix$K$suffix$;
tokenKind['L'] = $sym_type.$prefix$L$suffix$;
tokenKind['M'] = $sym_type.$prefix$M$suffix$;
tokenKind['N'] = $sym_type.$prefix$N$suffix$;
tokenKind['O'] = $sym_type.$prefix$O$suffix$;
tokenKind['P'] = $sym_type.$prefix$P$suffix$;
tokenKind['Q'] = $sym_type.$prefix$Q$suffix$;
tokenKind['R'] = $sym_type.$prefix$R$suffix$;
tokenKind['S'] = $sym_type.$prefix$S$suffix$;
tokenKind['T'] = $sym_type.$prefix$T$suffix$;
tokenKind['U'] = $sym_type.$prefix$U$suffix$;
tokenKind['V'] = $sym_type.$prefix$V$suffix$;
tokenKind['W'] = $sym_type.$prefix$W$suffix$;
tokenKind['X'] = $sym_type.$prefix$X$suffix$;
tokenKind['Y'] = $sym_type.$prefix$Y$suffix$;
tokenKind['Z'] = $sym_type.$prefix$Z$suffix$;
return true;
}
public static int getKind(char c)
{
return (((c & 0xFFFFFF80) == 0) /* 0 <= c < 128? */ ? tokenKind[c] : 0);
}
./
%End
|
{"hexsha": "77a23e259f58fa23803a2f0d4f700cd3dbf1cad3", "size": 3748, "ext": "gi", "lang": "GAP", "max_stars_repo_path": "lpg-generator-templates-2.1.00/include/csharp/KWLexerMapF.gi", "max_stars_repo_name": "kuafuwang/LPG2", "max_stars_repo_head_hexsha": "5cda43c109633d951facbeac361e060dd6d59dcd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-05T12:16:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-05T13:09:19.000Z", "max_issues_repo_path": "lpg-generator-templates-2.1.00/include/csharp/KWLexerMapF.gi", "max_issues_repo_name": "kuafuwang/LPG2", "max_issues_repo_head_hexsha": "5cda43c109633d951facbeac361e060dd6d59dcd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lpg-generator-templates-2.1.00/include/csharp/KWLexerMapF.gi", "max_forks_repo_name": "kuafuwang/LPG2", "max_forks_repo_head_hexsha": "5cda43c109633d951facbeac361e060dd6d59dcd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.7073170732, "max_line_length": 84, "alphanum_fraction": 0.5240128068, "num_tokens": 1141}
|
# Many scipy.stats functions support `axis` and `nan_policy` parameters.
# When the two are combined, it can be tricky to get all the behavior just
# right. This file contains utility functions useful for scipy.stats functions
# that support `axis` and `nan_policy`, including a decorator that
# automatically adds `axis` and `nan_policy` arguments to a function.
import numpy as np
import scipy.stats
import scipy.stats._stats_py
from functools import wraps
from scipy._lib._docscrape import FunctionDoc, Parameter
import inspect
def _broadcast_arrays(arrays, axis=None):
"""
Broadcast shapes of arrays, ignoring incompatibility of specified axes
"""
new_shapes = _broadcast_array_shapes(arrays, axis=axis)
if axis is None:
new_shapes = [new_shapes]*len(arrays)
return [np.broadcast_to(array, new_shape)
for array, new_shape in zip(arrays, new_shapes)]
def _broadcast_array_shapes(arrays, axis=None):
"""
Broadcast shapes of arrays, ignoring incompatibility of specified axes
"""
shapes = [np.asarray(arr).shape for arr in arrays]
return _broadcast_shapes(shapes, axis)
def _broadcast_shapes(shapes, axis=None):
"""
Broadcast shapes, ignoring incompatibility of specified axes
"""
if not shapes:
return shapes
# input validation
if axis is not None:
axis = np.atleast_1d(axis)
axis_int = axis.astype(int)
if not np.array_equal(axis_int, axis):
raise ValueError('`axis` must be an integer, a '
'tuple of integers, or `None`.')
axis = axis_int
# First, ensure all shapes have same number of dimensions by prepending 1s.
n_dims = max([len(shape) for shape in shapes])
new_shapes = np.ones((len(shapes), n_dims), dtype=int)
for row, shape in zip(new_shapes, shapes):
row[len(row)-len(shape):] = shape # can't use negative indices (-0:)
# Remove the shape elements of the axes to be ignored, but remember them.
if axis is not None:
axis[axis < 0] = n_dims + axis[axis < 0]
axis = np.sort(axis)
if axis[-1] >= n_dims or axis[0] < 0:
message = (f"`axis` is out of bounds "
f"for array of dimension {n_dims}")
raise ValueError(message)
if len(np.unique(axis)) != len(axis):
raise ValueError("`axis` must contain only distinct elements")
removed_shapes = new_shapes[:, axis]
new_shapes = np.delete(new_shapes, axis, axis=1)
# If arrays are broadcastable, shape elements that are 1 may be replaced
# with a corresponding non-1 shape element. Assuming arrays are
# broadcastable, that final shape element can be found with:
new_shape = np.max(new_shapes, axis=0)
# except in case of an empty array:
new_shape *= new_shapes.all(axis=0)
# Among all arrays, there can only be one unique non-1 shape element.
# Therefore, if any non-1 shape element does not match what we found
# above, the arrays must not be broadcastable after all.
if np.any(~((new_shapes == 1) | (new_shapes == new_shape))):
raise ValueError("Array shapes are incompatible for broadcasting.")
if axis is not None:
# Add back the shape elements that were ignored
new_axis = axis - np.arange(len(axis))
new_shapes = [tuple(np.insert(new_shape, new_axis, removed_shape))
for removed_shape in removed_shapes]
return new_shapes
else:
return tuple(new_shape)
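# Illustrative example (not part of the SciPy sources): the axis to be ignored may
# differ between shapes; all other axes must be broadcast-compatible.
#   _broadcast_shapes([(5, 2, 1), (9, 3)], axis=-2)
#   # -> [(5, 2, 3), (5, 9, 3)]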
def _broadcast_array_shapes_remove_axis(arrays, axis=None):
"""
Broadcast shapes of arrays, dropping specified axes
Given a sequence of arrays `arrays` and an integer or tuple `axis`, find
the shape of the broadcast result after consuming/dropping `axis`.
In other words, return output shape of a typical hypothesis test on
`arrays` vectorized along `axis`.
Examples
--------
>>> a = np.zeros((5, 2, 1))
>>> b = np.zeros((9, 3))
    >>> _broadcast_array_shapes_remove_axis((a, b), 1)
(5, 3)
"""
# Note that here, `axis=None` means do not consume/drop any axes - _not_
# ravel arrays before broadcasting.
shapes = [arr.shape for arr in arrays]
return _broadcast_shapes_remove_axis(shapes, axis)
def _broadcast_shapes_remove_axis(shapes, axis=None):
"""
Broadcast shapes, dropping specified axes
Same as _broadcast_array_shapes, but given a sequence
of array shapes `shapes` instead of the arrays themselves.
"""
shapes = _broadcast_shapes(shapes, axis)
shape = shapes[0]
if axis is not None:
shape = np.delete(shape, axis)
return tuple(shape)
def _broadcast_concatenate(arrays, axis):
"""Concatenate arrays along an axis with broadcasting."""
arrays = _broadcast_arrays(arrays, axis)
res = np.concatenate(arrays, axis=axis)
return res
# TODO: add support for `axis` tuples
def _remove_nans(samples, paired):
"Remove nans from paired or unpaired 1D samples"
# potential optimization: don't copy arrays that don't contain nans
if not paired:
return [sample[~np.isnan(sample)] for sample in samples]
# for paired samples, we need to remove the whole pair when any part
# has a nan
nans = np.isnan(samples[0])
for sample in samples[1:]:
nans = nans | np.isnan(sample)
not_nans = ~nans
return [sample[not_nans] for sample in samples]
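# Illustrative example (not part of the SciPy sources): with paired samples a NaN
# in either element removes the whole pair; otherwise NaNs are dropped per sample.
#   x = np.array([1., 2., np.nan])
#   y = np.array([4., np.nan, 6.])
#   _remove_nans([x, y], paired=True)    # -> [array([1.]), array([4.])]
#   _remove_nans([x, y], paired=False)   # -> [array([1., 2.]), array([4., 6.])]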
def _remove_sentinel(samples, paired, sentinel):
"Remove sentinel values from paired or unpaired 1D samples"
# could consolidate with `_remove_nans`, but it's not quite as simple as
# passing `sentinel=np.nan` because `(np.nan == np.nan) is False`
# potential optimization: don't copy arrays that don't contain sentinel
if not paired:
return [sample[sample != sentinel] for sample in samples]
# for paired samples, we need to remove the whole pair when any part
# has a nan
sentinels = (samples[0] == sentinel)
for sample in samples[1:]:
sentinels = sentinels | (sample == sentinel)
not_sentinels = ~sentinels
return [sample[not_sentinels] for sample in samples]
def _masked_arrays_2_sentinel_arrays(samples):
# masked arrays in `samples` are converted to regular arrays, and values
# corresponding with masked elements are replaced with a sentinel value
# return without modifying arrays if none have a mask
has_mask = False
for sample in samples:
mask = getattr(sample, 'mask', False)
has_mask = has_mask or np.any(mask)
if not has_mask:
return samples, None # None means there is no sentinel value
# Choose a sentinel value. We can't use `np.nan`, because sentinel (masked)
# values are always omitted, but there are different nan policies.
for i in range(len(samples)):
# Things get more complicated if the arrays are of different types.
# We could have different sentinel values for each array, but
# the purpose of this code is convenience, not efficiency.
samples[i] = samples[i].astype(np.float64, copy=False)
max_possible, eps = np.finfo(np.float64).max, np.finfo(np.float64).eps
sentinel = max_possible
while sentinel > 0:
for sample in samples:
if np.any(sample == sentinel):
sentinel *= (1 - 2*eps) # choose a new sentinel value
break
else: # when sentinel value is OK, break the while loop
break
# replace masked elements with sentinel value
out_samples = []
for sample in samples:
mask = getattr(sample, 'mask', False)
if np.any(mask):
mask = np.broadcast_to(mask, sample.shape)
sample = sample.data.copy() # don't modify original array
sample[mask] = sentinel
out_samples.append(sample)
return out_samples, sentinel
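# Illustrative example (not part of the SciPy sources): masked entries are replaced
# by a large float sentinel so the data can flow through vectorized code and be
# stripped later by `_remove_sentinel`.
#   x = np.ma.masked_array([1., 2., 3.], mask=[False, True, False])
#   arrays, sentinel = _masked_arrays_2_sentinel_arrays([x])
#   # arrays[0][1] == sentinel, with sentinel close to np.finfo(np.float64).max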
def _check_empty_inputs(samples, axis):
"""
Check for empty sample; return appropriate output for a vectorized hypotest
"""
# if none of the samples are empty, we need to perform the test
if not any((sample.size == 0 for sample in samples)):
return None
# otherwise, the statistic and p-value will be either empty arrays or
# arrays with NaNs. Produce the appropriate array and return it.
output_shape = _broadcast_array_shapes_remove_axis(samples, axis)
output = np.ones(output_shape) * np.nan
return output
# Standard docstring / signature entries for `axis` and `nan_policy`
_name = 'axis'
_type = "int or None, default: 0"
_desc = (
"""If an int, the axis of the input along which to compute the statistic.
The statistic of each axis-slice (e.g. row) of the input will appear in a
corresponding element of the output.
If ``None``, the input will be raveled before computing the statistic."""
.split('\n'))
_axis_parameter_doc = Parameter(_name, _type, _desc)
_axis_parameter = inspect.Parameter(_name,
inspect.Parameter.KEYWORD_ONLY,
default=0)
_name = 'nan_policy'
_type = "{'propagate', 'omit', 'raise'}"
_desc = (
"""Defines how to handle input NaNs.
- ``propagate``: if a NaN is present in the axis slice (e.g. row) along
which the statistic is computed, the corresponding entry of the output
will be NaN.
- ``omit``: NaNs will be omitted when performing the calculation.
If insufficient data remains in the axis slice along which the
statistic is computed, the corresponding entry of the output will be
NaN.
- ``raise``: if a NaN is present, a ``ValueError`` will be raised."""
.split('\n'))
_nan_policy_parameter_doc = Parameter(_name, _type, _desc)
_nan_policy_parameter = inspect.Parameter(_name,
inspect.Parameter.KEYWORD_ONLY,
default='propagate')
def _axis_nan_policy_factory(result_object, default_axis=0,
n_samples=1, paired=False,
result_unpacker=None, too_small=0):
"""Factory for a wrapper that adds axis/nan_policy params to a function.
Parameters
----------
result_object : callable
Callable that returns an object of the type returned by the function
being wrapped (e.g. the namedtuple or dataclass returned by a
statistical test) provided the separate components (e.g. statistic,
pvalue).
default_axis : int, default: 0
The default value of the axis argument. Standard is 0 except when
backwards compatibility demands otherwise (e.g. `None`).
n_samples : int or callable, default: 1
The number of data samples accepted by the function
(e.g. `mannwhitneyu`), a callable that accepts a dictionary of
parameters passed into the function and returns the number of data
samples (e.g. `wilcoxon`), or `None` to indicate an arbitrary number
of samples (e.g. `kruskal`).
paired : {False, True}
Whether the function being wrapped treats the samples as paired (i.e.
corresponding elements of each sample should be considered as different
components of the same sample.)
result_unpacker : callable, optional
Function that unpacks the results of the function being wrapped into
a tuple. This is essentially the inverse of `result_object`. Default
is `None`, which is appropriate for statistical tests that return a
statistic, pvalue tuple (rather than, e.g., a non-iterable datalass).
too_small : int, default: 0
The largest unnacceptably small sample for the function being wrapped.
For example, some functions require samples of size two or more or they
raise an error. This argument prevents the error from being raised when
input is not 1D and instead places a NaN in the corresponding element
of the result.
"""
if result_unpacker is None:
def result_unpacker(res):
return res[..., 0], res[..., 1]
def is_too_small(samples):
for sample in samples:
if len(sample) <= too_small:
return True
return False
def axis_nan_policy_decorator(hypotest_fun_in):
@wraps(hypotest_fun_in)
def axis_nan_policy_wrapper(*args, _no_deco=False, **kwds):
if _no_deco: # for testing, decorator does nothing
return hypotest_fun_in(*args, **kwds)
# We need to be flexible about whether position or keyword
# arguments are used, but we need to make sure users don't pass
# both for the same parameter. To complicate matters, some
# functions accept samples with *args, and some functions already
# accept `axis` and `nan_policy` as positional arguments.
# The strategy is to make sure that there is no duplication
# between `args` and `kwds`, combine the two into `kwds`, then
# the samples, `nan_policy`, and `axis` from `kwds`, as they are
# dealt with separately.
# Check for intersection between positional and keyword args
params = list(inspect.signature(hypotest_fun_in).parameters)
if n_samples is None:
# Give unique names to each positional sample argument
# Note that *args can't be provided as a keyword argument
params = [f"arg{i}" for i in range(len(args))] + params[1:]
d_args = dict(zip(params, args))
intersection = set(d_args) & set(kwds)
if intersection:
message = (f"{hypotest_fun_in.__name__}() got multiple values "
f"for argument '{list(intersection)[0]}'")
raise TypeError(message)
# Consolidate other positional and keyword args into `kwds`
kwds.update(d_args)
# rename avoids UnboundLocalError
if callable(n_samples):
n_samp = n_samples(kwds)
else:
n_samp = n_samples or len(args)
# Extract the things we need here
samples = [np.atleast_1d(kwds.pop(param))
for param in params[:n_samp]]
vectorized = True if 'axis' in params else False
axis = kwds.pop('axis', default_axis)
nan_policy = kwds.pop('nan_policy', 'propagate')
del args # avoid the possibility of passing both `args` and `kwds`
# convert masked arrays to regular arrays with sentinel values
samples, sentinel = _masked_arrays_2_sentinel_arrays(samples)
# standardize to always work along last axis
if axis is None:
samples = [sample.ravel() for sample in samples]
else:
samples = _broadcast_arrays(samples, axis=axis)
axis = np.atleast_1d(axis)
n_axes = len(axis)
# move all axes in `axis` to the end to be raveled
samples = [np.moveaxis(sample, axis, range(-len(axis), 0))
for sample in samples]
shapes = [sample.shape for sample in samples]
# New shape is unchanged for all axes _not_ in `axis`
# At the end, we append the product of the shapes of the axes
# in `axis`. Appending -1 doesn't work for zero-size arrays!
new_shapes = [shape[:-n_axes] + (np.prod(shape[-n_axes:]),)
for shape in shapes]
samples = [sample.reshape(new_shape)
for sample, new_shape in zip(samples, new_shapes)]
axis = -1 # work over the last axis
# if axis is not needed, just handle nan_policy and return
ndims = np.array([sample.ndim for sample in samples])
if np.all(ndims <= 1):
# Addresses nan_policy == "raise"
contains_nans = []
for sample in samples:
contains_nan, _ = (
scipy.stats._stats_py._contains_nan(sample, nan_policy))
contains_nans.append(contains_nan)
# Addresses nan_policy == "propagate"
# Consider adding option to let function propagate nans, but
# currently the hypothesis tests this is applied to do not
# propagate nans in a sensible way
if any(contains_nans) and nan_policy == 'propagate':
return result_object(np.nan, np.nan)
# Addresses nan_policy == "omit"
if any(contains_nans) and nan_policy == 'omit':
# consider passing in contains_nans
samples = _remove_nans(samples, paired)
# ideally, this is what the behavior would be:
# if is_too_small(samples):
# return result_object(np.nan, np.nan)
# but some existing functions raise exceptions, and changing
# behavior of those would break backward compatibility.
if sentinel:
samples = _remove_sentinel(samples, paired, sentinel)
return hypotest_fun_in(*samples, **kwds)
# check for empty input
# ideally, move this to the top, but some existing functions raise
# exceptions for empty input, so overriding it would break
# backward compatibility.
empty_output = _check_empty_inputs(samples, axis)
if empty_output is not None:
statistic = empty_output
pvalue = empty_output.copy()
return result_object(statistic, pvalue)
# otherwise, concatenate all samples along axis, remembering where
# each separate sample begins
lengths = np.array([sample.shape[axis] for sample in samples])
split_indices = np.cumsum(lengths)
x = _broadcast_concatenate(samples, axis)
# Addresses nan_policy == "raise"
contains_nan, _ = (
scipy.stats._stats_py._contains_nan(x, nan_policy))
if vectorized and not contains_nan and not sentinel:
return hypotest_fun_in(*samples, axis=axis, **kwds)
# Addresses nan_policy == "omit"
if contains_nan and nan_policy == 'omit':
def hypotest_fun(x):
samples = np.split(x, split_indices)[:n_samp]
samples = _remove_nans(samples, paired)
if sentinel:
samples = _remove_sentinel(samples, paired, sentinel)
if is_too_small(samples):
return result_object(np.nan, np.nan)
return hypotest_fun_in(*samples, **kwds)
# Addresses nan_policy == "propagate"
elif contains_nan and nan_policy == 'propagate':
def hypotest_fun(x):
if np.isnan(x).any():
return result_object(np.nan, np.nan)
samples = np.split(x, split_indices)[:n_samp]
if sentinel:
samples = _remove_sentinel(samples, paired, sentinel)
if is_too_small(samples):
return result_object(np.nan, np.nan)
return hypotest_fun_in(*samples, **kwds)
else:
def hypotest_fun(x):
samples = np.split(x, split_indices)[:n_samp]
if sentinel:
samples = _remove_sentinel(samples, paired, sentinel)
if is_too_small(samples):
return result_object(np.nan, np.nan)
return hypotest_fun_in(*samples, **kwds)
x = np.moveaxis(x, axis, -1)
res = np.apply_along_axis(hypotest_fun, axis=-1, arr=x)
return result_object(*result_unpacker(res))
doc = FunctionDoc(axis_nan_policy_wrapper)
parameter_names = [param.name for param in doc['Parameters']]
if 'axis' in parameter_names:
doc['Parameters'][parameter_names.index('axis')] = (
_axis_parameter_doc)
else:
doc['Parameters'].append(_axis_parameter_doc)
if 'nan_policy' in parameter_names:
doc['Parameters'][parameter_names.index('nan_policy')] = (
_nan_policy_parameter_doc)
else:
doc['Parameters'].append(_nan_policy_parameter_doc)
doc = str(doc).split("\n", 1)[1] # remove signature
axis_nan_policy_wrapper.__doc__ = str(doc)
sig = inspect.signature(axis_nan_policy_wrapper)
parameters = sig.parameters
parameter_list = list(parameters.values())
if 'axis' not in parameters:
parameter_list.append(_axis_parameter)
if 'nan_policy' not in parameters:
parameter_list.append(_nan_policy_parameter)
sig = sig.replace(parameters=parameter_list)
axis_nan_policy_wrapper.__signature__ = sig
return axis_nan_policy_wrapper
return axis_nan_policy_decorator
|
{"hexsha": "d9a93a66d4b569981603ef7acfead2535f4ab62f", "size": 21314, "ext": "py", "lang": "Python", "max_stars_repo_path": "scipy/stats/_axis_nan_policy.py", "max_stars_repo_name": "ChristinaCzaikowski/scipy", "max_stars_repo_head_hexsha": "544b938e06eba166b7e5bcc6298d9b3314f6cc33", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9095, "max_stars_repo_stars_event_min_datetime": "2015-01-02T18:24:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T20:35:31.000Z", "max_issues_repo_path": "scipy/stats/_axis_nan_policy.py", "max_issues_repo_name": "Seanpm2001-Python/scipy", "max_issues_repo_head_hexsha": "4871f3d1c61bdb296ae03e3480f5f584f5c67256", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 11500, "max_issues_repo_issues_event_min_datetime": "2015-01-01T01:15:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:07:35.000Z", "max_forks_repo_path": "scipy/stats/_axis_nan_policy.py", "max_forks_repo_name": "Seanpm2001-Python/scipy", "max_forks_repo_head_hexsha": "4871f3d1c61bdb296ae03e3480f5f584f5c67256", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5838, "max_forks_repo_forks_event_min_datetime": "2015-01-05T11:56:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T23:21:19.000Z", "avg_line_length": 42.628, "max_line_length": 80, "alphanum_fraction": 0.6243314254, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 4641}
|
# Recursive factorial: fact(n) = n * fact(n - 1), with fact(1) = 1.
fact = function(num)
{
  if (num == 1)
    return(1)
  else
  {
    return(num * fact(num - 1))
  }
}
fact(5)
|
{"hexsha": "7076148f61a009f3226fbbcca83d143a799c3d1d", "size": 128, "ext": "r", "lang": "R", "max_stars_repo_path": "RecursionFact.r", "max_stars_repo_name": "ninadsumant/RProgramming", "max_stars_repo_head_hexsha": "62aaf73e33d8e65f0f17a89c358feecef973cd5e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "RecursionFact.r", "max_issues_repo_name": "ninadsumant/RProgramming", "max_issues_repo_head_hexsha": "62aaf73e33d8e65f0f17a89c358feecef973cd5e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "RecursionFact.r", "max_forks_repo_name": "ninadsumant/RProgramming", "max_forks_repo_head_hexsha": "62aaf73e33d8e65f0f17a89c358feecef973cd5e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 9.8461538462, "max_line_length": 28, "alphanum_fraction": 0.4921875, "num_tokens": 42}
|
import util
import json
import sys
import numpy as np
from metrics import CorefEvaluator, Evaluator
from collections import defaultdict
from eval_ontonotes import SEGMENT_EVAL
predictions = sys.argv[1:]
ALL = "__@ALL"
def read_file(path):
d = {}
with open(path, 'r') as f:
for line in f:
document_blob = json.loads(line)
d[document_blob["doc_key"]] = document_blob
return d
def bucket_tokens(subtoken_map):
num_tokens = subtoken_map[-1]
if num_tokens <= 128:
return ("0-128")
elif num_tokens <= 256:
return ("128-256")
elif num_tokens <= 512:
return ("256-512")
elif num_tokens <= 768:
return ("512-768")
elif num_tokens <= 1152:
return ("768-1152")
else:
return ("_1152+")
def bucket_tokens_by_seg(doc_key):
# Only works for ontonotes dev
segment_count = SEGMENT_EVAL.get(doc_key, 0)
if segment_count <= 1:
return ("0-128")
elif segment_count <= 2:
return ("128-256")
elif segment_count <= 4:
return ("256-512")
elif segment_count <= 6:
return ("512-768")
elif segment_count <= 9:
return ("768-1152")
else:
return ("_1152+")
def bucket_segments(sentences):
num_segments = len(sentences)
if num_segments <= 1:
return ("1")
elif num_segments <= 2:
return ("2")
elif num_segments <= 3:
return ("3")
elif num_segments <= 4:
return ("4")
else:
return (">5")
def update_evaluators(evaluators, document, predicted_clusters, gold_clusters):
(predicted_clusters, gold_clusters,
mention_to_predicted, mention_to_gold) = util.mention_maps(predicted_clusters,
gold_clusters)
def keyed_update(key):
evaluators[key].update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
genre = document["doc_key"][:2]
token_bucket = bucket_tokens(document["subtoken_map"])
has_speakers = "_speaker_" + str(genre in ["bc", "tc"])
update_keys = [
ALL,
"_genre_" + genre + "_t_" + token_bucket,
"_genre0" + has_speakers,
"_s_" + bucket_segments(document["sentences"]),
# "_s+t_" + bucket_tokens_by_seg(document["doc_key"]),
"_t_" + token_bucket,
genre,
]
if "language" in document:
update_keys.append(document["language"])
for key in update_keys:
keyed_update(key)
def print_cluster(doc, clusters):
tokens = util.flatten(doc["sentences"])
for i, cluster in enumerate(clusters):
if len(cluster) > 1:
print(f"cluster {i}")
for span in cluster:
print(f"{' '.join(tokens[span[0]:span[1] + 1])} [{span[0]}, {span[1] + 1}]")
print("-" * 80)
def count_crossings(clusters, segment_map):
# does the "direct" variant
seam_spans = set()
relaxed_seam_spans = {}
for cluster in clusters:
spans = sorted(cluster, key=lambda x: x[0])
prev_span = spans[0]
for i, span in enumerate(spans[1:]):
if segment_map[prev_span[0]] != segment_map[span[0]]:
# break
seam_spans.add((tuple(prev_span), tuple(span)))
relaxed_seam_spans[tuple(span)] = spans[:i+1] # any antecedent is okay
prev_span = span
return seam_spans, relaxed_seam_spans
def seam_evaluation(exp):
strict = Evaluator(metric=None)
relaxed = Evaluator(metric=None)
for key, document in exp.items():
gold_clusters = document["clusters"]
predicted_clusters = document["predicted_clusters"]
# For each cluster, for each new mention, what's the accuracy it linked to the
# right cluster from earlier (i.e. is its direct/any antecedent included in the gold set?)
segment_map = util.segment_map(document["sentences"])
gold_crossings, relaxed_gold = count_crossings(gold_clusters, segment_map)
predicted_crossings, _ = count_crossings(predicted_clusters, segment_map)
intersection = gold_crossings & predicted_crossings
relaxed_intersection = [(ant, span) for ant, span in predicted_crossings
if list(ant) in relaxed_gold.get(span, [])]
gold_seam = len(gold_crossings)
predicted_seam = len(predicted_crossings)
intersection_seam = len(intersection)
relaxed_intersection_seam = len(relaxed_intersection)
strict.raw_update(intersection_seam, predicted_seam, gold_seam)
relaxed.raw_update(relaxed_intersection_seam, predicted_seam, gold_seam)
sp, sr, sf = strict.get_prf()
rp, rr, rf = relaxed.get_prf()
print(f"[STRICT] RECALL (accuracy) {sr:.3f}, p: {sp:.3f}, f1: {sf:.3f}")
print(f"[RELAX] RECALL (accuracy) {rr:.3f}, p: {rp:.3f}, f1: {rf:.3f}")
def calc_spread(cluster):
# distance = min and max
# variance = take set of all points and find variance
cluster = sorted(cluster)
points = sorted(util.flatten(cluster))
distance = points[-1] - points[0]
variance = np.std([(c[0] + c[1]) / 2 for c in cluster])
size = len(cluster)
diffs = [cluster[i+1][0] - cluster[i][0] for i in range(size - 1)]
max_hop = max(diffs)
mean_hop = np.average(diffs)
hop_var = np.std(diffs)
return (distance, variance, max_hop, mean_hop, hop_var, size)
def renumber(clusters, sentences):
# assumes [cls] and [sep] for each sentence
segment_map = util.segment_map(sentences)
def fix_span(span):
num_fillers = 1 + 2 * segment_map[span[0]]
return [span[0] - num_fillers,
span[1] - num_fillers]
def fix_cluster(cluster):
return [fix_span(span) for span in cluster]
return [fix_cluster(cluster) for cluster in clusters]
def distance_eval(exp):
gold_spread = []
predicted_spread = []
def aggregate(distances, variances, max_hops, mean_hops, hop_vars, sizes):
ret_val = {
"avg_dist": np.average(distances),
"max_dist": np.max(distances),
"avg_var": np.average(variances),
"max_hop": np.max(max_hops),
"mean_hop": np.average(mean_hops),
"avg_hop_var": np.average(hop_vars),
"avg_size": np.average(sizes)
}
return ret_val
for key, document in exp.items():
gold_clusters = renumber(document["clusters"], document["sentences"]) # [CLS] and [SEP] get in the way
predicted_clusters = renumber(document["predicted_clusters"], document["sentences"])
gold_spread.extend([calc_spread(cluster) for cluster in gold_clusters if len(cluster) > 1])
predicted_spread.extend([calc_spread(cluster) for cluster in predicted_clusters if len(cluster) > 1])
gold_stats = aggregate(*list(zip(*gold_spread)))
predicted_stats = aggregate(*list(zip(*predicted_spread)))
print({key: f"{val:.2f}" for key, val in gold_stats.items()})
print({key: f"{val:.2f}" for key, val in predicted_stats.items()})
def evaluate_exp(exp, simple=False):
evaluators = defaultdict(CorefEvaluator)
for key, document in exp.items():
gold_clusters = document["clusters"]
predicted_clusters = document["predicted_clusters"]
update_evaluators(evaluators, document, predicted_clusters, gold_clusters)
eval_dict = [f"{key}: {evaluator.prf_str()}, ({evaluator.get_count()} docs)" for key, evaluator in evaluators.items()]
if simple:
print(list(sorted(eval_dict))[0])
else:
print("\n".join(list(sorted(eval_dict))))
if __name__ == "__main__":
for pred_file in predictions:
print(pred_file)
preds = read_file(pred_file)
evaluate_exp(preds)# , simple=True)
# seam_evaluation(preds)
# distance_eval(preds)
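# Hypothetical invocation sketch (file names are illustrative): the script takes
# one or more jsonlines prediction files on the command line, e.g.
#   python eval_all.py predictions_dev.jsonl predictions_test.jsonl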
|
{"hexsha": "8c14d3a68659553d32ef5b34c87c522c466a18b3", "size": 7312, "ext": "py", "lang": "Python", "max_stars_repo_path": "eval_all.py", "max_stars_repo_name": "wgantt/incremental-coref", "max_stars_repo_head_hexsha": "fadc44b59456b67055bf25f3dc688dd982a083af", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "eval_all.py", "max_issues_repo_name": "wgantt/incremental-coref", "max_issues_repo_head_hexsha": "fadc44b59456b67055bf25f3dc688dd982a083af", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eval_all.py", "max_forks_repo_name": "wgantt/incremental-coref", "max_forks_repo_head_hexsha": "fadc44b59456b67055bf25f3dc688dd982a083af", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1538461538, "max_line_length": 120, "alphanum_fraction": 0.6799781182, "include": true, "reason": "import numpy", "num_tokens": 1952}
|
__author__ = "Nikhil Mehta"
__copyright__ = "--"
import tensorflow as tf
import numpy as np
import os
import sys
import argparse
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']="3"
from utils import load_train_data, load_validation_data, translate
from train_model import Model_Train
RUN = 'run_2'
TRAIN_DIR = '/data1/nikhil/trans-autoencoder-summary/'
tf.app.flags.DEFINE_string('train_dir', TRAIN_DIR+RUN, """Directory where we write logs and checkpoints""")
tf.app.flags.DEFINE_string('checkpoint_dir', TRAIN_DIR+RUN, """Directory from where to read the checkpoint""")
tf.app.flags.DEFINE_integer('num_epochs', 800, "Number of epochs to train")
tf.app.flags.DEFINE_integer('num_gpus', 1, "Number of gpus to use")
tf.app.flags.DEFINE_integer('batch_size', 100, "Batch size")
tf.app.flags.DEFINE_integer('save_checkpoint_every', 200, "Save a checkpoint every save_checkpoint_every epochs")
tf.app.flags.DEFINE_integer('save_pred_every', 20, "Save predictions every save_pred_every epochs")
tf.app.flags.DEFINE_integer('save_checkpoint_after', 0, "Start saving checkpoints after this many epochs")
tf.app.flags.DEFINE_integer('num_capsules', 60, "Number of capsules")
tf.app.flags.DEFINE_integer('generator_dimen', 20, "Dimension of generator layer")
tf.app.flags.DEFINE_integer('recognizer_dimen', 10, "Dimension of recognition layer")
FLAGS = tf.app.flags.FLAGS
def main():
train_images = load_train_data()
X_trans, trans, X_original = translate(train_images)
model = Model_Train(X_trans, trans, X_original, FLAGS.num_capsules, FLAGS.recognizer_dimen, FLAGS.generator_dimen, X_trans.shape[1])
model.train()
if __name__ == "__main__":
main()
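# Hypothetical invocation sketch: the flag defaults above are used when the
# script is run directly, e.g.
#   python main.py
# (load_train_data/translate come from utils.py and are assumed to provide the
# translated image batches expected by Model_Train).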
|
{"hexsha": "82cbcc2eab3ecd4ae8ef34a1247945c215cbd90c", "size": 1695, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "nikhil-dce/Transforming-AutoEncoder-TF", "max_stars_repo_head_hexsha": "9475638e4c35342cdf71ba2bf5c2a6fa709f8e44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 66, "max_stars_repo_stars_event_min_datetime": "2017-09-05T20:45:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T17:17:36.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "nikhil-dce/Transforming-AutoEncoder-TF", "max_issues_repo_head_hexsha": "9475638e4c35342cdf71ba2bf5c2a6fa709f8e44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-10-05T07:52:46.000Z", "max_issues_repo_issues_event_max_datetime": "2017-10-05T08:37:49.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "nikhil-dce/Transforming-AutoEncoder-TF", "max_forks_repo_head_hexsha": "9475638e4c35342cdf71ba2bf5c2a6fa709f8e44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2017-09-21T10:20:52.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-15T06:12:25.000Z", "avg_line_length": 36.0638297872, "max_line_length": 136, "alphanum_fraction": 0.7693215339, "include": true, "reason": "import numpy", "num_tokens": 406}
|
[STATEMENT]
lemma permutation_edge_succ: "permutation (edge_succ M)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. permutation (edge_succ M)
[PROOF STEP]
by (metis edge_succ_permutes finite_arcs permutation_permutes)
|
{"llama_tokens": 88, "file": "Planarity_Certificates_Planarity_Graph_Genus", "length": 1}
|
import numpy as np
import numpy.testing as npt
import pytest
from pulse2percept.utils import (is_strictly_increasing, radial_mask, sample,
unique)
def test_is_strictly_increasing():
npt.assert_equal(is_strictly_increasing([1]), True)
npt.assert_equal(is_strictly_increasing([0, 1, 2]), True)
npt.assert_equal(is_strictly_increasing([0, 1, 2], tol=1), True)
npt.assert_equal(is_strictly_increasing([0, 0.5, 1], tol=1), False)
npt.assert_equal(is_strictly_increasing([0, 0, 0]), False)
npt.assert_equal(is_strictly_increasing([0, 2, 1]), False)
def test_sample():
npt.assert_equal(sample([12]), [12])
npt.assert_equal(sample([1, 2, 3], k=0), [])
npt.assert_equal(np.sort(sample([1, 2, 3], k=3)), [1, 2, 3])
selected = sample(np.arange(100), k=0.25)
npt.assert_equal(len(selected), 25)
npt.assert_equal(len(np.unique(selected)), 25)
with pytest.raises(TypeError):
sample([1, 2, 3], k=(1,))
with pytest.raises(ValueError):
sample([1, 2, 3], k=4)
def test_unique():
a = [0, 0.001, 0.1, 0.2, 1]
npt.assert_almost_equal(unique(a, tol=1e-6), a)
npt.assert_almost_equal(unique(a, tol=0.001), a)
npt.assert_almost_equal(unique(a, tol=0.1), [0, 0.1, 0.2, 1])
npt.assert_almost_equal(unique(a, tol=1), [0, 1])
val, idx = unique(a, tol=1e-6, return_index=True)
npt.assert_almost_equal(val, a)
npt.assert_almost_equal(idx, np.arange(len(a)))
val, idx = unique(a, tol=1, return_index=True)
npt.assert_almost_equal(val, [0, 1])
npt.assert_almost_equal(idx, [0, 4])
def test_radial_mask():
# Circle:
mask = radial_mask((3, 5), mask='circle')
npt.assert_equal(mask, np.array([[False, False, True, False, False],
[True, True, True, True, True],
[False, False, True, False, False]]))
# Gauss:
mask = radial_mask((3, 5), mask='gauss', sd=3)
npt.assert_almost_equal(mask[1, 2], 1)
npt.assert_almost_equal(mask[0, 0], 0.0001234)
npt.assert_almost_equal(mask[0, 4], 0.0001234)
npt.assert_almost_equal(mask[2, 0], 0.0001234)
npt.assert_almost_equal(mask[2, 4], 0.0001234)
npt.assert_almost_equal(mask[0, 2], 0.01111, decimal=5)
npt.assert_almost_equal(mask[1, 0], 0.01111, decimal=5)
npt.assert_almost_equal(mask[1, 4], 0.01111, decimal=5)
npt.assert_almost_equal(mask[2, 2], 0.01111, decimal=5)
with pytest.raises(ValueError):
radial_mask((10, 10), mask='invalid')
|
{"hexsha": "ca6f3280451537d2a3ee1e8ee53980713b617f23", "size": 2549, "ext": "py", "lang": "Python", "max_stars_repo_path": "pulse2percept/utils/tests/test_array.py", "max_stars_repo_name": "narenberg/pulse2percept", "max_stars_repo_head_hexsha": "ca3aaf66672ccf3c9ee6a9a9d924184cdc6f031d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2016-04-12T01:53:52.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-18T02:19:01.000Z", "max_issues_repo_path": "pulse2percept/utils/tests/test_array.py", "max_issues_repo_name": "narenberg/pulse2percept", "max_issues_repo_head_hexsha": "ca3aaf66672ccf3c9ee6a9a9d924184cdc6f031d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 86, "max_issues_repo_issues_event_min_datetime": "2016-01-19T17:31:23.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-10T22:55:00.000Z", "max_forks_repo_path": "pulse2percept/utils/tests/test_array.py", "max_forks_repo_name": "narenberg/pulse2percept", "max_forks_repo_head_hexsha": "ca3aaf66672ccf3c9ee6a9a9d924184cdc6f031d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2016-08-23T16:17:46.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-25T17:53:49.000Z", "avg_line_length": 38.6212121212, "max_line_length": 77, "alphanum_fraction": 0.6371125932, "include": true, "reason": "import numpy", "num_tokens": 836}
|
// (C) Copyright 2015 by Autodesk, Inc.
//== INCLUDES =================================================================
//== COMPILE-TIME PACKAGE REQUIREMENTS ========================================
#include <CoMISo/Config/config.hh>
#if COMISO_DOCLOUD_AVAILABLE
//=============================================================================
#include "DOCloudCache.hh"
#include "DOCloudConfig.hh"
#include <Base/Debug/DebOut.hh>
#include <fstream>
#include <iomanip>
#include <cctype>
#include <functional>
#include <sstream>
#include <boost/filesystem.hpp>
// include windows.h without some of the excess
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
// ... and undefine ERROR
#ifdef ERROR
#undef ERROR
#endif//ERROR
//== NAMESPACES ===============================================================
namespace COMISO {
namespace DOcloud {
namespace {
// Creates a new temporary exclusive file without extension that is used to
// prevent read or write operations on files with the same name and extension
// (.lp or .dat) while the cache is being written. This is the only class that
// uses Windows-specific APIs.
class FileLock
{
public:
FileLock(const std::string& _filename)
{
file_hnd_ = CreateFile(_filename.c_str(),
GENERIC_WRITE,
0, // ShareMode - 0 prevents any sharing
nullptr, // SecurityAttributes
CREATE_NEW, // Fails if file already exists.
FILE_ATTRIBUTE_NORMAL | FILE_FLAG_DELETE_ON_CLOSE, // File attributes.
NULL);
}
  // We can write the DOcloud results only if the lock is successful.
bool sucess() const { return file_hnd_ != INVALID_HANDLE_VALUE; }
  // If there is an active lock we cannot read the related data because they
  // are still being written.
static bool active(const std::string& _filename)
{
return GetFileAttributes(_filename.c_str()) != INVALID_FILE_ATTRIBUTES;
}
  // The destructor removes the lock. From this moment the data can be freely
  // read, and nothing else should try to rewrite them.
~FileLock()
{
if (sucess())
CloseHandle(file_hnd_); // This will delete the file.
}
private:
HANDLE file_hnd_;
};
bool load_file(const std::string& _filename, std::string& _file_cnts)
{
std::ifstream in_file_strm(_filename, std::ios::ate);
if (!in_file_strm.is_open())
return false;
_file_cnts.reserve(in_file_strm.tellg());
in_file_strm.seekg(0, std::ios::beg);
_file_cnts.assign(std::istreambuf_iterator<char>(in_file_strm),
std::istreambuf_iterator<char>());
return true;
}
bool save_file(const std::string& _filename, const std::string& _file_cnts)
{
std::ofstream out_file_strm(_filename);
if (!out_file_strm.is_open())
return false;
out_file_strm << _file_cnts;
return true;
}
// Hashes a string into a short key. This key is used as the file name under
// which the related cached data are stored.
std::string string_to_hash(const std::string& _str)
{
const std::hash<std::string> hash_fn;
std::stringstream strm;
strm << std::hex << hash_fn(_str);
return strm.str();
}
const size_t NO_SOLUTION_CODE = UINT_MAX;
// Load variables and objective values from a file.
bool load_data(const std::string& _filename,
std::vector<double>& _x, double& _obj_val)
{
std::ifstream in_file_strm(_filename);
if (!in_file_strm.is_open())
return false;
size_t dim = std::numeric_limits<size_t>::max();
in_file_strm >> dim;
if (dim == NO_SOLUTION_CODE)
{
_x.clear();
return true;
}
if (dim != _x.size())
return false;
for (auto& xi : _x)
in_file_strm >> xi;
in_file_strm >> _obj_val;
return !in_file_strm.bad();
}
// Store variables and objective values in a file.
bool save_data(const std::string& _filename,
const std::vector<double>& _x, const double& _obj_val)
{
std::ofstream out_file_strm(_filename);
out_file_strm << std::setprecision(std::numeric_limits<double>::digits10 + 2);
if (!out_file_strm.is_open())
return false;
if (_x.empty())
{
out_file_strm << NO_SOLUTION_CODE;
return true;
}
out_file_strm << _x.size() << std::endl;
for (const auto& xi : _x)
    out_file_strm << xi << std::endl;
out_file_strm << _obj_val;
return !out_file_strm.bad();
}
} // namespace
Cache::Cache(const std::string& _mip_lp)
: mip_lp_(_mip_lp), hash_(string_to_hash(mip_lp_)), found_(false)
{
DEB_enter_func;
DEB_line(2, "Cache hash: " << hash_);
}
bool Cache::restore_result(std::vector<double>& _x, double& _obj_val)
{
DEB_enter_func;
const auto* cache_loc = Config::query().cache_location();
  if (cache_loc == nullptr) // cache location not provided, disable the cache
return false;
for (size_t iter_nmbr = 0; iter_nmbr < 10; ++iter_nmbr)
{
filename_ = cache_loc + hash_ + '_' + std::to_string(iter_nmbr);
std::string dat_filename(filename_ + ".dat");
boost::system::error_code err_cod;
if (!boost::filesystem::exists(
boost::filesystem::path(dat_filename.c_str()), err_cod) ||
err_cod.value() != boost::system::errc::success)
{
      // If the .dat file does not exist it is not safe to check the lock: this
      // process could find no lock while another process sets the lock and
      // starts writing, and this process would then read partially written data.
break;
}
if (FileLock::active(filename_))
break;
std::string cache_cnts;
if (!load_file(filename_ + ".lp", cache_cnts))
break;
if (cache_cnts == mip_lp_)
{
found_ = load_data(filename_ + ".dat", _x, _obj_val);
return found_;
}
}
return false;
}
namespace {
// Saves the pair of files fname.lp and fname.dat. Together they form an entry
// of a map from .lp contents to .dat results, so if there is an error saving
// the .dat file, the .lp file must also be deleted.
class CacheSaver
{
public:
CacheSaver() : success_(false) {}
~CacheSaver()
{
if (success_)
return;
    // Removes any files that were written, if there has been any kind of failure.
for (const auto& filename : used_files_)
{
if (!filename.empty())
std::remove(filename.c_str());
}
}
void save(const std::string& _filename, const std::vector<double>& _x,
const double& _obj_val, const std::string& _lp_cnts)
{
DEB_enter_func;
FileLock file_lock(_filename);
if (file_lock.sucess())
{
used_files_[0] = _filename + ".lp";
success_ = save_file(used_files_[0], _lp_cnts);
if (success_)
{
used_files_[1] = _filename + ".dat";
success_ = save_data(used_files_[1], _x, _obj_val);
}
}
}
private:
bool success_;
std::string used_files_[2];
};
} // namespace
void Cache::store_result(const std::vector<double>& _x, const double& _obj_val)
{
DEB_enter_func;
if (filename_.empty() || found_)
{// restore_result() either not called at all, or hit the cache
DEB_error("store_result() called incorrectly");
return;
}
CacheSaver saver;
saver.save(filename_, _x, _obj_val, mip_lp_);
}
} // namespace DOcloud
} // namespace COMISO
#endif // COMISO_DOCLOUD_AVAILABLE
//=============================================================================
|
{"hexsha": "58c1cab14ae847779a3980315b0818bc6236f140", "size": 7260, "ext": "cc", "lang": "C++", "max_stars_repo_path": "ACAP_linux/3rd/CoMISo/NSolver/DOCloudCache.cc", "max_stars_repo_name": "shubhMaheshwari/Automatic-Unpaired-Shape-Deformation-Transfer", "max_stars_repo_head_hexsha": "8c9afe017769f9554706bcd267b6861c4c144999", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 216.0, "max_stars_repo_stars_event_min_datetime": "2018-09-09T11:53:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T13:41:35.000Z", "max_issues_repo_path": "ACAP_linux/3rd/CoMISo/NSolver/DOCloudCache.cc", "max_issues_repo_name": "gaolinorange/Automatic-Unpaired-Shape-Deformation-Transfer", "max_issues_repo_head_hexsha": "8c9afe017769f9554706bcd267b6861c4c144999", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13.0, "max_issues_repo_issues_event_min_datetime": "2018-10-23T08:29:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T06:45:34.000Z", "max_forks_repo_path": "ACAP_linux/3rd/CoMISo/NSolver/DOCloudCache.cc", "max_forks_repo_name": "shubhMaheshwari/Automatic-Unpaired-Shape-Deformation-Transfer", "max_forks_repo_head_hexsha": "8c9afe017769f9554706bcd267b6861c4c144999", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 41.0, "max_forks_repo_forks_event_min_datetime": "2018-09-13T08:50:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T00:33:54.000Z", "avg_line_length": 26.7896678967, "max_line_length": 81, "alphanum_fraction": 0.6438016529, "num_tokens": 1856}
|
import unittest
import numpy as np
from smt.utils.sm_test_case import SMTestCase
from smt.utils.kriging_utils import standardization
class Test(SMTestCase):
def test_standardization(self):
d, n = (10, 100)
X = np.random.normal(size=(n, d))
y = np.random.normal(size=(n, 1))
X_norm, _, _, _, _, _ = standardization(X, y, scale_X_to_unit=True)
interval = (np.min(X_norm), np.max(X_norm))
self.assertEqual((0, 1), interval)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "f18fba92984333efeb4b825bf4887fb82c49e799", "size": 526, "ext": "py", "lang": "Python", "max_stars_repo_path": "smt/utils/test/test_kriging_utils.py", "max_stars_repo_name": "joshuauk1026/smt", "max_stars_repo_head_hexsha": "ec6aa20643b1e4fa772c6f470281c58df113c3a6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-09-08T21:32:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-20T20:52:30.000Z", "max_issues_repo_path": "smt/utils/test/test_kriging_utils.py", "max_issues_repo_name": "joshuauk1026/smt", "max_issues_repo_head_hexsha": "ec6aa20643b1e4fa772c6f470281c58df113c3a6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-14T16:37:33.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-14T16:37:33.000Z", "max_forks_repo_path": "smt/utils/test/test_kriging_utils.py", "max_forks_repo_name": "joshuauk1026/smt", "max_forks_repo_head_hexsha": "ec6aa20643b1e4fa772c6f470281c58df113c3a6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0476190476, "max_line_length": 75, "alphanum_fraction": 0.6501901141, "include": true, "reason": "import numpy", "num_tokens": 139}
|
"""Modification CLAHE (Contrast Limited Adaptive Histogram Equalization)."""
import cv2 as cv
import numpy as np
from dfd.datasets.modifications.interfaces import ModificationInterface
class CLAHEModification(ModificationInterface):
"""Modification CLAHE (Contrast Limited Adaptive Histogram Equalization)"""
def __init__(self, clip_limit: float, grid_width: int, grid_height: int) -> None:
"""Initialize AdaptiveHistogramEqualizationModification.
Args:
clip_limit: limit used to define opencv CLAHE object, as in cv.createCLAHE
grid_width: tile grid width used to define opencv CLAHE object, as in cv.createCLAHE
grid_height: tile grid height used to define opencv CLAHE object, as in cv.createCLAHE
"""
self._clip_limit = clip_limit
self._title_grid_size = (grid_width, grid_height)
def perform(self, image: np.ndarray) -> np.ndarray:
"""Perform CLAHE on image.
        Equalization is done in the YCrCb color space;
        after equalization the image is converted back to BGR.
Args:
image: OpenCV image.
Returns:
Image after equalization.
"""
# Convert from BGR color space to YCrCb
ycrcb_image = cv.cvtColor(image, cv.COLOR_BGR2YCrCb)
# Prepare CLAHE
clahe = cv.createCLAHE(clipLimit=self._clip_limit, tileGridSize=self._title_grid_size)
# Equalize y channel
ycrcb_image[:, :, 0] = clahe.apply(ycrcb_image[:, :, 0])
# Convert back to BGR
return cv.cvtColor(ycrcb_image, cv.COLOR_YCrCb2BGR)
def __str__(self) -> str:
width, height = self._title_grid_size
return f"clahe_{width}_{height}_{self._clip_limit}"
|
{"hexsha": "3ea8787fe392273afaaf0e54603353732a05a5a2", "size": 1745, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/dfd/datasets/modifications/definitions/clahe.py", "max_stars_repo_name": "cicheck/dfd", "max_stars_repo_head_hexsha": "b02752f958cfea2f85222e2b4b3ba7e265a6152d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/dfd/datasets/modifications/definitions/clahe.py", "max_issues_repo_name": "cicheck/dfd", "max_issues_repo_head_hexsha": "b02752f958cfea2f85222e2b4b3ba7e265a6152d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-12-31T17:44:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-31T19:51:11.000Z", "max_forks_repo_path": "src/dfd/datasets/modifications/definitions/clahe.py", "max_forks_repo_name": "cicheck/dfd", "max_forks_repo_head_hexsha": "b02752f958cfea2f85222e2b4b3ba7e265a6152d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.612244898, "max_line_length": 98, "alphanum_fraction": 0.6773638968, "include": true, "reason": "import numpy", "num_tokens": 420}
|
#ifndef YQVMC_EXTERNAL_LIBRARY_ADAPTOR_EIGEN3_HPP
#define YQVMC_EXTERNAL_LIBRARY_ADAPTOR_EIGEN3_HPP
#include <Eigen/Core>
#include "../impl_/mae_traits.hpp"
namespace yqvmc {
namespace impl_ {
template <typename S_, int R_, int C_, int O_, int MR_, int MC_>
struct MeanAndErrorTraits<Eigen::Array<S_, R_, C_, O_, MR_, MC_>, void> {
public:
typedef Eigen::Array<S_, R_, C_, O_, MR_, MC_> input_type;
typedef typename input_type::Scalar Scalar;
typedef typename std::conditional<input_type::ColsAtCompileTime == 1,
Eigen::Array<Scalar, Eigen::Dynamic, 1>,
Eigen::Array<Scalar, Eigen::Dynamic, Eigen::Dynamic> >::type sum_type;
typedef sum_type result_type;
static void set_zero(sum_type& x) { x.setZero(); }
static void add_to(sum_type& x, const input_type& dx) {
if (x.size() == 0)
x = dx;
else
x += dx;
}
static input_type square(const input_type& x) { return x*x; }
static result_type mean(const sum_type& x, std::size_t n) {
return x/n;
}
static result_type standard_error(const result_type& x2mean,
const result_type& xmean, std::size_t n) {
return ((x2mean - xmean*xmean) / n).sqrt();
}
};
}
}
#endif
|
{"hexsha": "c1ca65d58c643f92c3c1a6f23a72a36ca5fac381", "size": 1277, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/yqvmc/libadapt/eigen3_adaptor.hpp", "max_stars_repo_name": "yangqi137/yqvmc", "max_stars_repo_head_hexsha": "73b7367f6d4b01ea61612ea0888b285c8dac2fad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/yqvmc/libadapt/eigen3_adaptor.hpp", "max_issues_repo_name": "yangqi137/yqvmc", "max_issues_repo_head_hexsha": "73b7367f6d4b01ea61612ea0888b285c8dac2fad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/yqvmc/libadapt/eigen3_adaptor.hpp", "max_forks_repo_name": "yangqi137/yqvmc", "max_forks_repo_head_hexsha": "73b7367f6d4b01ea61612ea0888b285c8dac2fad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7435897436, "max_line_length": 78, "alphanum_fraction": 0.6405638215, "num_tokens": 348}
|
# script for extracting patches from video frames suitable for neural network
# training
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
from PIL import Image
import sys
import os
import glob
from os.path import basename, splitext
import numpy as np
def acceptable(a):
if np.average(a) > 0.95 * 255:
return False
return True
overlap = 8
source_path = './reducted-conferences-videos-equations/'
destination_path = './conferences-videos-equations-samples-512/'
for file_name in glob.glob(source_path+ "*.jpg"):
name_without_extenstion = splitext(basename(file_name))[0]
gt_file_path = source_path + name_without_extenstion + ".gt.jpg"
print(file_name)
print(gt_file_path)
source_image = load_img(file_name, grayscale=False)
try:
groud_img = load_img(gt_file_path, grayscale=True)
except FileNotFoundError:
#groud_img = Image.new('RGB', (source_image.size[0], source_image.size[1]), (255, 255, 255))
continue
size_list = [512]
for size_x in size_list:
for size_y in size_list:
subimage_size = (size_x, size_y)
num_of_subimages_horizontal = source_image.size[0] // (subimage_size[0] // overlap)
num_of_subimages_vertical = source_image.size[1] // (subimage_size[1] // overlap)
rest_h = source_image.size[0] - num_of_subimages_horizontal * (subimage_size[0] // overlap)
rest_v = source_image.size[1] - num_of_subimages_vertical * (subimage_size[1] // overlap)
for i in range(num_of_subimages_horizontal):
for j in range(num_of_subimages_vertical):
x = i * (subimage_size[0] // overlap)
y = j * (subimage_size[1] // overlap)
w = x + (subimage_size[0])
h = y + (subimage_size[1])
crop_rect = (x,y,w,h)
if w > source_image.size[0] or h > source_image.size[1]:
continue
chunk_file_name = "{dir}{name}-{sizex}-{sizey}-{i}-{j}".format(dir=destination_path, i=i, j=j, name=name_without_extenstion, sizex=size_x, sizey=size_y)
gt_sub_image = groud_img.crop(crop_rect)
if not acceptable(img_to_array(gt_sub_image)):
continue
print(chunk_file_name)
gt_sub_image.save(chunk_file_name + ".gt.jpg")
sub_image = source_image.crop(crop_rect)
sub_image.save(chunk_file_name+ ".jpg")
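# Hypothetical invocation sketch: the script takes no arguments; it scans
# source_path for paired "<name>.jpg" / "<name>.gt.jpg" files and writes
# 512x512 patches to destination_path, e.g.
#   python extract-subimages-videos.py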
|
{"hexsha": "118bb9cc42104087368b917dcb655edae791e512", "size": 2624, "ext": "py", "lang": "Python", "max_stars_repo_path": "extract-subimages-videos.py", "max_stars_repo_name": "rzaluska/fcnn-conferences", "max_stars_repo_head_hexsha": "509946a4d342451f29e7b8706b6ff46b0af20f36", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-07T05:55:48.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-07T05:55:48.000Z", "max_issues_repo_path": "extract-subimages-videos.py", "max_issues_repo_name": "rzaluska/fcnn-conferences", "max_issues_repo_head_hexsha": "509946a4d342451f29e7b8706b6ff46b0af20f36", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "extract-subimages-videos.py", "max_forks_repo_name": "rzaluska/fcnn-conferences", "max_forks_repo_head_hexsha": "509946a4d342451f29e7b8706b6ff46b0af20f36", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1641791045, "max_line_length": 172, "alphanum_fraction": 0.6269054878, "include": true, "reason": "import numpy", "num_tokens": 621}
|
"""
Modified from: http://hubpages.com/technology/Simplex-Algorithm-in-Python
"""
from __future__ import division, print_function
from numpy import *
# Ref: http://stackoverflow.com/questions/23344185/how-to-convert-a-decimal-number-into-fraction
from fractions import Fraction
class Tableau:
def __init__(self, obj):
self.obj = [1] + obj
self.rows = []
self.cons = []
self.no_variables = len(obj)
self.no_constraints = 0
self.is_fraction = False # set True to output in fraction
def add_constraint(self, expression, value):
self.rows.append([0] + expression)
self.cons.append(value)
self.no_constraints += 1
self.header_tableau = ["Basic"] + ["x"+str(i+1) for i in range(self.no_variables)] \
+ ["s"+str(i+1) for i in range(self.no_constraints)] \
+ ["Solution"]
self.basic_variables = ["s"+str(i+1) for i in range(self.no_constraints)]
def _pivot_column(self):
low = 0
idx = 0
for i in range(1, len(self.obj)-1):
if self.obj[i] < low:
low = self.obj[i]
idx = i
if idx == 0: return -1
return idx
def _pivot_row(self, col):
rhs = [self.rows[i][-1] for i in range(len(self.rows))]
lhs = [self.rows[i][col] for i in range(len(self.rows))]
ratio = []
        for i in range(len(rhs)):
            # Rows with a non-positive pivot-column entry are not eligible for
            # the ratio test, so give them an infinite ratio.
            if lhs[i] <= 0:
                ratio.append(float('inf'))
                continue
            ratio.append(rhs[i]/lhs[i])
return argmin(ratio)
def display(self):
if self.is_fraction:
# Formatting the output in fraction
# Ref: https://pyformat.info/
fmt = '{:<8}'.format("Basic") \
+ "".join(['{:>8}'.format("x"+str(i+1)) for i in range(self.no_variables)]) \
+ "".join(['{:>8}'.format("s"+str(i+1)) for i in range(self.no_constraints)]) \
+ '{:>8}'.format("Sol.")
fmt += "\n"
fmt += '{:<8}'.format("z") \
+ "".join(["{:>8}".format(Fraction(item).limit_denominator(3)) for item in self.obj[1:]])
for i, row in enumerate(self.rows):
fmt += "\n"
fmt += '{:<8}'.format(self.basic_variables[i]) \
+ "".join(["{:>8}".format(Fraction(item).limit_denominator(3)) for item in row[1:]])
            print(fmt)
else:
# Formatting the output in float with 2 decimal places
fmt = '{:<8}'.format("Basic") \
+ "".join(['{:>8}'.format("x"+str(i+1)) for i in range(self.no_variables)]) \
+ "".join(['{:>8}'.format("s"+str(i+1)) for i in range(self.no_constraints)]) \
+ '{:>8}'.format("Sol.")
fmt += "\n"
fmt += '{:<8}'.format("z") + "".join(["{:>8.2f}".format(item) for item in self.obj[1:]])
for i, row in enumerate(self.rows):
fmt += "\n"
fmt += '{:<8}'.format(self.basic_variables[i]) \
+ "".join(["{:>8.2f}".format(item) for item in row[1:]])
            print(fmt)
# print '\n', matrix([self.obj] + self.rows)
def _pivot(self, row, col):
e = self.rows[row][col]
self.rows[row] /= e
for r in range(len(self.rows)):
if r == row: continue
self.rows[r] = self.rows[r] - self.rows[r][col]*self.rows[row]
self.obj = self.obj - self.obj[col]*self.rows[row]
def _check(self):
if min(self.obj[1:-1]) >= 0: return 1
return 0
def solve(self):
# build full tableau
for i in range(len(self.rows)):
self.obj += [0]
ident = [0 for r in range(len(self.rows))]
ident[i] = 1
self.rows[i] += ident + [self.cons[i]]
self.rows[i] = array(self.rows[i], dtype=float)
self.obj = array(self.obj + [0], dtype=float)
# solve
self.display()
while not self._check():
c = self._pivot_column()
r = self._pivot_row(c)
self._pivot(r,c)
            # print('\npivot column: %s\npivot row: %s' % (c+1, r+2))
            print('\n')
            print('Entering Variable: ', self.header_tableau[c])
            print('Leaving Variable : ', self.basic_variables[r])
            print('\n')
# Updating the basic variable
for index, item in enumerate(self.basic_variables):
if self.basic_variables[index] == self.basic_variables[r]:
self.basic_variables[index] = self.header_tableau[c]
self.display()
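# A minimal usage sketch (added for illustration, not part of the original
# file): maximize z = 2*x1 + 3*x2 subject to x1 + x2 <= 4 and x1 + 3*x2 <= 6.
# Following the tableau convention used above, the objective row is entered in
# z-row form, i.e. with negated coefficients.
def _example():
    t = Tableau([-2, -3])
    t.add_constraint([1, 1], 4)
    t.add_constraint([1, 3], 6)
    t.solve()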
|
{"hexsha": "f19a5864b7030d2e7e0bf7d50ec0cd87efe0be2b", "size": 4860, "ext": "py", "lang": "Python", "max_stars_repo_path": "tableau.py", "max_stars_repo_name": "infimath/optimization-taha", "max_stars_repo_head_hexsha": "5f2c02429710614cf7ec5993cddb5e15afcb8103", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tableau.py", "max_issues_repo_name": "infimath/optimization-taha", "max_issues_repo_head_hexsha": "5f2c02429710614cf7ec5993cddb5e15afcb8103", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tableau.py", "max_forks_repo_name": "infimath/optimization-taha", "max_forks_repo_head_hexsha": "5f2c02429710614cf7ec5993cddb5e15afcb8103", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.96875, "max_line_length": 108, "alphanum_fraction": 0.4884773663, "include": true, "reason": "from numpy", "num_tokens": 1177}
|
# -*- coding: utf-8 -*-
# Dependencies:
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from statsmodels.tsa.seasonal import seasonal_decompose
from src import (
PATH_COVID_IMPACT_GRAPH,
PATH_HISTOGRAM_BOOKINGS,
PATH_PLOT_REVENUE_PER_DATE,
PATH_PREPROCESSOR_COVID_IMPACT,
PATH_PREPROCESSOR_REVENUE_MODEL_Q2,
PATH_REGRESSOR_COVID_IMPACT,
PATH_REGRESSOR_REVENUE_MODEL_Q2,
PATH_REVENUE_COMPARISON,
PATH_SEASONAL_DECOMPOSE_RESERVATIONS,
PATH_SEASONAL_DECOMPOSE_REVENUE,
)
from src.commons import get_date_from_ymd, load_pickle
from src.features.build_features import (
build_date_features,
build_features_revenue_model_q2,
)
from src.models.preprocessing import preprocess_transform
def plot_revenue_per_date(df_daily_revenue: pd.DataFrame):
"""Plots a graph of revenue per date.
Parameters
----------
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
"""
temp = df_daily_revenue.groupby("date")[["revenue"]].mean().reset_index()
plt.style.use("seaborn")
sns.lineplot(data=temp, x="date", y="revenue")
plt.xticks(rotation=45)
plt.xlabel("Date")
plt.ylabel("Mean Daily Revenue (R$)")
plt.tight_layout()
plt.savefig(PATH_PLOT_REVENUE_PER_DATE)
plt.close()
print("Exporting graph revenue_per_date to path: " + PATH_PLOT_REVENUE_PER_DATE)
def plot_hist_reservation_advance(df_daily_revenue: pd.DataFrame):
"""Plots a histogram with the distribution of booking advance days.
Parameters
----------
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
"""
plt.style.use("seaborn")
plt.hist(df_daily_revenue["reservation_advance_days"].dropna(), bins=100)
plt.xlabel("Reservation advance (days)")
plt.ylabel("Number of reservations")
plt.tight_layout()
plt.savefig(PATH_HISTOGRAM_BOOKINGS)
plt.close()
print(
"Exporting graph histogram_reservation_advance to path: "
+ PATH_HISTOGRAM_BOOKINGS
)
def plot_real_pred_data(df_listings: pd.DataFrame, df_daily_revenue: pd.DataFrame):
"""Plots a graph comparing the real and the predicted revenue.
Parameters
----------
df_listings : pd.DataFrame
Pandas dataframe with information about listings.
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
"""
X, y = build_features_revenue_model_q2(df_listings, df_daily_revenue)
X["date"] = X.apply(
lambda x: pd.to_datetime(
str(int(x["year"])) + "-" + str(int(x["month"])) + "-" + str(int(x["day"]))
),
axis=1,
)
data_pred = pd.DataFrame()
data_pred["date"] = pd.date_range(start=X["date"].min(), end=X["date"].max())
data_pred = build_date_features(data_pred, "date")
preprocessor = load_pickle(PATH_PREPROCESSOR_REVENUE_MODEL_Q2)
model = load_pickle(PATH_REGRESSOR_REVENUE_MODEL_Q2)
X_pred = preprocess_transform(data_pred, preprocessor)
y_pred = model.predict(X_pred)
plt.style.use("seaborn")
plt.plot(X["date"], y, label="Real revenue", alpha=0.8)
plt.plot(X["date"], y_pred, label="Predicted revenue", color="orange", alpha=0.8)
plt.xticks(rotation=45)
plt.xlabel("Date")
plt.ylabel("Revenue (R$)")
plt.legend()
plt.tight_layout()
plt.savefig(PATH_REVENUE_COMPARISON)
plt.close()
print(
"Exporting graph real_versus_predicted_revenue to path: "
+ PATH_REVENUE_COMPARISON
)
def plot_seasonal_decomposed_q2(
df_listings: pd.DataFrame, df_daily_revenue: pd.DataFrame
):
"""Plots the graphs of seasonal decomposition for question 2.
Parameters
----------
df_listings : pd.DataFrame
Pandas dataframe with information about listings.
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
"""
data = pd.merge(
df_daily_revenue,
df_listings[["Código", "Comissão"]],
left_on="listing",
right_on="Código",
how="left",
)
data["company_revenue"] = data["Comissão"] * data["revenue"]
data_revenue = (
data.groupby("date")
.agg(company_revenue=("company_revenue", "sum"))
.reset_index()
)
data_revenue = build_date_features(data_revenue, "date")
data = data_revenue.loc[data_revenue["company_revenue"].notna()]
    # statsmodels renamed the `freq` argument to `period`; try the old name first.
    try:
tsmodel = seasonal_decompose(
data["company_revenue"],
model="additive",
extrapolate_trend="freq",
freq=365,
)
except Exception as err:
tsmodel = seasonal_decompose(
data["company_revenue"],
model="additive",
extrapolate_trend="freq",
period=365,
)
plt.style.use("seaborn")
plt.rcParams.update({"figure.figsize": (10, 10)})
tsmodel.plot()
plt.xlabel("Days")
plt.tight_layout()
plt.savefig(PATH_SEASONAL_DECOMPOSE_REVENUE)
plt.close()
print(
"Exporting graph seasonal_decompose_revenue to path: "
+ PATH_SEASONAL_DECOMPOSE_REVENUE
)
def plot_seasonal_decomposed_q3(df_daily_revenue: pd.DataFrame):
"""Plots the graphs of seasonal decomposition for question 3.
Parameters
----------
df_daily_revenue : pd.DataFrame
        Pandas dataframe with information about daily revenue.
"""
df_q3 = df_daily_revenue[
(df_daily_revenue["occupancy"] == 1) & (df_daily_revenue["blocked"] == 0)
]
data_q3 = df_q3.groupby(["creation_date"]).count().iloc[:, 0:1].reset_index()
data_q3.columns = ["creation_date", "qt_reservations"]
data_q3 = build_date_features(data_q3, "creation_date")
    # statsmodels renamed the `freq` argument to `period`; try the old name first.
    try:
tsmodel = seasonal_decompose(
data_q3["qt_reservations"],
model="additive",
extrapolate_trend="freq",
freq=365,
)
except Exception as err:
tsmodel = seasonal_decompose(
data_q3["qt_reservations"],
model="additive",
extrapolate_trend="freq",
period=365,
)
plt.style.use("seaborn")
plt.rcParams.update({"figure.figsize": (10, 10)})
tsmodel.plot()
plt.xlabel("Days")
plt.tight_layout()
plt.savefig(PATH_SEASONAL_DECOMPOSE_RESERVATIONS)
plt.close()
print(
"Exporting graph seasonal_decompose_reservations to path: "
+ PATH_SEASONAL_DECOMPOSE_RESERVATIONS
)
def plot_revenue_loss_due_to_covid(
df_listings: pd.DataFrame, df_daily_revenue: pd.DataFrame
):
"""Plots the graph comparing the revenue expected in comparison to
real revenue in order to compare loss due to covid-19 pandemic.
Parameters
----------
df_listings : pd.DataFrame
Pandas dataframe with information about listings.
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
"""
data = pd.merge(
df_daily_revenue,
df_listings[["Código", "Comissão"]],
left_on="listing",
right_on="Código",
how="left",
)
data["company_revenue"] = data["Comissão"] * data["revenue"]
data = (
data.groupby("date")
.agg(company_revenue=("company_revenue", "sum"))
.reset_index()
)
data_pred = pd.DataFrame()
data_pred["date"] = pd.date_range(
start=data["date"].min(), end=data["date"].max()
).to_list()
data_pred = build_date_features(data_pred, "date")
preprocessor = load_pickle(PATH_PREPROCESSOR_COVID_IMPACT)
model = load_pickle(PATH_REGRESSOR_COVID_IMPACT)
X_pred = preprocess_transform(data_pred, preprocessor)
data_pred["predicted_company_revenue"] = model.predict(X_pred)
data_pred["date"] = get_date_from_ymd(data_pred)
plt.style.use("seaborn")
plt.rcParams.update({"figure.figsize": (10, 10)})
plt.plot(
data["date"], data["company_revenue"], label="Real Company Revenue", alpha=0.8
)
plt.plot(
data_pred["date"],
data_pred["predicted_company_revenue"],
label="Predicted Company Revenue",
color="orange",
alpha=0.8,
)
plt.xlabel("Date")
plt.ylabel("Company Revenue (R$)")
plt.legend()
plt.tight_layout()
plt.savefig(PATH_COVID_IMPACT_GRAPH)
plt.close()
print("Exporting graph covid_impact_on_revenue to path: " + PATH_COVID_IMPACT_GRAPH)
|
{"hexsha": "027e24bd44347785f1e9da6dc0fe1630584af266", "size": 8812, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/visualization/visualize.py", "max_stars_repo_name": "carolmoraescruz/case_seazone", "max_stars_repo_head_hexsha": "76b44a64272685681442929c04ea9e4fd21a147e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/visualization/visualize.py", "max_issues_repo_name": "carolmoraescruz/case_seazone", "max_issues_repo_head_hexsha": "76b44a64272685681442929c04ea9e4fd21a147e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/visualization/visualize.py", "max_forks_repo_name": "carolmoraescruz/case_seazone", "max_forks_repo_head_hexsha": "76b44a64272685681442929c04ea9e4fd21a147e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.67003367, "max_line_length": 89, "alphanum_fraction": 0.6390149796, "include": true, "reason": "from statsmodels", "num_tokens": 2098}
|
module MeshingBenchmarks
using FileIO
using NRRD
using Meshing
using MeshIO
using GeometryTypes
using BenchmarkTools
function benchmark()
here = dirname(@__FILE__)
println(pwd())
println("CTA-cardio.nrrd loading...")
ctacardio = @btime load($here*"/../data/CTA-cardio.nrrd")
q = 100
samples = 1
println("CTA-cardio.nrrd loaded")
println("CTA-cardio.nrrd MarchingCubes Float32 runtime")
mc = @btime HomogenousMesh{Point{3,Float32}, Face{3,Int}}($ctacardio, MarchingCubes(iso=Float32($q), insidepositive=true))
for i in 1:samples-1
@btime HomogenousMesh{Point{3,Float32}, Face{3,Int}}($ctacardio, MarchingCubes(iso=Float32($q), insidepositive=true))
end
println("CTA-cardio.nrrd MarchingCubes Float64 runtime")
@btime HomogenousMesh{Point{3,Float64}, Face{3,Int}}($ctacardio, MarchingCubes(iso=$q, insidepositive=true))
for i in 1:samples-1
@btime HomogenousMesh{Point{3,Float64}, Face{3,Int}}($ctacardio, MarchingCubes(iso=$q, insidepositive=true))
end
# println("CTA-cardio.nrrd MarchingTetrahedra Float32 runtime")
# mt = @btime HomogenousMesh{Point{3,Float32},Face{3,Int}}($ctasdf, MarchingTetrahedra($q))
# for i in 1:samples-1
# @btime HomogenousMesh{Point{3,Float32},Face{3,Int}}($ctasdf, MarchingTetrahedra($q))
# end
println("Saving files")
save("ctacardio_mc.ply", mc)
#save("ctacardio_mt.ply", mt)
end
function brain(samples=1, q=100)
here = dirname(@__FILE__)
brain = load(here*"/../Seg3DData/Brain_DataSet/MRI-brain50.nrrd")
brainsdf = SignedDistanceField(HyperRectangle(Vec(0,0,0), Vec(10,10,10)),brain.data)
println("Brain.nrrd MarchingCubes Float32 runtime")
mc_brain = @btime HomogenousMesh{Point{3,Float32},Face{3,Int}}($brainsdf, MarchingCubes(iso=$q, insidepositive=false))
for i in 1:samples-1
@btime HomogenousMesh{Point{3,Float32},Face{3,Int}}($brainsdf, MarchingCubes(iso=$q, insidepositive=false))
end
save("brian_mc.ply", mc_brain)
end
end # module
|
{"hexsha": "c4fc6b88f75f70c7ddca98c3b4c1b15bb4cc0c57", "size": 2043, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MeshingBenchmarks.jl", "max_stars_repo_name": "sjkelly/MeshingBenchmarks.jl", "max_stars_repo_head_hexsha": "3184b00206e0b8be8626afa63799f7991a21e032", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/MeshingBenchmarks.jl", "max_issues_repo_name": "sjkelly/MeshingBenchmarks.jl", "max_issues_repo_head_hexsha": "3184b00206e0b8be8626afa63799f7991a21e032", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MeshingBenchmarks.jl", "max_forks_repo_name": "sjkelly/MeshingBenchmarks.jl", "max_forks_repo_head_hexsha": "3184b00206e0b8be8626afa63799f7991a21e032", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4307692308, "max_line_length": 126, "alphanum_fraction": 0.6999510524, "num_tokens": 689}
|
[STATEMENT]
lemma \<rho>'_ide_simp:
assumes "ide a"
shows "\<rho>'.map a = \<r>\<^sup>-\<^sup>1[a]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<rho>' a = \<r>\<^sup>-\<^sup>1[a]
[PROOF STEP]
using assms \<rho>'.inverts_components \<rho>_ide_simp inverse_unique
[PROOF STATE]
proof (prove)
using this:
ide a
ide ?a \<Longrightarrow> inverse_arrows (\<rho> ?a) (\<rho>' ?a)
ide ?a \<Longrightarrow> \<rho> ?a = \<r>[?a]
inverse_arrows ?f ?g \<Longrightarrow> local.inv ?f = ?g
goal (1 subgoal):
1. \<rho>' a = \<r>\<^sup>-\<^sup>1[a]
[PROOF STEP]
by auto
|
{"llama_tokens": 244, "file": "MonoidalCategory_MonoidalCategory", "length": 2}
|
"""Class for reading, parsing, and downloading data from the Harmonizome API.
"""
import gzip
import json
import os
import logging
# Support for both Python2.X and 3.X.
# -----------------------------------------------------------------------------
try:
import io
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.parse import quote_plus
except ImportError:
from StringIO import StringIO
from urllib2 import urlopen, HTTPError
from urllib import quote_plus
try:
input_shim = raw_input
except NameError:
# If `raw_input` throws a `NameError`, the user is using Python 2.X.
input_shim = input
import pandas as pd
import numpy as np
from scipy.sparse import lil_matrix, isspmatrix
from itertools import takewhile, repeat
def getfshape(fn, row_sep='\n', col_sep='\t', open_args={}):
    ''' Fast and efficient way of finding the row/column count of a file '''
with open(fn, 'r', newline=row_sep, **open_args) as f:
col_size = f.readline().count(col_sep) + 1
row_size = sum(1 for line in f) + 1
return (row_size, col_size)
def parse(fn, column_size=3, index_size=3, shape=None,
index_fmt=np.ndarray, data_fmt=np.ndarray,
index_dtype=np.object, data_dtype=np.float64,
col_sep='\t', row_sep='\n',
open_args={}):
    '''
    Smart(er) parser for processing matrix formats. Evaluates the size and
    constructs ndframes of the right size before parsing, which also allows
    more efficient loading of sparse dataframes. To obtain a sparse
    representation use:
        data_fmt=lil_matrix
    This only works if all of the data is of the same type; if the values are
    not floats, pass an appropriate dtype via data_dtype (the default is
    np.float64).
    Returns:
        (column_names, columns, index_names, index, data)
    '''
if shape is not None:
rows, cols = shape
else:
rows, cols = getfshape(fn, row_sep=row_sep, col_sep=col_sep, open_args=open_args)
columns = index_fmt((column_size, cols - index_size), dtype=index_dtype)
index = index_fmt((rows - column_size, index_size), dtype=index_dtype)
data = data_fmt((rows - column_size, cols - index_size), dtype=data_dtype)
with open(fn, 'r', newline=row_sep, **open_args) as fh:
header = np.array([next(fh).strip().split(col_sep)
for _ in repeat(None, column_size)])
column_names = header[:column_size, index_size - 1]
index_names = header[column_size - 1, :index_size]
columns[:, :] = header[:column_size, index_size:]
for ind, line in enumerate(fh):
lh = line.strip().split(col_sep)
index[ind, :] = lh[:index_size]
data[ind, :] = lh[index_size:]
return (column_names, columns, index_names, index, data)
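# A minimal usage sketch for parse() above. The file name and layout here are
# illustrative assumptions (a tab-separated matrix with 3 header rows and 3
# index columns), not part of the real Harmonizome downloads:
#
#   column_names, columns, index_names, index, data = parse(
#       'example_matrix.txt', column_size=3, index_size=3,
#       data_fmt=lil_matrix, data_dtype=np.float64)
#
# Passing data_fmt=lil_matrix fills the values into a scipy sparse matrix,
# which parse_df(..., sparse=True) then wraps in a SparseDataFrame.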
def parse_df(fn, sparse=False, default_fill_value=None,
column_apply=None, index_apply=None, df_args={},
**kwargs):
data_fmt = lil_matrix if sparse else np.ndarray
df_type = pd.SparseDataFrame if sparse else pd.DataFrame
(
column_names, columns,
index_names, index,
data,
) = parse(fn, data_fmt=data_fmt, **kwargs)
if column_apply is not None:
column_names, columns = column_apply(column_names.T, columns.T)
else:
column_names, columns = (column_names.T, columns.T)
if index_apply is not None:
index_names, index = index_apply(index_names, index)
return df_type(
data=data.tocsr() if sparse else data,
index=pd.Index(
data=index,
name=str(index_names),
dtype=np.object,
),
columns=pd.Index(
data=columns,
name=str(column_names),
dtype=np.object,
),
**df_args,
)
def save_df(df, fn):
df.reset_index().to_feather(fn)
def read_df(fn, sparse=False, **kwargs):
df = pd.read_feather(fn)
df = df.set_index(df.columns[0])
return df.to_sparse(**kwargs) if sparse else df
def df_column_uniquify(df):
df_columns = df.columns
new_columns = []
for item in df_columns:
counter = 0
newitem = item
while newitem in new_columns:
counter += 1
newitem = "{}_{}".format(item, counter)
new_columns.append(newitem)
df.columns = new_columns
return df
# Enumerables and constants
# -----------------------------------------------------------------------------
class Enum(set):
"""Simple Enum shim since Python 2.X does not have them.
"""
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
# The entity types supported by the Harmonizome API.
class Entity(Enum):
DATASET = 'dataset'
GENE = 'gene'
GENE_SET = 'gene_set'
ATTRIBUTE = 'attribute'
GENE_FAMILY = 'gene_family'
NAMING_AUTHORITY = 'naming_authority'
PROTEIN = 'protein'
RESOURCE = 'resource'
def json_from_url(url):
"""Returns API response after decoding and loading JSON.
"""
response = urlopen(url)
data = response.read().decode('utf-8')
return json.loads(data)
VERSION = '1.0'
API_URL = 'http://amp.pharm.mssm.edu/Harmonizome/api'
DOWNLOAD_URL = 'http://amp.pharm.mssm.edu/static/hdfs/harmonizome/data'
# This config objects pulls the names of the datasets, their directories, and
# the possible downloads from the API. This allows us to add new datasets and
# downloads without breaking this file.
config = json_from_url('http://amp.pharm.mssm.edu/Harmonizome/api/dark/script_config')
DOWNLOADS = [x for x in config.get('downloads')]
DATASET_TO_PATH = config.get('datasets')
# Harmonizome class
# -----------------------------------------------------------------------------
class Harmonizome(object):
__version__ = VERSION
DATASETS = DATASET_TO_PATH.keys()
@classmethod
def get(cls, entity, name=None, start_at=None):
"""Returns a single entity or a list, depending on if a name is
provided. If no name is provided and start_at is specified, returns a
list starting at that cursor position.
"""
if name:
name = quote_plus(name)
return _get_by_name(entity, name)
if start_at is not None and type(start_at) is int:
return _get_with_cursor(entity, start_at)
url = '%s/%s/%s' % (API_URL, VERSION, entity)
result = json_from_url(url)
return result
@classmethod
def next(cls, response):
"""Returns the next set of entities based on a previous API response.
"""
start_at = _get_next(response)
entity = _get_entity(response)
return cls.get(entity=entity, start_at=start_at)
@classmethod
def download(cls, datasets=None, what=None):
"""For each dataset, creates a directory and downloads files into it.
"""
# Why not check `if not datasets`? Because in principle, a user could
# call `download([])`, which should download nothing, not everything.
# Why might they do this? Imagine that the list of datasets is
# dynamically generated in another user script.
if datasets is None:
datasets = cls.DATASETS
warning = 'Warning: You are going to download all Harmonizome '\
'data. This is roughly 30GB. Do you accept?\n(Y/N) '
resp = input_shim(warning)
if resp.lower() != 'y':
return
        # Track whether the caller explicitly requested downloads; only in that
        # case is a missing download treated as an error.
        requested_explicitly = what is not None
        if what is None:
            what = DOWNLOADS
        for dataset in datasets:
            if dataset not in cls.DATASETS:
                msg = '"%s" is not a valid dataset name. Check the `DATASETS`'\
                      ' property for a complete list of names.' % dataset
                raise AttributeError(msg)
            if not os.path.exists(dataset):
                os.mkdir(dataset)
            for dl in what:
                path = DATASET_TO_PATH[dataset]
                url = '%s/%s/%s' % (DOWNLOAD_URL, path, dl)
                try:
                    response = urlopen(url)
                except HTTPError as e:
                    # Not every dataset has all downloads; skip missing ones
                    # unless the caller asked for them explicitly.
                    if requested_explicitly:
                        raise Exception('Error downloading from %s: %s' % (url, e))
                    continue
filename = '%s/%s' % (dataset, dl)
filename = filename.replace('.gz', '')
if response.code != 200:
raise Exception('This should not happen')
if os.path.isfile(filename):
logging.info('Using cached `%s`' % (filename))
else:
_download_and_decompress_file(response, filename)
yield filename
@classmethod
def download_df(cls, datasets=None, what=None, sparse=False, **kwargs):
for file in cls.download(datasets, what):
if sparse:
yield _read_as_sparse_dataframe(file, **kwargs)
else:
yield _read_as_dataframe(file, **kwargs)
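# A minimal usage sketch (hedged): the dataset and file names below are
# illustrative placeholders and must match entries in Harmonizome.DATASETS and
# DOWNLOADS; both methods are generators, so nothing is fetched until iteration.
#
#   for df in Harmonizome.download_df(datasets=['<some dataset name>'],
#                                     what=['gene_attribute_matrix.txt.gz']):
#       print(df.shape)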
# Utility functions
# -------------------------------------------------------------------------
def _get_with_cursor(entity, start_at):
"""Returns a list of entities based on cursor position.
"""
    url = '%s/%s/%s?cursor=%s' % (API_URL, VERSION, entity, str(start_at))
result = json_from_url(url)
return result
def _get_by_name(entity, name):
"""Returns a single entity based on name.
"""
url = '%s/%s/%s/%s' % (API_URL, VERSION, entity, name)
return json_from_url(url)
def _get_entity(response):
"""Returns the entity from an API response.
"""
path = response['next'].split('?')[0]
return path.split('/')[3]
def _get_next(response):
"""Returns the next property from an API response.
"""
if response['next']:
return int(response['next'].split('=')[1])
return None
# This function was adopted from here: http://stackoverflow.com/a/15353312.
# def _download_and_decompress_file(response, filename):
# """Downloads and decompresses a single file from a response object.
# """
# compressed_file = StringIO()
# compressed_file.write(response.read())
# compressed_file.seek(0)
# decompressed_file = gzip.GzipFile(fileobj=compressed_file, mode='rb')
# with open(filename, 'w+') as outfile:
# outfile.write(decompressed_file.read())
def _download_and_decompress_file(response, filename):
"""
"""
compressed_file = io.BytesIO(response.read())
decompressed_file = gzip.GzipFile(fileobj=compressed_file)
with open(filename, 'wb+') as outfile:
outfile.write(decompressed_file.read())
def json_ind_no_slash(ind_names, ind):
return (
json.dumps([ind_name.replace('/', '|')
for ind_name in ind_names]),
[json.dumps([ii.replace('/', '|')
for ii in i])
for i in ind],
)
def _read_as_dataframe(fn):
''' Standard loading of dataframe '''
# return fn
import pandas as pd
if fn.endswith('gene_attribute_matrix.txt'):
return df_column_uniquify(parse_df(
fn,
sparse=False,
index_apply=json_ind_no_slash,
column_apply=json_ind_no_slash,
open_args=dict(encoding="latin-1"),
))
elif fn.endswith('gene_list_terms.txt') or fn.endswith('attribute_list_entries.txt'):
return pd.read_table(fn, encoding="latin-1", index_col=None)
else:
raise Exception('Unable to parse this file into a dataframe.')
def _read_as_sparse_dataframe(fn, blocksize=10e6, fill_value=0):
''' Efficient loading sparse dataframe '''
# return fn
import pandas as pd
import numpy as np
if fn.endswith('gene_attribute_matrix.txt'):
return df_column_uniquify(parse_df(
fn,
sparse=True,
index_apply=json_ind_no_slash,
column_apply=json_ind_no_slash,
df_args=dict(default_fill_value=0),
open_args=dict(encoding="latin-1"),
))
else:
raise Exception('Unable to parse this file into a dataframe.')
|
{"hexsha": "230d68bf47bd6dd7857e8eeb2f114fa3030c4a44", "size": 11967, "ext": "py", "lang": "Python", "max_stars_repo_path": "appyters/harmonizome_ml/harmonizome.py", "max_stars_repo_name": "shui02/appyter-catalog", "max_stars_repo_head_hexsha": "dfa15946d151daeb7d7b1bc9af9e48428474f012", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "appyters/harmonizome_ml/harmonizome.py", "max_issues_repo_name": "shui02/appyter-catalog", "max_issues_repo_head_hexsha": "dfa15946d151daeb7d7b1bc9af9e48428474f012", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-04-15T22:47:17.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-28T16:34:16.000Z", "max_forks_repo_path": "appyters/harmonizome_ml/harmonizome.py", "max_forks_repo_name": "shui02/appyter-catalog", "max_forks_repo_head_hexsha": "dfa15946d151daeb7d7b1bc9af9e48428474f012", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-14T20:25:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-14T20:25:32.000Z", "avg_line_length": 32.5190217391, "max_line_length": 89, "alphanum_fraction": 0.6182000501, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2750}
|
import numpy as np
from alphaomega.cv.channel.channel_split import channel_splitter_apply
from alphaomega.cv.channel.channel_merge import channel_merger_apply
from alphaomega.utils.exceptions import WrongAttribute, WrongDimension
class BorderIntropolation:
"""
Usage: Use this class to intropolate the borders of a single channel image.
"""
def __init__(self):
self.__top = 1
self.__bottom = 1
self.__left = 1
self.__right = 1
self.__border_type = "constant"
def config(self, **kwargs) -> None:
"""
Usage: Use this method to configure the parameteres of the BorderIntropolation instantiation.
Inputs:
top : The number of pixels added to the top of the image.
bottom : The number of pixels added to the bottom of the image.
left : The number of pixels added to the left of the image.
rights : The number of pixels added to the right of the image.
pixels_add : Instead of specifying the number of pixels added to the top, bottom, left, and right of the image, you can give the same number of pixels to all of them using this parameter.
border_type: The type of intropolatoin. It could be one of this options:
"constant": default option.
"reflect"
"replicate"
"wrap"
"reflect_without_border"
Returns: Nothing.
"""
for key, value in kwargs.items():
if key == "top":
if (int(value) >= 0):
self.__top = value
else:
raise WrongAttribute("The value of top should be an integer greater than -1.")
elif key == "bottom":
if (int(value) >= 0):
self.__bottom = value
else:
raise WrongAttribute("The value of bottom should be an integer greater than -1.")
elif key == "left":
if (int(value) >= 0):
self.__left = value
else:
raise WrongAttribute("The value of left should be an integer greater than -1.")
elif key == "right":
if (int(value) >= 0):
self.__right = value
else:
raise WrongAttribute("The value of right should be an integer greater than -1.")
elif key == "pixels_add":
if (int(value) > 0):
self.__left = value
self.__right = value
self.__top = value
self.__bottom = value
else:
raise WrongAttribute("The value of pixels_add should be an integer greater than -1.")
elif key == "border_type":
if (value not in ["constant", "reflect", "replicate", "wrap", "reflect_without_border"]):
raise WrongAttribute('The only options for border are "constant", "reflect", "replicate", "wrap", and "reflect_without_border".')
else:
self.__border_type = value
def apply(self, image: np.ndarray) -> np.ndarray:
"""
Usage: Use this method to apply BorderIntropolation to an image.
Inputs:
image: The intropolation will be applied on this image.
Returns:
- The new image with intropolated borders.
"""
#checking if the image has three dimensions.
if (len(image.shape) == 3):
channels_applied = []
channels = channel_splitter_apply(image)
for index, channel in enumerate(channels):
channels_applied.append(self.apply(channel))
image = channel_merger_apply(channels_applied)
return image
elif (len(image.shape) == 2):
if self.__border_type == "constant":
image = np.concatenate((np.zeros((image.shape[0], self.__left), dtype=np.int16), image), axis = 1)
image = np.concatenate((image, np.zeros((image.shape[0], self.__right), dtype=np.int16)), axis = 1)
image = np.concatenate((np.zeros((self.__top, image.shape[1]), dtype=np.int16), image), axis = 0)
image = np.concatenate((image, np.zeros((self.__bottom, image.shape[1]), dtype=np.int16)), axis = 0)
return image
if self.__border_type == "replicate":
image = np.concatenate((np.repeat(np.expand_dims(image[:, 0], 1), self.__left, 1), image), axis = 1)
image = np.concatenate((image, np.repeat(np.expand_dims(image[:, image.shape[1]-1], 1), self.__right, 1)), axis = 1)
image = np.concatenate((np.repeat(np.expand_dims(image[0, :], 0), self.__top, 0), image), axis = 0)
image = np.concatenate((image, np.repeat(np.expand_dims(image[image.shape[0]-1, :], 0), self.__bottom, 0)), axis=0)
return image
if self.__border_type == "reflect":
image = np.concatenate((np.flip(image[:, 0:self.__left], axis= 1), image), axis= 1)
image = np.concatenate((image, np.flip(image[:, image.shape[1] - self.__right:], axis = 1)), axis= 1)
image = np.concatenate((np.flip(image[:self.__top,:], axis=0), image), axis= 0)
image = np.concatenate((image, np.flip(image[image.shape[0] - self.__bottom:,:], axis=0)), axis= 0)
return image
if self.__border_type == "reflect_without_border":
image = np.concatenate((np.flip(image[:, 1:self.__left+1], axis= 1), image), axis= 1)
image = np.concatenate((image, np.flip(image[:, image.shape[1] - self.__right - 1:-1], axis = 1)), axis= 1)
image = np.concatenate((np.flip(image[1:self.__top + 1,:], axis=0), image), axis= 0)
image = np.concatenate((image, np.flip(image[image.shape[0] - self.__bottom - 1:-1,:], axis=0)), axis= 0)
return image
            # The only option left is "wrap".
orig = image.copy()
image = np.concatenate((orig[:, image.shape[1] - self.__left:], image), axis= 1)
image = np.concatenate((image, orig[:, 0:self.__right]), axis= 1)
orig = image.copy()
image = np.concatenate((image[image.shape[0] - self.__top:,:], image), axis= 0)
image = np.concatenate((image, orig[:self.__bottom,:]), axis= 0)
return image
raise WrongDimension("image should be 2 dimensional or 3 dimensional.")
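# A minimal sketch of the class-based API above (the parameter values are
# illustrative):
#
#   padder = BorderIntropolation()
#   padder.config(pixels_add=2, border_type="replicate")
#   padded = padder.apply(image)   # image: a 2-D or 3-D numpy array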
def border_intropolate_apply(image: np.ndarray, pixels_add: int, border_type: str = "constant") -> np.ndarray:
"""
Usage: Use this function to apply border intropolation to an image.
Inputs:
image: The intropolation will be applied on this image.
pixels_add: The pixels to add to the borders
border_type: The type of intropolatoin. It could be one of this options:
"constant": default option.
"reflect"
"replicate"
"wrap"
"reflect_without_border"
Returns:
- The new image with intropolated borders.
"""
#checking for pixels_add
if int(pixels_add) < 0:
raise WrongAttribute("pixels_add should be a non-negative integer.")
if (len(image.shape) == 3):
channels_applied = []
channels = channel_splitter_apply(image)
for index, channel in enumerate(channels):
channels_applied.append(border_intropolate_apply(channel, pixels_add, border_type))
image = channel_merger_apply(channels_applied)
return image
elif (len(image.shape) == 2):
if border_type == "constant":
image = np.concatenate((np.zeros((image.shape[0], pixels_add), dtype=np.int16), image), axis = 1)
image = np.concatenate((image, np.zeros((image.shape[0], pixels_add), dtype=np.int16)), axis = 1)
image = np.concatenate((np.zeros((pixels_add, image.shape[1]), dtype=np.int16), image), axis = 0)
image = np.concatenate((image, np.zeros((pixels_add, image.shape[1]), dtype=np.int16)), axis = 0)
return image
if border_type == "replicate":
image = np.concatenate((np.repeat(np.expand_dims(image[:, 0], 1), pixels_add, 1), image), axis = 1)
image = np.concatenate((image, np.repeat(np.expand_dims(image[:, image.shape[1]-1], 1), pixels_add, 1)), axis = 1)
image = np.concatenate((np.repeat(np.expand_dims(image[0, :], 0), pixels_add, 0), image), axis = 0)
image = np.concatenate((image, np.repeat(np.expand_dims(image[image.shape[0]-1, :], 0), pixels_add, 0)), axis=0)
return image
if border_type == "reflect":
image = np.concatenate((np.flip(image[:, 0:pixels_add], axis= 1), image), axis= 1)
image = np.concatenate((image, np.flip(image[:, image.shape[1] - pixels_add:], axis = 1)), axis= 1)
image = np.concatenate((np.flip(image[:pixels_add,:], axis=0), image), axis= 0)
image = np.concatenate((image, np.flip(image[image.shape[0] - pixels_add:,:], axis=0)), axis= 0)
return image
if border_type == "reflect_without_border":
image = np.concatenate((np.flip(image[:, 1:pixels_add+1], axis= 1), image), axis= 1)
image = np.concatenate((image, np.flip(image[:, image.shape[1] - pixels_add - 1:-1], axis = 1)), axis= 1)
image = np.concatenate((np.flip(image[1:pixels_add + 1,:], axis=0), image), axis= 0)
image = np.concatenate((image, np.flip(image[image.shape[0] - pixels_add - 1:-1,:], axis=0)), axis= 0)
return image
if border_type == "warp":
orig = image.copy()
image = np.concatenate((orig[:, image.shape[1] - pixels_add:], image), axis= 1)
image = np.concatenate((image, orig[:, 0:pixels_add]), axis= 1)
orig = image.copy()
image = np.concatenate((image[image.shape[0] - pixels_add:,:], image), axis= 0)
image = np.concatenate((image, orig[:pixels_add,:]), axis= 0)
return image
raise WrongAttribute("wrong argument for border type. It could be one of this options:\nconstant\nreplicate\nreflect\nreflect_without_border\nwarp")
raise WrongDimension("image should be 2 dimensional or 3 dimensional.")
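# A minimal sketch of the functional API on a tiny array (values illustrative):
#
#   img = np.arange(9, dtype=np.int16).reshape(3, 3)
#   padded = border_intropolate_apply(img, pixels_add=1, border_type="reflect")
#   # padded.shape == (5, 5); the outer ring mirrors the original border pixels.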
|
{"hexsha": "bc9805d95f78fc3bef75e0c26e17d0dc4f416a86", "size": 10554, "ext": "py", "lang": "Python", "max_stars_repo_path": "alphaomega/cv/border/border_intropolation.py", "max_stars_repo_name": "heidariarash/Alpha-Omega", "max_stars_repo_head_hexsha": "123be3c90cfb0e382845a1243923613d5475b529", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "alphaomega/cv/border/border_intropolation.py", "max_issues_repo_name": "heidariarash/Alpha-Omega", "max_issues_repo_head_hexsha": "123be3c90cfb0e382845a1243923613d5475b529", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "alphaomega/cv/border/border_intropolation.py", "max_forks_repo_name": "heidariarash/Alpha-Omega", "max_forks_repo_head_hexsha": "123be3c90cfb0e382845a1243923613d5475b529", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.2571428571, "max_line_length": 199, "alphanum_fraction": 0.5739056282, "include": true, "reason": "import numpy", "num_tokens": 2475}
|
from ShazamAPI import Shazam
import subprocess
import os
import time
import urllib.request
from PIL import Image, ImageOps
from pathlib import Path
from selenium.common.exceptions import WebDriverException
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sys
import pylast
from io import BytesIO
from collections import Counter
import statistics
from colorthief import ColorThief
import numpy as np
import json
import requests
API_KEY = "___________"
API_SECRET = "____________"
USERNAME = "_________"
PASSWORD = "_________"
homepath = os.path.expanduser("~")
firstloop = 1
lasttrack = ""
lastartist = ""
timestart = time.time()
anythingplayed = 0
name = ""
lastscrobbled = ""
offset = 0
lastoffset = 0
correctscrobbles = 0
prevname = ""
album_url_old = ""
lapse_timer = 0
lastdisplayed =""
# Run the recognition loop forever,
# even if an error occurs.
while True:
    # Catch all errors so a single failure does not stop the loop
try:
# Download sample
os.system('arecord -r 44100 -d 10 -t wav --device="hw:0,0" '+homepath+'/test-mic.wav')
# Get song
ftr = open(homepath+'/test-mic.wav', 'rb').read()
shazam = Shazam(ftr)
recognize_generator = shazam.recognizeSong()
resp_shazam = next(recognize_generator)
# Get values
resp2 = resp_shazam[1]
#print(resp2)
#print("Length:")
#print(len(resp2))
#print("end.")
selected_color = 1 # assume you will get the color
if len(resp2) > 5:
name = resp2["track"]["title"]
artist = resp2["track"]["subtitle"]
coverurl = resp2["track"]["images"]["coverarthq"]
offset = resp2["matches"][0]["offset"]
print("#################### ",artist,": ",name,"###################")
if (name == prevname) and (len(name) > 0):
correctscrobbles = correctscrobbles + 1
prevname = name
# Get image
urllib.request.urlretrieve(coverurl, homepath+"/albumimage.jpg")
# Compose html
htp1 = Path(homepath+"/songhtml1.html").read_text()
htp2 = Path(homepath+"/songhtml2.html").read_text()
ht_whole = htp1 + artist + "<br>" + name + htp2
Path(homepath+'/songhtml.html').write_text(ht_whole)
# Open browser and display file
if firstloop == 1:
option = Options()
option.add_argument("--start-maximized")
option.add_argument("--no-sandbox")
option.add_argument("--disable-web-security")
option.add_argument("--ignore-certificate-errors")
option.add_argument("--kiosk")
option.add_argument("--disable-password-manager-reauthentication")
option.add_argument("--disable-infobars")
option.add_argument("--disable-notifications")
option.add_experimental_option('excludeSwitches', ['load-extension', 'enable-automation'])
browser = webdriver.Chrome("/snap/bin/chromium.chromedriver",0,option)
# Close all previously open windows
handles = browser.window_handles
size = len(handles)
#for x in range(size):
# driver.switch_to.window(handles[x])
# print(driver.title)
if size > 0:
browser.close()
browser = webdriver.Chrome("/snap/bin/chromium.chromedriver",0,option)
browser.get('file://'+homepath+'/songhtml.html')
firstloop = 0
anythingplayed = 1
timestart = time.time()
lastoffset = offset
lastdisplayed = name
else:
if (lasttrack != name) and (lastdisplayed != name):
lastoffset = offset
#timestart = time.time()
browser.refresh()
lastdisplayed = name
#Set lamps
resp = requests.get(coverurl)
assert resp.ok
img = Image.open(BytesIO(resp.content))
img2 = ImageOps.posterize(img.convert('RGB'), bits=4)
img2.save(homepath+"/albumimage.jpg")
img4 = ColorThief(homepath+"/albumimage.jpg")
dominant_color = img4.get_color(quality=1)
vr,vb,vg = dominant_color
#print(type(dominant_color))
print("Dominant color: ",vr,vb,vg)
#print("Resp: ",resp,", TYPE: ",type(resp))
list_of_colors = [[255,255,255],[255,0,0],[0,255,0],[0,0,255],[255,255,0],[0,255,255],[255,143,191],[255,215,0],[255,0,255],[255,127,80]]
ha_list_of_colors = [[0,0,127],[0,0,255],[0,127,0],[0,127,127],[0,127,255],[0,255,0],[0,255,127],[0,255,255],[127,0,0],[127,0,127],[127,0,255],[127,127,0],[127,127,255],[127,255,0],[127,255,127],[127,255,255],[255,0,0],[255,0,127],[255,0,255],[255,127,0],[255,127,127],[255,127,255],[255,255,0],[255,255,127]]
color = [vr,vb,vg]
def closest(colors,color):
colors = np.array(colors)
color = np.array(color)
distances = np.sqrt(np.sum((colors-color)**2,axis=1))
index_of_smallest = np.where(distances==np.amin(distances))
smallest_distance = colors[index_of_smallest]
return smallest_distance
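            # For example, if the dominant color were (200, 30, 40), its Euclidean
            # distances to [255, 0, 0] and [0, 0, 255] would be ~74 and ~295, so
            # closest() would return [[255, 0, 0]] (numbers here are illustrative).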
closest_color = closest(ha_list_of_colors,color) # for home assistant changed to ha_list_of_colors
print("Closest color: ",closest_color )
#print("Index1: ", closest_color[0][2])
main_color = "pink"
if(np.array_equal(closest_color,np.asarray([[255,255,255]]))): main_color="white"
if(np.array_equal(closest_color,np.asarray([[255,0,0]]))): main_color="red"
if(np.array_equal(closest_color,np.asarray([[0,255,0]]))): main_color="green"
if(np.array_equal(closest_color,np.asarray([[0,0,255]]))): main_color="blue"
if(np.array_equal(closest_color,np.asarray([[255,255,0]]))): main_color="yellow"
if(np.array_equal(closest_color,np.asarray([[0,255,255]]))): main_color="cyan"
if(np.array_equal(closest_color,np.asarray([[255,143,191]]))): main_color="pink"
if(np.array_equal(closest_color,np.asarray([[255,215,0]]))): main_color="gold"
if(np.array_equal(closest_color,np.asarray([[255,0,255]]))): main_color="purple"
if(np.array_equal(closest_color,np.asarray([[255,127,80]]))): main_color="orange"
ha_url = "http://_____IP ADDRESS OF YOUR HOME ASSISTANT SERVER_____:8123/api/services/light/turn_on"
ha_headers = {"Authorization": "Bearer ________HA_AUTHORIZATION_KEY______________-fR-o", "content-type": "application/json",}
if (selected_color == 1):
ha_data = {"entity_id": "light.____ENTITY_1____", "rgb_color": [int(closest_color[0][0]),int(closest_color[0][1]),int(closest_color[0][2])]} # "brightness": 150}
ha_data2 = {"entity_id": "light.____ENTITY_2____", "rgb_color": [int(closest_color[0][0]),int(closest_color[0][1]),int(closest_color[0][2])]} # "brightness": 255}
else:
ha_data = {"entity_id": "light.____ENTITY_1____", "rgb_color": [50,50,10],} # "brightness": 50}
ha_data2 = {"entity_id": "light.____ENTITY_2____", "rgb_color": [50,50,10],} # "brightness": 50}
#print("Calling "+bulb_url)
print("Calling "+ha_url)
print("headers: ",ha_headers)
print("data: ",ha_data)
ha_resp = requests.post(ha_url, headers=ha_headers, json=ha_data)
ha_resp2 = requests.post(ha_url, headers=ha_headers, json=ha_data2)
print(ha_resp)
#Scrobble now playing
#network = pylast.LastFMNetwork(api_key = API_KEY, api_secret = API_SECRET, username = USERNAME, password_hash = pylast.md5(PASSWORD))
#network.update_now_playing(artist = artist, title = name)
#Scrobble now playing
network = pylast.LastFMNetwork(api_key = API_KEY, api_secret = API_SECRET, username = USERNAME, password_hash = pylast.md5(PASSWORD))
network.update_now_playing(artist = artist, title = name)
else:
print("Nothing found...")
name = ""
artist = ""
#Scrobble last played
if lasttrack != name:
timenow = time.time()
if (correctscrobbles > 2) and (anythingplayed == 1) and (lastscrobbled != lasttrack) and (len(lasttrack) > 0) and ((timenow - timestart) > 180):
print("Scrobbling last track: ",lasttrack)
trackstart = timenow - lastoffset
network.scrobble(artist = lastartist, title = lasttrack, timestamp = trackstart)
lastscrobbled = lasttrack
correctscrobbles = 0
timestart = time.time()
lasttrack = name
lastartist = artist
except:
print("Some error, trying again...")
|
{"hexsha": "60a5e2d62a66c3318d6b3f4bec8e06b083322a27", "size": 7888, "ext": "py", "lang": "Python", "max_stars_repo_path": "listen-show-scrobble-setlamps.py", "max_stars_repo_name": "jbrepogmailcom/listen-show-scrobble", "max_stars_repo_head_hexsha": "a8a4313570f19a5fd5ba195f13da0c72826c6800", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "listen-show-scrobble-setlamps.py", "max_issues_repo_name": "jbrepogmailcom/listen-show-scrobble", "max_issues_repo_head_hexsha": "a8a4313570f19a5fd5ba195f13da0c72826c6800", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "listen-show-scrobble-setlamps.py", "max_forks_repo_name": "jbrepogmailcom/listen-show-scrobble", "max_forks_repo_head_hexsha": "a8a4313570f19a5fd5ba195f13da0c72826c6800", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3502304147, "max_line_length": 312, "alphanum_fraction": 0.6779918864, "include": true, "reason": "import numpy", "num_tokens": 2350}
|
import tensorflow as tf
import tensorflow_compression as tfc
from focal_loss import focal_loss
import os
import numpy as np
from collections import namedtuple
def pc_to_tf(points, dense_tensor_shape):
x = points
x = tf.pad(x, [[0, 0], [1, 0]])
st = tf.sparse.SparseTensor(x, tf.ones_like(x[:,0]), dense_tensor_shape)
return st
def process_x(x, dense_tensor_shape):
x = tf.sparse.to_dense(x, default_value=0, validate_indices=False)
x.set_shape(dense_tensor_shape)
x = tf.cast(x, tf.float32)
return x
def quantize_tensor(x):
x = tf.clip_by_value(x, 0, 1)
x = tf.round(x)
x = tf.cast(x, tf.uint8)
return x
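# A minimal sketch of the point-cloud-to-voxel-grid helpers above (shapes are
# illustrative; dense_tensor_shape is [1, D, D, D] for channels-first data):
#
#   points = np.array([[0, 0, 0], [1, 2, 3]], dtype=np.int64)
#   st = pc_to_tf(points, dense_tensor_shape=[1, 64, 64, 64])
#   x = process_x(st, dense_tensor_shape=[1, 64, 64, 64])  # float32 grid, 1.0 at occupied voxels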
def input_fn(features, batch_size, dense_tensor_shape, preprocess_threads, repeat=True, prefetch_size=1):
# Create input data pipeline.
with tf.device('/cpu:0'):
zero = tf.constant(0)
dataset = tf.data.Dataset.from_generator(lambda: iter(features), tf.int64, tf.TensorShape([None, 3]))
if repeat:
dataset = dataset.shuffle(buffer_size=len(features))
dataset = dataset.repeat()
dataset = dataset.map(lambda x: pc_to_tf(x, dense_tensor_shape), num_parallel_calls=preprocess_threads)
dataset = dataset.map(lambda x: (process_x(x, dense_tensor_shape), zero), num_parallel_calls=preprocess_threads)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(prefetch_size)
return dataset.make_one_shot_iterator().get_next()
def analysis_transform(tensor, num_filters, data_format):
with tf.variable_scope("analysis"):
with tf.variable_scope("layer_0"):
layer = tf.layers.Conv3D(
num_filters, (9, 9, 9), strides=(2, 2, 2), padding="same",
use_bias=True, activation=tf.nn.relu, data_format=data_format)
tensor = layer(tensor)
with tf.variable_scope("layer_1"):
layer = tf.layers.Conv3D(
num_filters, (5, 5, 5), strides=(2, 2, 2), padding="same",
use_bias=True, activation=tf.nn.relu, data_format=data_format)
tensor = layer(tensor)
with tf.variable_scope("layer_2"):
layer = tf.layers.Conv3D(
num_filters, (5, 5, 5), strides=(2, 2, 2), padding="same",
use_bias=False, activation=None, data_format=data_format)
tensor = layer(tensor)
return tensor
def synthesis_transform(tensor, num_filters, data_format):
with tf.variable_scope("synthesis"):
with tf.variable_scope("layer_0"):
layer = tf.layers.Conv3DTranspose(
num_filters, (5, 5, 5), strides=(2, 2, 2), padding="same",
use_bias=True, activation=tf.nn.relu, data_format=data_format)
tensor = layer(tensor)
with tf.variable_scope("layer_1"):
layer = tf.layers.Conv3DTranspose(
num_filters, (5, 5, 5), strides=(2, 2, 2), padding="same",
use_bias=True, activation=tf.nn.relu, data_format=data_format)
tensor = layer(tensor)
with tf.variable_scope("layer_2"):
layer = tf.layers.Conv3DTranspose(
1, (9, 9, 9), strides=(2, 2, 2), padding="same",
use_bias=True, activation=tf.nn.relu, data_format=data_format)
tensor = layer(tensor)
return tensor
def model_fn(features, labels, mode, params):
if params.get('decompress') is None:
params['decompress'] = False
params = namedtuple('Struct', params.keys())(*params.values())
# Unused
del labels
training = (mode == tf.estimator.ModeKeys.TRAIN)
if params.decompress:
assert mode == tf.estimator.ModeKeys.PREDICT, 'Decompression must use prediction mode'
y_shape = params.y_shape
y_shape = [params.num_filters] + [int(s) for s in y_shape]
x_shape = tf.constant(params.x_shape, dtype=tf.int64)
entropy_bottleneck = tfc.EntropyBottleneck(data_format=params.data_format, dtype=tf.float32)
y_hat = entropy_bottleneck.decompress(features, y_shape, channels=params.num_filters)
x_hat = synthesis_transform(y_hat, params.num_filters, params.data_format)
# Crop away any extraneous padding on the bottom
# or right boundaries.
x_hat = x_hat[:, :, :x_shape[0], :x_shape[1], :x_shape[2]]
x_hat_quant = quantize_tensor(x_hat)
predictions = {
'x_hat': x_hat,
'x_hat_quant': x_hat_quant
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Get training patch from dataset.
x = features
num_voxels = tf.cast(tf.size(x), tf.float32)
num_occupied_voxels = tf.reduce_sum(x)
# Build autoencoder.
y = analysis_transform(x, params.num_filters, params.data_format)
entropy_bottleneck = tfc.EntropyBottleneck(data_format=params.data_format)
y_tilde, likelihoods = entropy_bottleneck(y, training=training)
x_tilde = synthesis_transform(y_tilde, params.num_filters, params.data_format)
# Quantize
x_quant = quantize_tensor(x)
x_tilde_quant = quantize_tensor(x_tilde)
    # Total number of bits divided by the number of occupied voxels (bits per occupied voxel).
log_likelihoods = tf.log(likelihoods)
train_mbpov = tf.reduce_sum(log_likelihoods) / (-np.log(2) * num_occupied_voxels)
if mode == tf.estimator.ModeKeys.PREDICT:
string = entropy_bottleneck.compress(y)
# Remove batch and channels dimensions
# Repeat batch_size times
x_shape = tf.shape(x)
y_shape = tf.shape(y)
batch_size = x_shape[0]
def repeat(t, n):
return tf.reshape(tf.tile(t, [n]), tf.concat([[n], tf.shape(t)], 0))
x_shape_rep = repeat(x_shape[2:], batch_size)
y_shape_rep = repeat(y_shape[2:], batch_size)
predictions = {
'x_tilde': x_tilde,
'y_tilde': y_tilde,
'x_tilde_quant': x_tilde_quant,
'string': string,
'x_shape': x_shape_rep,
'y_shape': y_shape_rep
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
train_fl = focal_loss(x, x_tilde, gamma=params.gamma, alpha=params.alpha)
# The rate-distortion cost.
train_loss = params.lmbda * train_fl + train_mbpov
# Main metrics
tf.summary.scalar("loss", train_loss)
tf.summary.scalar("mbpov", train_mbpov)
tf.summary.scalar("focal_loss", train_fl)
tf.summary.scalar("num_occupied_voxels", num_occupied_voxels)
tf.summary.scalar("num_voxels", num_voxels)
# Additional metrics
if params.additional_metrics:
train_mae = tf.reduce_mean(tf.abs(x - x_tilde))
train_mse = tf.reduce_mean(tf.squared_difference(x, x_tilde))
train_bpv = tf.reduce_sum(log_likelihoods) / (-np.log(2) * num_voxels)
tp = tf.count_nonzero(x_tilde_quant * x_quant, dtype=tf.float32) / num_voxels
tn = tf.count_nonzero((x_tilde_quant - 1) * (x_quant - 1), dtype=tf.float32) / num_voxels
fp = tf.count_nonzero(x_tilde_quant * (x_quant - 1), dtype=tf.float32) / num_voxels
fn = tf.count_nonzero((x_tilde_quant - 1) * x_quant, dtype=tf.float32) / num_voxels
precision = tp / (tp + fp)
recall = tp / (tp + fn)
accuracy = (tp + tn) / (tp + tn + fp + fn)
specificity = tn / (tn + fp)
f1_score = (2 * precision * recall) / (precision + recall)
tf.summary.scalar("mae", train_mae)
tf.summary.scalar("mse", train_mse)
tf.summary.scalar("bpv", train_bpv)
tf.summary.scalar("precision_metric", precision)
tf.summary.scalar("recall_metric", recall)
tf.summary.scalar("accuracy_metric", accuracy)
tf.summary.scalar("specificity_metric", specificity)
tf.summary.scalar("f1_score_metric", f1_score)
tf.summary.histogram("y", y)
tf.summary.histogram("y_tilde", y_tilde)
tf.summary.histogram("x", x)
tf.summary.histogram("x_tilde", x_tilde)
tf.summary.histogram("x_tilde_quant", x_tilde_quant)
tf.summary.histogram("likelihoods", likelihoods)
tf.summary.histogram("log_likelihoods", log_likelihoods)
# Creates summary for the probability mass function (PMF) estimated in the
# bottleneck.
entropy_bottleneck.visualize()
if mode == tf.estimator.ModeKeys.EVAL:
summary_hook = tf.train.SummarySaverHook(
save_steps=5,
output_dir=os.path.join(params.checkpoint_dir, 'eval'),
summary_op=tf.summary.merge_all())
return tf.estimator.EstimatorSpec(mode, loss=train_loss, evaluation_hooks=[summary_hook])
# Minimize loss and auxiliary loss, and execute update op.
assert mode == tf.estimator.ModeKeys.TRAIN
main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
main_step = main_optimizer.minimize(train_loss, global_step=tf.train.get_global_step())
aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
aux_step = aux_optimizer.minimize(entropy_bottleneck.losses[0])
train_op = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])
return tf.estimator.EstimatorSpec(mode, loss=train_loss, train_op=train_op)
|
{"hexsha": "e036813680e0eba75b5313f6484865fa99112496", "size": 9268, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/compression_model.py", "max_stars_repo_name": "mauriceqch/pcc_geo_cnn", "max_stars_repo_head_hexsha": "22bbf081ffe7b77c9308f54c15490da60e78803c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2019-06-17T21:13:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T07:52:11.000Z", "max_issues_repo_path": "src/compression_model.py", "max_issues_repo_name": "mauriceqch/pcc_geo_cnn", "max_issues_repo_head_hexsha": "22bbf081ffe7b77c9308f54c15490da60e78803c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-07-05T09:51:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-06T14:00:03.000Z", "max_forks_repo_path": "src/compression_model.py", "max_forks_repo_name": "mauriceqch/pcc_geo_cnn", "max_forks_repo_head_hexsha": "22bbf081ffe7b77c9308f54c15490da60e78803c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2019-04-10T01:09:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T01:24:57.000Z", "avg_line_length": 41.1911111111, "max_line_length": 120, "alphanum_fraction": 0.6525679758, "include": true, "reason": "import numpy", "num_tokens": 2287}
|
#include <boost/graph/graph_traits.hpp>
|
{"hexsha": "3a4eedb92bee9f756d0b598ff7a5ae070438a37c", "size": 40, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_graph_graph_traits.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_graph_graph_traits.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_graph_graph_traits.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 20.0, "max_line_length": 39, "alphanum_fraction": 0.8, "num_tokens": 8}
|
import network3
import numpy as np
import scipy
import matplotlib.pyplot as plt
import PIL
training_data, _, _ = network3.load_data_shared()
training_x, training_y = training_data
x = training_x.get_value()[0] # vector corresponding to a 5
x_img = np.reshape(x, (-1, 28)) # recognizable 5
y = training_y.eval()[0] # label: 5
#### Translate
# Randomly translate the image by -1, 0 or 1 pixel right and down
x_tr = np.random.randint(-1, 2) # x > 0 means translated to the right
print("translation: {} pixel right".format(x_tr))
y_tr = np.random.randint(-1, 2) # y > 0 means translated down
print("translation: {} pixel down".format(y_tr))
if x_tr != 0:
x_img = np.roll(x_img, x_tr, 1) # 1: x axis
if x_tr > 0:
# The image is to be translated to the right
x_img[:, 0:x_tr] = np.zeros((28, x_tr))
else:
# The image is to be translated to the left
x_img[:, 28+x_tr:] = np.zeros((28, -x_tr))
if y_tr != 0:
x_img = np.roll(x_img, y_tr, 0) # 0: y axis
if y_tr > 0:
        # The image is to be translated down
x_img[0:y_tr, :] = np.zeros((y_tr, 28))
else:
        # The image is to be translated up
x_img[28+y_tr:, :] = np.zeros((-y_tr, 28))
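# For example, np.roll([[1, 2, 3]], 1, 1) gives [[3, 1, 2]]; the wrapped-around
# column is the one zeroed out above, so the translation leaves a blank border.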
#### Rotate
theta = np.random.uniform(low=-5, high=5) # degrees counter-clockwise
print("theta: {} degrees".format(theta))
x_img = scipy.ndimage.interpolation.rotate(x_img, theta,
reshape=False,
prefilter=False)
#### Skew
img = PIL.Image.fromarray(x_img)
# upper pixels will be translated to the right by this amount divided by 2:
delta = 2*np.random.randint(-1, 2) # -2, 0 or 2
print("skew: {} pixels".format(delta))
if delta != 0:
m = delta / 28
new_width = 28 + abs(delta)
img = img.transform((new_width, 28), PIL.Image.AFFINE,
(1, m, -abs(delta) if delta > 0 else 0, 0, 1, 0))
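    # PIL's AFFINE transform maps each output pixel (x, y) to the input position
    # (a*x + b*y + c, d*x + e*y + f); with the 6-tuple (1, m, c, 0, 1, 0) each row
    # is shifted horizontally in proportion to y, which produces the skew.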
# Only keep the center of the new image, keeping the dimensions 28x28:
img = img.crop((abs(delta)/2, 0, 28 + abs(delta)/2, 28))
x_img = np.array(img)
plt.imshow(x_img, cmap="binary")
plt.show()
|
{"hexsha": "37ca91916338b4baf1a396480a5e34dd50c5c163", "size": 2121, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tutorials/Intro_To_NN/NNDL-solutions/code/chap6p4-9/display_transformed_image.py", "max_stars_repo_name": "lev1khachatryan/ASDS_CV", "max_stars_repo_head_hexsha": "c9f0c0412002e929bcb7cc2fc6e5392977a9fa76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-12-13T16:26:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-10T07:44:05.000Z", "max_issues_repo_path": "Tutorials/Intro_To_NN/NNDL-solutions/code/chap6p4-9/display_transformed_image.py", "max_issues_repo_name": "lev1khachatryan/ASDS_CV", "max_issues_repo_head_hexsha": "c9f0c0412002e929bcb7cc2fc6e5392977a9fa76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-01-07T16:48:21.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-18T18:43:37.000Z", "max_forks_repo_path": "Tutorials/Intro_To_NN/NNDL-solutions/code/chap6p4-9/display_transformed_image.py", "max_forks_repo_name": "lev1khachatryan/ASDS_CV", "max_forks_repo_head_hexsha": "c9f0c0412002e929bcb7cc2fc6e5392977a9fa76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2096774194, "max_line_length": 75, "alphanum_fraction": 0.6148043376, "include": true, "reason": "import numpy,import scipy", "num_tokens": 643}
|
\documentclass{my_cv}
\usepackage[skins]{tcolorbox}
\usepackage{hyperref}
\usepackage{parskip}
\begin{document}
\begin{multicols}{2}[
\titletext{Ankit}%
{Devri}%
{Hno.377/5,Mohan Mikins Society,Vasundhara,GZB,UP,201012}%
{\href{mailto:first.last@mail.com}{vivekdevri@gmail.com}}%
{+92 7827737229}%
{\href{https://github.com/AnkitDevri/}{github.com/AnkitDevri}}%
{\href{https://www.linkedin.com/in/ankitdevri/}{linkedin.com/in/ankitdevri}}%
{}
]
\end{multicols}
\section{\faFileText}{SUMMARY}
An aspiring data scientist working with deep learning and machine learning.
Intermediately skilled in problem solving, and willing to depart from the traditional way of programming to solve real-life problems using modern AI solutions.
\begin{multicols}{2}
\section{\faPencil}{PROJECTS}
\projects{Cars Pricing Predictor Web App }%
{Oct 2021 - Jan 2022\newline Deployment at : {\href{https://cars-pricing-prediction.herokuapp.com/}{cars-pricing-prediction.herokuapp.com}}}
 {A web app deployed on Heroku, built with the WSGI web framework Flask and a machine learning model trained on the dataset from CarDekho.com.\newline Check it out at : {\textbf{\href{https://github.com/AnkitDevri/carPricePrediction}{GitHub}}}
}
{Heroku, Python, Flask, Machine Learning, scikit-learn, numpy, kaggle}
\par
%
\projects{Denosing AutoEncoder }%
{March 2021 - April 2021 \newline Check it out at : {\href{https://github.com/AnkitDevri/Denosing-Autoencoder}{GitHub}}
}%
{A deep learning model based on a convolutional neural network, trained on the MNIST digit and Fashion-MNIST datasets, a total of 70000 images. \textbf{Accuracy:} 98 percent. }%
{Python,Deep Learning,CNN,Tensorflow,Keras,scikit-learn}
\par
\projects{Hospital Web app }%
{March 2020 - May 2020 \newline Deployment at : {\href{https://hospmanag.herokuapp.com/}{hospmanag.herokuapp.com}}}%
{A very basic web app for a hospital management system, deployed on Heroku using the Flask web framework. }%
{Python, HTML, CSS, Javascript,Flask,Heroku}
\section{\faList}{TECHNICAL SKILLS}
\textbf{Languages:} C++, Python, JavaScript, HTML, CSS, Git
\noindent\textbf{Tools:} Tensorflow, NumPy, Pandas, Flask, scikit-learn, Keras
\noindent\textbf{Skills:} Problem Solving, Competitive Programming, Data Science, Machine Learning and Deep Learning
\columnbreak
\section{\faPaintBrush}{TECHNICAL CERTIFICATIONS}
\begin{itemize}[noitemsep]
\item \textbf{Ethical Hacking Workshop} by Upgrad Campus in Feb 2022. Check it: \textbf{\href{https://www.credential.net/90177f72-2c1d-4c6c-a2c1-5f0fab5d1319}{here}}
\item \textbf{Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning} in Jan 2022 provided by DeepLearning.ai. Check it: \textbf{\href{https://www.coursera.org/account/accomplishments/certificate/H8WRJYD4T2BQ}{here}} .
\par
\item Letter of Attendance for \textbf{Nvidia GTC 2021} sessions at the conference in Dec 2021.
Check it: \textbf{\href{https://drive.google.com/file/d/16Tgme1Sr4KLjh2kWmrEXJBxSxgiaxqPo/view}{here}}
\par
\item \textbf{Summer Training} in \textbf{DSA} by Renaissance provided by ProgrammingPathshala.com in July 2021. Check: \textbf{\href{https://drive.google.com/file/d/1mEDQhzi45hriBOnNocF9JVuJe5caTsaX/view}{here}}
\par
\item \textbf{Algorithmic Toolbox} by University of California, San Diego in May 2020. check it: \textbf{\href{https://www.coursera.org/account/accomplishments/certificate/7TUG86DARH68}{here}}.
\par
\item \textbf{Ethical Hacking Workshop} at IIT Delhi in Rendezvous 2017. Check it: \textbf{\href{https://drive.google.com/file/d/1I0uNqQeMgE7rf2xn4YXI0yu-UilRbEui/view}{here}}
\end{itemize}
\section{\faGraduationCap}{EDUCATION}
\school{Lovely Professional University, Phagwara} %
{From 2019 to 2023} %
{B.Tech in Computer Science and Engineering}
\school{St. Teresa School, Indirapuram} %
{ 10+2 From \textbf{ 2017-2019 } \newline 10th From \textbf{ 2007-2017 }}
%
{Senior Secondary Education, \textbf{Board}: CBSE }
\section{\faStar}{EXTRACURRICULAR ACTIVITIES}
\begin{itemize}[noitemsep]
\item Certificate of Merit for being first in TechnOlympics 2017, in the Future Tech category.
check it: \textbf{\href{https://drive.google.com/file/d/1RLxo1emzGHALReSLDn7UjfB2_2hTYwOW/view}{here}}
\item Certification of Commitment at 73rd Republic Day by Ministry of Defence. Check it: \textbf{\href{https://drive.google.com/file/d/19l8o8HjbIlwEPfwosTMTENgxJ1geFbsB/view?usp=sharing}{here}}
\item Certification of Participation, by Ministry of Tourism and MyGov. Check it: \textbf{\href{https://drive.google.com/file/d/10gOOfH7SBqizZOaP-F6VMnyDdtaQRh1v/view?usp=sharing}{here}}
\end{itemize}
\section{\faSoccerBallO}{PEOPLE'S SKILLS}
Problem solving is a daily part of life.
Proactive in learning about different fields.
Interested in emerging technologies in both the hardware and software industries.
\end{multicols}
\end{document}
|
{"hexsha": "3c4dca35304bb8488d785d8e798a27d19309d4c6", "size": 5063, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "resume.tex", "max_stars_repo_name": "AnkitDevri/Resume", "max_stars_repo_head_hexsha": "45e97c151a57a8dba6e41b45ffbd4e9f8c1c6acf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "resume.tex", "max_issues_repo_name": "AnkitDevri/Resume", "max_issues_repo_head_hexsha": "45e97c151a57a8dba6e41b45ffbd4e9f8c1c6acf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "resume.tex", "max_forks_repo_name": "AnkitDevri/Resume", "max_forks_repo_head_hexsha": "45e97c151a57a8dba6e41b45ffbd4e9f8c1c6acf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.1414141414, "max_line_length": 259, "alphanum_fraction": 0.7461979064, "num_tokens": 1566}
|
import jax.numpy as np
def adam_ops_init(flat_params):
adam_ops = {
"b1": 0.9,
"b2": 0.999,
"step_size": 0.001,
"eps": 1e-8,
"wd": 0.001,
}
adam_ops.update({"m": np.zeros(len(flat_params))})
adam_ops.update({"v": np.zeros(len(flat_params))})
return adam_ops
def adam_step(
op: dict, g: np.ndarray, i, flat_params: np.ndarray, weight_decay=False
):
"""
One step of the adamW optimizer.
:param op: optimizer parameters
:param g: derivative of loss function w.r.t. params. Should be same shape
as flat_params.
:param i: the iteration number
:param flat_params: Flattened parameter vector.
:param weight_decay: Whether to use weight decay or not. Requires a key 'wd' in
the optimizer parameters dictionary.
"""
op["m"] = (1 - op["b1"]) * g + op["b1"] * op[
"m"
] # First moment estimate.
op["v"] = (1 - op["b2"]) * (g ** 2) + op["b2"] * op[
"v"
] # Second moment estimate.
mhat = op["m"] / (1 - op["b1"] ** (i + 1)) # Bias correction.
vhat = op["v"] / (1 - op["b2"] ** (i + 1))
if weight_decay:
flat_params = (
flat_params
- op["step_size"] * mhat / (np.sqrt(vhat) + op["eps"])
- op["wd"] * flat_params
)
else:
flat_params = flat_params - op["step_size"] * mhat / (
np.sqrt(vhat) + op["eps"]
)
return flat_params, op
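# A minimal usage sketch (the quadratic objective and jax.grad call below are
# illustrative, not part of this module's API):
#
#   from jax import grad
#   flat_params = np.ones(3)
#   op = adam_ops_init(flat_params)
#   g = grad(lambda p: np.sum(p ** 2))(flat_params)
#   flat_params, op = adam_step(op, g, i=0, flat_params=flat_params)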
|
{"hexsha": "ad65727e45726a1d52b61f393d5596944f34dbb4", "size": 1466, "ext": "py", "lang": "Python", "max_stars_repo_path": "fundl/optimizers/step.py", "max_stars_repo_name": "ElArkk/fundl", "max_stars_repo_head_hexsha": "04b126d484f77e480196a24849683df93a0eabd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2018-12-11T04:04:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-21T09:38:05.000Z", "max_issues_repo_path": "fundl/optimizers/step.py", "max_issues_repo_name": "ElArkk/fundl", "max_issues_repo_head_hexsha": "04b126d484f77e480196a24849683df93a0eabd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fundl/optimizers/step.py", "max_forks_repo_name": "ElArkk/fundl", "max_forks_repo_head_hexsha": "04b126d484f77e480196a24849683df93a0eabd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-09-09T10:03:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-06T14:31:35.000Z", "avg_line_length": 29.32, "max_line_length": 83, "alphanum_fraction": 0.5375170532, "include": true, "reason": "import jax", "num_tokens": 449}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import pearsonr
# from mpl_toolkits.axes_grid1 import host_subplot
# import mpl_toolkits.axisartist as AA
# import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
Estacion = '6001'
df1 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/6001Historico.txt', parse_dates=[2])
Theoric_rad_method = 'GIS_Model' ##-->> FOR IT TO USE THE GIS MODEL THIS MUST BE 'GIS_Model'
resolucion = 'diaria' ##-->> THE OPTIONS ARE 'diaria' (daily) OR 'horaria' (hourly)
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## ---THEORETICAL RADIATION CALCULATION--- ##
def daterange(start_date, end_date):
    'For adjusting the dates in the Kumar model every 10 min. The start and end dates are str: %Y-%m-%d'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=10)
while start_date <= end_date:
yield start_date
start_date += delta
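# For example, list(daterange('2018-01-01', '2018-01-02'))[:3] yields
# 2018-01-01 00:00, 00:10 and 00:20 (10-minute steps, end date inclusive).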
def serie_Kumar_Model_hora(estacion):
    'Returns an hourly dataframe with the theoretical radiation following the Kumar recommendations, elaborated by Gisel Guzmán ' \
    'for the AMVA and her thesis. The data of the original dataframe are ordered into 12 ascending months (2018), although they may ' \
    'belong to different years. The result is for the selected point and uses the Total_Timeseries.csv file.'
data_Model = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Radiacion_GIS/Teoricos_nati/Total_Timeseries.csv',
sep=',')
fecha_hora = [pd.to_datetime(data_Model['Unnamed: 0'], format="%Y-%m-%d %H:%M:%S")[i].to_pydatetime() for i in
range(len(data_Model['Unnamed: 0']))]
data_Model.index = fecha_hora
data_Model = data_Model.sort_index()
data_Model['Month'] = np.array(data_Model.index.month)
data_Model = data_Model.sort_values(by="Month")
fechas = []
for i in daterange('2018-01-01', '2019-01-01'):
fechas.append(i)
fechas = fechas[0:-1]
if estacion == '6001':
punto = data_Model['TS_kumar']
elif estacion == '6002':
punto = data_Model['CI_kumar']
elif estacion == '6003':
punto = data_Model['JV_kumar']
Rad_teorica = []
for i in range(len(fechas)):
mes = fechas[i].month
hora = fechas[i].hour
mint = fechas[i].minute
rad = \
np.where((data_Model.index.month == mes) & (data_Model.index.hour == hora) & (data_Model.index.minute == mint))[
0]
if len(rad) == 0:
Rad_teorica.append(np.nan)
else:
Rad_teorica.append(punto.iloc[rad].values[0])
data_Theorical = pd.DataFrame()
data_Theorical['fecha_hora'] = fechas
data_Theorical['Radiacion_Teo'] = Rad_teorica
data_Theorical.index = data_Theorical['fecha_hora']
df_hourly_theoric = data_Theorical.groupby(pd.Grouper(freq="H")).mean()
df_hourly_theoric = df_hourly_theoric[df_hourly_theoric['Radiacion_Teo'] > 0]
return df_hourly_theoric
def Elevation_RadiationTA(n, lat, lon, start):
    'Obtains the radiation in W/m2 and the solar elevation angle in degrees, hourly, for a number "n" of ' \
    'days at a point with a given latitude and longitude ("lat-lon" as floats), starting from a start date ' \
    '"start" such as datetime.datetime(2018, 1, 1, 8).'
import pysolar
import pytz
import datetime
timezone = pytz.timezone("America/Bogota")
start_aware = timezone.localize(start)
# Calculate radiation every hour for 365 days
nhr = 24*n
dates, altitudes_deg, radiations = list(), list(), list()
for ihr in range(nhr):
date = start_aware + datetime.timedelta(hours=ihr)
altitude_deg = pysolar.solar.get_altitude(lat, lon, date)
if altitude_deg <= 0:
radiation = 0.
else:
radiation = pysolar.radiation.get_radiation_direct(date, altitude_deg)
dates.append(date)
altitudes_deg.append(altitude_deg)
radiations.append(radiation)
days = [ihr/24 for ihr in range(nhr)]
return days, altitudes_deg, radiations
if Theoric_rad_method != 'GIS_Model' and Estacion == '6001':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.259, -75.588, datetime.datetime(2018, 1, 1, 0))
    print('Theoretical radiation with pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6002':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.168, -75.644, datetime.datetime(2018, 1, 1, 0))
    print('Theoretical radiation with pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6003':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.255, -75.542, datetime.datetime(2018, 1, 1, 0))
    print('Theoretical radiation with pysolar')
elif Theoric_rad_method == 'GIS_Model':
Io_hora = serie_Kumar_Model_hora(Estacion)
    print('Theoretical radiation with the KUMAR model')
###############################################################################
##------------THEORETICAL EFFICIENCIES AS A PROXY FOR TRANSPARENCY-----------##
###############################################################################
'Calculation of the theoretical efficiencies as a proxy for the transparency of the atmosphere.'
'This makes use of the pyranometer information and of the theoretical radiation'
'from Gisel Guzman; the aim is to obtain the characteristics derived from the'
'stochastic analysis, similar to that of Estefanía Muñoz in her doctoral thesis.'
##--------------------READING THE EXPERIMENT DATA-----------------------------##
df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
df_P348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0)
df_P975['Fecha_hora'] = df_P975.index
df_P350['Fecha_hora'] = df_P350.index
df_P348['Fecha_hora'] = df_P348.index
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P348.index = pd.to_datetime(df_P348.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
## ----------------ACOTANDO LOS DATOS A VALORES VÁLIDOS---------------- ##
'Since radiation is what matters here, the data are filtered by keeping power values greater'
'than or equal to 0, which seem to be generated one hour after radiation starts to come in.'
df_P975 = df_P975[(df_P975['radiacion'] > 0) & (df_P975['strength'] >=0) & (df_P975['NI'] >=0)]
df_P350 = df_P350[(df_P350['radiacion'] > 0) & (df_P350['strength'] >=0) & (df_P350['NI'] >=0)]
df_P348 = df_P348[(df_P348['radiacion'] > 0) & (df_P348['strength'] >=0) & (df_P348['NI'] >=0)]
df_P975_h = df_P975.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P350_h = df_P350.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P348_h = df_P348.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P975_h = df_P975_h.between_time('06:00', '17:00')
df_P350_h = df_P350_h.between_time('06:00', '17:00')
df_P348_h = df_P348_h.between_time('06:00', '17:00')
##----AJUSTE DE LOS DATOS DE RADIACIÓN TEORICA AL RANGO DE FECHAS DESEADO-----##
def daterange(start_date, end_date):
    'For adjusting the dates to the hourly Kumar model. The start and end dates'
    'are strings in the format %Y-%m-%d.'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=60)
while start_date <= end_date:
yield start_date
start_date += delta
Io_hora_975 = serie_Kumar_Model_hora('6001')
Io_hora_350 = serie_Kumar_Model_hora('6002')
Io_hora_348 = serie_Kumar_Model_hora('6003')
fechas_975 = []
for i in daterange(df_P975.index[0].date().strftime("%Y-%m-%d"), (df_P975.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_975.append(i)
fechas_350 = []
for i in daterange(df_P350.index[0].date().strftime("%Y-%m-%d"), (df_P350.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_350.append(i)
fechas_348 = []
for i in daterange(df_P348.index[0].date().strftime("%Y-%m-%d"), (df_P348.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_348.append(i)
Io_hora_975 = Io_hora_975.loc[(Io_hora_975.index >= '2018-03-20') & (Io_hora_975.index <= '2018-'+str(df_P975.index[-1].month)+'-'+str(df_P975.index[-1].day+1))]
Io_hora_350 = Io_hora_350.loc[(Io_hora_350.index >= '2018-03-22') & (Io_hora_350.index <= '2018-'+str(df_P350.index[-1].month)+'-'+str(df_P350.index[-1].day+1))]
Io_hora_348 = Io_hora_348.loc[(Io_hora_348.index >= '2018-03-23') & (Io_hora_348.index <= '2018-'+str(df_P348.index[-1].month)+'-'+str(df_P348.index[-1].day+1))]
Io_hora_975 = Io_hora_975.between_time('06:00', '17:00')
Io_hora_975.index = [Io_hora_975.index[i].replace(year=2019) for i in range(len(Io_hora_975.index))]
Io_hora_350 = Io_hora_350.between_time('06:00', '17:00')
Io_hora_350.index = [Io_hora_350.index[i].replace(year=2019) for i in range(len(Io_hora_350.index))]
Io_hora_348 = Io_hora_348.between_time('06:00', '17:00')
Io_hora_348.index = [Io_hora_348.index[i].replace(year=2019) for i in range(len(Io_hora_348.index))]
df_Rad_P975 = pd.concat([Io_hora_975, df_P975_h], axis = 1)
df_Rad_P350 = pd.concat([Io_hora_350, df_P350_h], axis = 1)
df_Rad_P348 = pd.concat([Io_hora_348, df_P348_h], axis = 1)
df_Rad_P975 = df_Rad_P975.drop(['NI','strength'], axis=1)
df_Rad_P350 = df_Rad_P350.drop(['NI','strength'], axis=1)
df_Rad_P348 = df_Rad_P348.drop(['NI','strength'], axis=1)
##--------------------EFICIENCIA REAL PROXI DE TRANSPARENCIA-----------------##
df_Rad_P975['Efi_Transp'] = df_Rad_P975['radiacion'] / df_Rad_P975['Radiacion_Teo']
df_Rad_P350['Efi_Transp'] = df_Rad_P350['radiacion'] / df_Rad_P350['Radiacion_Teo']
df_Rad_P348['Efi_Transp'] = df_Rad_P348['radiacion'] / df_Rad_P348['Radiacion_Teo']
##-----------------HORAS EN LA QUE SE PRODUCE LA MAYOR EFICIENCIA Y SU HISTOGRAMA-------------##
'The frequency of the hours that exceeded the efficiency maximum (1) is shown in the histogram'
'below. The result shows that the highest frequencies occur at 6 and 7 in the morning, which is'
'attributable to shortcomings of the clear-sky radiation model at those points.'
Hour_Max_Efi_975 = df_Rad_P975[df_Rad_P975['Efi_Transp']>1].index.hour
Hour_Max_Efi_350 = df_Rad_P350[df_Rad_P350['Efi_Transp']>1].index.hour
Hour_Max_Efi_348 = df_Rad_P348[df_Rad_P348['Efi_Transp']>1].index.hour
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Hour_Max_Efi_348, bins='auto', alpha = 0.5)
ax1.set_title(u'Distribución horas de excedencia \n de la eficiencia en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Horas', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Hour_Max_Efi_350, bins='auto', alpha = 0.5)
ax2.set_title(u'Distribución horas de excedencia \n de la eficiencia en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Horas', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Hour_Max_Efi_975, bins='auto', alpha = 0.5)
ax3.set_title(u'Distribución horas de excedencia \n de la eficiencia en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Horas', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoHoraExceEfi.png')
plt.show()
##-------DISCRIMINACION ENTRE DIAS LLUVIOSOS Y SECOS POR PERCENTILES DE RADIACION--------##
'Para lidiar cno la situación en que pueden haber dias en los que los piranometros solo midieron'
'durante una fracción del día por posibles daños y alteraciones, se deben considerar los dias que'
'al menos tuvieron 6 horas de medicion.'
df_Rad_P975_count_h_pira = df_Rad_P975.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
df_Rad_P350_count_h_pira = df_Rad_P350.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
df_Rad_P348_count_h_pira = df_Rad_P348.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
days_P975_count_h_pira = df_Rad_P975_count_h_pira.index[df_Rad_P975_count_h_pira == True]
days_P350_count_h_pira = df_Rad_P350_count_h_pira.index[df_Rad_P350_count_h_pira == True]
days_P348_count_h_pira = df_Rad_P348_count_h_pira.index[df_Rad_P348_count_h_pira == True]
'Thresholds were set empirically to select the markedly cloudy and the markedly clear days'
'within the record period, following the procedures of the script'
'Umbrales_Radiacion_Piranometro.py'
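# Illustrative sketch (an addition for clarity, not part of the original procedure): the
# empirical day classification described above, written as a helper. The 0.85 / 0.25
# fractions are assumptions borrowed from the station-specific thresholds used below.
def classify_day(daily_measured, daily_theoretical, clear_frac=0.85, cloudy_frac=0.25):
    """Label a day as 'clear', 'cloudy' or 'mixed' from its daily radiation sums."""
    if daily_measured >= clear_frac * daily_theoretical:
        return 'clear'
    elif daily_measured <= cloudy_frac * daily_theoretical:
        return 'cloudy'
    return 'mixed'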
Sum_df_Rad_P975 = df_Rad_P975.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P350 = df_Rad_P350.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P348 = df_Rad_P348.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P975 = Sum_df_Rad_P975[Sum_df_Rad_P975['radiacion']>0]
Sum_df_Rad_P350 = Sum_df_Rad_P350[Sum_df_Rad_P350['radiacion']>0]
Sum_df_Rad_P348 = Sum_df_Rad_P348[Sum_df_Rad_P348['radiacion']>0]
lista_days_975 = []
for i in range(len(Sum_df_Rad_P975)):
if Sum_df_Rad_P975.index[i] in days_P975_count_h_pira:
lista_days_975.append(1)
else:
lista_days_975.append(0)
Sum_df_Rad_P975['days'] = lista_days_975
Sum_df_Rad_P975 = Sum_df_Rad_P975[Sum_df_Rad_P975['days'] == 1]
Sum_df_Rad_P975 = Sum_df_Rad_P975.drop(['days'], axis = 1)
lista_days_350 = []
for i in range(len(Sum_df_Rad_P350)):
if Sum_df_Rad_P350.index[i] in days_P350_count_h_pira:
lista_days_350.append(1)
else:
lista_days_350.append(0)
Sum_df_Rad_P350['days'] = lista_days_350
Sum_df_Rad_P350 = Sum_df_Rad_P350[Sum_df_Rad_P350['days'] == 1]
Sum_df_Rad_P350 = Sum_df_Rad_P350.drop(['days'], axis = 1)
lista_days_348 = []
for i in range(len(Sum_df_Rad_P348)):
if Sum_df_Rad_P348.index[i] in days_P348_count_h_pira:
lista_days_348.append(1)
else:
lista_days_348.append(0)
Sum_df_Rad_P348['days'] = lista_days_348
Sum_df_Rad_P348 = Sum_df_Rad_P348[Sum_df_Rad_P348['days'] == 1]
Sum_df_Rad_P348 = Sum_df_Rad_P348.drop(['days'], axis = 1)
Desp_Pira_975 = Sum_df_Rad_P975[Sum_df_Rad_P975.radiacion>=(Sum_df_Rad_P975.Radiacion_Teo)*0.85]
Desp_Pira_350 = Sum_df_Rad_P350[Sum_df_Rad_P350.radiacion>=(Sum_df_Rad_P350.Radiacion_Teo)*0.78]
Desp_Pira_348 = Sum_df_Rad_P348[Sum_df_Rad_P348.radiacion>=(Sum_df_Rad_P348.Radiacion_Teo)*0.80]
Nuba_Pira_975 = Sum_df_Rad_P975[Sum_df_Rad_P975.radiacion<=(Sum_df_Rad_P975.Radiacion_Teo)*0.25]
Nuba_Pira_350 = Sum_df_Rad_P350[Sum_df_Rad_P350.radiacion<=(Sum_df_Rad_P350.Radiacion_Teo)*0.25]
Nuba_Pira_348 = Sum_df_Rad_P348[Sum_df_Rad_P348.radiacion<=(Sum_df_Rad_P348.Radiacion_Teo)*0.22]
Appended_data_desp_975 = []
for i in range(len(Desp_Pira_975.index.values)):
Appended_data_desp_975.append(df_P975_h[df_P975_h.index.date == Desp_Pira_975.index.date[i]])
Appended_data_desp_975 = pd.concat(Appended_data_desp_975)
Appended_data_desp_350 = []
for i in range(len(Desp_Pira_350.index.values)):
Appended_data_desp_350.append(df_P350_h[df_P350_h.index.date == Desp_Pira_350.index.date[i]])
Appended_data_desp_350 = pd.concat(Appended_data_desp_350)
Appended_data_desp_348 = []
for i in range(len(Desp_Pira_348.index.values)):
Appended_data_desp_348.append(df_P348_h[df_P348_h.index.date == Desp_Pira_348.index.date[i]])
Appended_data_desp_348 = pd.concat(Appended_data_desp_348)
Appended_data_nuba_975 = []
for i in range(len(Nuba_Pira_975.index.values)):
Appended_data_nuba_975.append(df_P975_h[df_P975_h.index.date == Nuba_Pira_975.index.date[i]])
Appended_data_nuba_975 = pd.concat(Appended_data_nuba_975)
Appended_data_nuba_350 = []
for i in range(len(Nuba_Pira_350.index.values)):
Appended_data_nuba_350.append(df_P350_h[df_P350_h.index.date == Nuba_Pira_350.index.date[i]])
Appended_data_nuba_350 = pd.concat(Appended_data_nuba_350)
Appended_data_nuba_348 = []
for i in range(len(Nuba_Pira_348.index.values)):
Appended_data_nuba_348.append(df_P348_h[df_P348_h.index.date == Nuba_Pira_348.index.date[i]])
Appended_data_nuba_348 = pd.concat(Appended_data_nuba_348)
#------------------HISTOGRAMAS DE RADIACION PARA CADA PUNTO EN LOS DOS CASOS----------------##
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Appended_data_desp_348['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax1.hist(Appended_data_nuba_348['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax1.set_title(u'Distribución de la radiación \n en dias despejados y nublados en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Appended_data_desp_350['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax2.hist(Appended_data_nuba_350['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax2.set_title(u'Distribución de la radiación \n en dias despejados y nublados en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Appended_data_desp_975['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax3.hist(Appended_data_nuba_975['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax3.set_title(u'Distribución de la radiación \n en dias despejados y nublados en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoRadiacionNubaDespTotal.png')
plt.show()
#------------------PRUEBA DE KOLMOGOROV-SMIRNOV PARA LA BONDAD DE AJUSTE ----------------##
'The KOLMOGOROV-SMIRNOV goodness-of-fit test is applied to the cloudy-day and clear-day data'
'against the full data series, to evaluate whether they belong to the same probability'
'distribution. A significance level of 5% is used. The test is more sensitive to values close'
'to the mean than to the extremes, so in general it can be used to avoid outliers. The null'
'hypothesis is that both series follow the same distribution; the alternative hypothesis'
'suggests that they do not follow the same distribution.'
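# Minimal sketch of the decision rule described above (an illustrative helper, not used by the
# original code): a two-sample KS test at a given significance level. ks_2samp is already
# imported at the top of this script; it is repeated here so the sketch is self-contained.
from scipy.stats import ks_2samp
def ks_same_distribution(sample, reference, alpha=0.05):
    """Return True when the KS test fails to reject that both samples share one distribution."""
    stat, pvalue = ks_2samp(sample, reference)
    return pvalue > alpha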
Significancia = 0.05
SK_desp_348 = ks_2samp(Appended_data_desp_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_348_desp = SK_desp_348[0]
pvalue_348_desp = SK_desp_348[1]
SK_nuba_348 = ks_2samp(Appended_data_nuba_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_348_nuba = SK_nuba_348[0]
pvalue_348_nuba = SK_nuba_348[1]
if pvalue_348_nuba <= Significancia:
print ('los dias nublados en JV no pertenecen a la misma distribución')
else:
print ('los dias nublados en JV pertenecen a la misma distribución')
if pvalue_348_desp <= Significancia:
print ('los dias despejados en JV no pertenecen a la misma distribución')
else:
print ('los dias despejados en JV pertenecen a la misma distribución')
SK_desp_350 = ks_2samp(Appended_data_desp_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_350_desp = SK_desp_350[0]
pvalue_350_desp = SK_desp_350[1]
SK_nuba_350 = ks_2samp(Appended_data_nuba_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_350_nuba = SK_nuba_350[0]
pvalue_350_nuba = SK_nuba_350[1]
if pvalue_350_nuba <= Significancia:
print ('los dias nublados en CI no pertenecen a la misma distribución')
else:
print ('los dias nublados en CI pertenecen a la misma distribución')
if pvalue_350_desp <= Significancia:
print ('los dias despejados en CI no pertenecen a la misma distribución')
else:
print ('los dias despejados en CI pertenecen a la misma distribución')
SK_desp_975 = ks_2samp(Appended_data_desp_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_975_desp = SK_desp_975[0]
pvalue_975_desp = SK_desp_975[1]
SK_nuba_975 = ks_2samp(Appended_data_nuba_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_975_nuba = SK_nuba_975[0]
pvalue_975_nuba = SK_nuba_975[1]
if pvalue_975_nuba <= Significancia:
print ('los dias nublados en TS no pertenecen a la misma distribución')
else:
print ('los dias nublados en TS pertenecen a la misma distribución')
if pvalue_975_desp <= Significancia:
print ('los dias despejados en TS no pertenecen a la misma distribución')
else:
print ('los dias despejados en TS pertenecen a la misma distribución')
#------------------HISTOGRAMAS DE EFICIENCIA PARA CADA PUNTO EN LOS DOS CASOS----------------##
Desp_Efi_348 = []
for i in range(len(Desp_Pira_348.index.values)):
Desp_Efi_348.append(df_Rad_P348[df_Rad_P348.index.date == Desp_Pira_348.index.date[i]])
Desp_Efi_348 = pd.concat(Desp_Efi_348)
Desp_Efi_350 = []
for i in range(len(Desp_Pira_350.index.values)):
Desp_Efi_350.append(df_Rad_P350[df_Rad_P350.index.date == Desp_Pira_350.index.date[i]])
Desp_Efi_350 = pd.concat(Desp_Efi_350)
Desp_Efi_975 = []
for i in range(len(Desp_Pira_975.index.values)):
Desp_Efi_975.append(df_Rad_P975[df_Rad_P975.index.date == Desp_Pira_975.index.date[i]])
Desp_Efi_975 = pd.concat(Desp_Efi_975)
Nuba_Efi_348 = []
for i in range(len(Nuba_Pira_348.index.values)):
Nuba_Efi_348.append(df_Rad_P348[df_Rad_P348.index.date == Nuba_Pira_348.index.date[i]])
Nuba_Efi_348 = pd.concat(Nuba_Efi_348)
Nuba_Efi_350 = []
for i in range(len(Nuba_Pira_350.index.values)):
Nuba_Efi_350.append(df_Rad_P350[df_Rad_P350.index.date == Nuba_Pira_350.index.date[i]])
Nuba_Efi_350 = pd.concat(Nuba_Efi_350)
Nuba_Efi_975 = []
for i in range(len(Nuba_Pira_975.index.values)):
Nuba_Efi_975.append(df_Rad_P975[df_Rad_P975.index.date == Nuba_Pira_975.index.date[i]])
Nuba_Efi_975 = pd.concat(Nuba_Efi_975)
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Desp_Efi_348['Efi_Transp'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax1.hist(Nuba_Efi_348['Efi_Transp'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax1.set_title(u'Distribución de la eficiencia \n en dias despejados y nublados en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Eficiencia', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Desp_Efi_350['Efi_Transp'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax2.hist(Nuba_Efi_350['Efi_Transp'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax2.set_title(u'Distribución de la eficiencia \n en dias despejados y nublados en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Eficiencia', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Desp_Efi_975['Efi_Transp'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax3.hist(Nuba_Efi_975['Efi_Transp'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax3.set_title(u'Distribución de la eficiencia \n en dias despejados y nublados en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Eficiencia', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoEficiencianNubaDespTotal.png')
plt.show()
# keep the KS statistic and the p-value under separate names; the checks below use the p-value
SK_desp_Efi_348 = ks_2samp(Desp_Efi_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_Efi_348_desp = SK_desp_Efi_348[0]
Efi_348_desp = SK_desp_Efi_348[1]
SK_nuba_Efi_348 = ks_2samp(Nuba_Efi_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_Efi_348_nuba = SK_nuba_Efi_348[0]
Efi_348_nuba = SK_nuba_Efi_348[1]
if Efi_348_nuba <= Significancia:
print ('los dias nublados en JV no pertenecen a la misma distribución')
else:
print ('los dias nublados en JV pertenecen a la misma distribución')
if Efi_348_desp <= Significancia:
print ('los dias despejados en JV no pertenecen a la misma distribución')
else:
print ('los dias despejados en JV pertenecen a la misma distribución')
SK_desp_Efi_350 = ks_2samp(Desp_Efi_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_Efi_350_desp = SK_desp_Efi_350[0]
Efi_350_desp = SK_desp_Efi_350[1]
SK_nuba_Efi_350 = ks_2samp(Nuba_Efi_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_Efi_350_nuba = SK_nuba_Efi_350[0]
Efi_350_nuba = SK_nuba_Efi_350[1]
if Efi_350_nuba <= Significancia:
print ('los dias nublados en CI no pertenecen a la misma distribución')
else:
print ('los dias nublados en CI pertenecen a la misma distribución')
if Efi_350_desp <= Significancia:
print ('los dias despejados en CI no pertenecen a la misma distribución')
else:
print ('los dias despejados en CI pertenecen a la misma distribución')
SK_desp_Efi_975 = ks_2samp(Desp_Efi_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_Efi_975_desp = SK_desp_Efi_975[0]
Efi_975_desp = SK_desp_Efi_975[1]
SK_nuba_Efi_975 = ks_2samp(Nuba_Efi_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_Efi_975_nuba = SK_nuba_Efi_975[0]
Efi_975_nuba = SK_nuba_Efi_975[1]
if Efi_975_nuba <= Significancia:
print ('los dias nublados en TS no pertenecen a la misma distribución')
else:
print ('los dias nublados en TS pertenecen a la misma distribución')
if Efi_975_desp <= Significancia:
print ('los dias despejados en TS no pertenecen a la misma distribución')
else:
print ('los dias despejados en TS pertenecen a la misma distribución')
#------------------ESTIMACIÓN DE LA AUTOCORRELACIÓN EN CADA PUNTO----------------##
def estimated_autocorrelation(x):
"""
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))
result = r/(variance*(np.arange(n, 0, -1)))
return result
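# Illustrative helper (an addition, not part of the original analysis): read the autocorrelation
# at a few specific lags, in samples, out of the estimate returned by the function above.
def autocorrelation_at_lags(series_values, lags=(1, 6, 12, 24)):
    acf = estimated_autocorrelation(series_values)
    return {k: acf[k] for k in lags if k < len(acf)}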
Auto_corr_975 = estimated_autocorrelation(df_P975_h['radiacion'].values)
X = df_P975_h[df_P975_h['radiacion'].values>0]['radiacion'].values
lag = [1, 6, 12, 24]
AutoCorr_lag = []
for j in lag:
    print(j)
    # lag-j autocorrelation: Pearson correlation between the series and a copy shifted by j samples
    AutoCorr_lag.append(pearsonr(X[:-j], X[j:])[0])
###############################################################################
##-------------------RADIACION TEORICA PARA UN AÑO DE DATOS------------------##
###############################################################################
'One year of theoretical radiation data is expected to be available for building the prediction'
'scenarios and the theoretical yields. Intended for the 2018 data.'
## ---LECTURA DE DATOS DE PIRANÓMETRO --- ##
df1 = df1.set_index(["fecha_hora"])
df1.index = df1.index.tz_localize('UTC').tz_convert('America/Bogota')
df1.index = df1.index.tz_localize(None)
## ---AGRUPACION DE LOS DATOS HORARIOS A UN AÑO--- ##
df1_hora = df1.groupby(pd.Grouper(freq="H")).mean()
df1_hora = df1_hora[(df1_hora.index >= '2018-01-01 00:00:00') & (df1_hora.index <= '2018-12-31 23:59:00')]
df1_hora = df1_hora.between_time('06:00', '17:00') ##--> Seleccionar solo los datos de horas del dia
## ---CREACIÓN DE LA RADIACIÓN EN SUPERFICIE POR DIA Y AGRUPACION DE LOS DATOS DIARIOS A UN AÑO--- ##
Io_dia = Io.groupby(pd.Grouper(freq="D")).mean()
df1_dia = df1.groupby(pd.Grouper(freq="D")).mean()
df1_dia = df1_dia[(df1_dia.index >= '2018-01-01') & (df1_dia.index <= '2018-12-31')]
## ---CONDICIONANDO LA RESOLUCIÓN TEMPORAL CON LA QUE SE TRABAJARÁ--- ##
if resolucion == 'diaria':
Io = Io_dia
df1_rad = df1_dia
elif resolucion == 'horaria':
Io = Io_hora
df1_rad = df1_hora
## ---CREACIÓN DE LOS ESCENARIOS DE ANÁLISIS EFICIENCIA TEÓRICA--- ##
if len(Io)==len(df1_rad):
df1_rad['TAR'] = Io
df1_rad = df1_rad.drop([u'Unnamed: 0', u'idestacion'], axis=1)
df1_rad['Efi_Teorica'] = df1_rad[u'radiacion']/df1_rad[u'TAR']
else:
print (u'No hay un año de datos con el piranometro')
## -- Máximo absoluto
df1_radr_max = df1_rad.loc[lambda df_hora: df_hora['Efi_Teorica'] == np.nanmax(df1_rad.Efi_Teorica)]
## -- Percentil 90 absoluto
df1_rad90 = df1_rad.quantile(0.90)
## -- Percentil 50 absoluto
df1_rad50 = df1_rad.quantile(0.50)
## -- Percentil 10 absoluto
df1_rad10 = df1_rad.quantile(0.10)
## -----MENSUAL----- ##
df1_hm_mean = df1_rad.Efi_Teorica.groupby(pd.Grouper(freq="M")).mean()
df1_hm_mean_90 = df1_hm_mean.loc[lambda df1_hm_mean: df1_hm_mean.round(3) >= round(df1_hm_mean.quantile(0.90), 2)]
df1_hm_mean_50 = df1_hm_mean.loc[lambda df1_hm_mean: df1_hm_mean.round(3) >= round(df1_hm_mean.quantile(0.50), 2)]
df1_hm_mean_10 = df1_hm_mean.loc[lambda df1_hm_mean: df1_hm_mean.round(3) >= round(df1_hm_mean.quantile(0.10), 2)]
## -- Percentil 90 de cada mes
df1_hm_quantile90 = df1_rad.Efi_Teorica.groupby(pd.Grouper(freq="M")).quantile(0.90)
## -- Percentil 50 de cada mes
df1_hm_quantile50 = df1_rad.Efi_Teorica.groupby(pd.Grouper(freq="M")).quantile(0.50)
## -- Percentil 10 de cada mes
df1_hm_quantile10 = df1_rad.Efi_Teorica.groupby(pd.Grouper(freq="M")).quantile(0.10)
## -----GRÁFICA PARA OBTENER LOS ESCENARIOS----- ##
new_index = [df1_hm_quantile90.index[i].replace(day=1) for i in range(len(df1_hm_quantile90.index))]
df1_hm_quantile90.index = new_index
fechas_grafica = [str(df1_hm_quantile90.index[i])[0:10] for i in range(len(df1_hm_quantile90.index))]
ind_f = np.arange(len(fechas_grafica) )
if Estacion == '6001':
EstacionLoc = 'Torre SIATA'
elif Estacion== '6002':
EstacionLoc = 'Consejo Ita'
elif Estacion== '6003':
EstacionLoc = 'Joaquin Vallejo'
fig = plt.figure(figsize=(12, 9))
plt.rc('axes', edgecolor='gray')
ax = fig.add_subplot(111)
ax.plot(df1_hm_quantile90.index, df1_hm_quantile90*100, color='#52B7C4', linewidth=2, label = 'P_Mensual 90')
ax.plot(df1_hm_quantile90.index, df1_hm_quantile50*100, color='#ffa040', linewidth=2, label = 'P_Mensual 50')
ax.plot(df1_hm_quantile90.index, df1_hm_quantile10*100, color='#0b6623', linewidth=2, label = 'P_Mensual 10')
ax.legend()
ax.scatter(df1_hm_quantile90.index, df1_hm_quantile90*100, color='#52B7C4', s=20)
ax.scatter(df1_hm_quantile90.index, df1_hm_quantile50*100, color='#ffa040', s=20)
ax.scatter(df1_hm_quantile90.index, df1_hm_quantile10*100, color='#0b6623', s=20)
ax.set_ylim(0, np.nanmax(df1_hm_quantile90*100)*1.1)
ax.set_xlim(fechas_grafica[0], fechas_grafica[-1])
ax.axhline(y=df1_rad90.Efi_Teorica*100)
ax.axhline(y=df1_rad50.Efi_Teorica*100)
ax.axhline(y=df1_rad10.Efi_Teorica*100)
plt.ylabel('Rendimiento', fontsize=14, fontproperties=prop, color='gray')
plt.xlabel('Meses', fontsize=18, fontproperties=prop, color='gray')
plt.title(u'Percentiles mensuales y absolutos de los datos de radiación de 2018 en: ' + EstacionLoc, size=16, fontproperties=prop_2, color='gray')
hfmt = mdates.DateFormatter('%Y-%m')
ax.tick_params(color='gray', labelcolor='gray')
for spine in ax.spines.values():
spine.set_edgecolor('gray')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_major_formatter(hfmt)
plt.grid(which='major', linestyle=':', linewidth=0.5, alpha=0.7)
plt.savefig('/home/nacorreasa/Escritorio/Figuras/Escenarios_Perc.png')
plt.show()
## ---RELACIÓN CON LAS DEMÁS VARIABLES IMPLICADAS--- ##
data_thiess = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Meteorologicas/Historico_Brillo_1019.txt', sep=',')
## -- Filtrar por calidad
df_brillo = data_thiess[data_thiess['calidad'] < 100]
df_brillo.index = df_brillo['fecha_hora']
df_brillo = df_brillo.drop(['fecha_hora'], axis=1)
df_brillo.index = pd.to_datetime(df_brillo.index)
df_brillo = df_brillo[(df_brillo.index >= '2018-01-01 00:00:00') & (df_brillo.index <= '2018-12-31 23:59:00')]
df_brillo_h = df_brillo.groupby(pd.Grouper(freq="H")).mean()
df1_rad['T'] = df_brillo_h['T']
df1_rad['BM'] = df_brillo_h['BM']
## -- Obtener solo los datos de dia
df1_rad_dia = df1_rad[(df1_rad.index.hour >= 6) & (df1_rad.index.hour < 18)]
## -- Sacar los datos para las fechas de los escenarios
## -- Escenarios máximos
fh_i_max = ['2018-06-01 00:00:00', '2018-08-01 00:00:00', '2018-12-01 00:00:00']
fh_f_max = ['2018-06-30 23:59:00', '2018-08-31 23:59:00', '2018-12-31 23:59:00']
df_max_esc01 = df1_rad_dia[(df1_rad_dia.index >= fh_i_max[0]) & (df1_rad_dia.index <= fh_f_max[0])]
df_max_esc02 = df1_rad_dia[(df1_rad_dia.index >= fh_i_max[1]) & (df1_rad_dia.index <= fh_f_max[1])]
df_max_esc03 = df1_rad_dia[(df1_rad_dia.index >= fh_i_max[2]) & (df1_rad_dia.index <= fh_f_max[2])]
## -- Acotando los fuera de rango
df_max_esc01 = df_max_esc01[df_max_esc01[u'Efi_Teorica'] < 2.5]
df_max_esc02 = df_max_esc02[df_max_esc02[u'Efi_Teorica'] < 2.5]
df_max_esc03 = df_max_esc03[df_max_esc03[u'Efi_Teorica'] < 2.5]
## -- Correlaciones de la eficiencia teórica
corr_BM_max_esc01, p_value_BM_max_esc01 = pearsonr(df_max_esc01[u'Efi_Teorica'].values, df_max_esc01[u'BM'].values)
corr_T_max_esc01, p_value_BM_max_esc01 = pearsonr(df_max_esc01[u'Efi_Teorica'].values, df_max_esc01[u'T'].values)
corr_BM_max_esc02, p_value_BM_max_esc02 = pearsonr(df_max_esc02[u'Efi_Teorica'].values, df_max_esc02[u'BM'].values)
corr_T_max_esc02, p_value_BM_max_esc02 = pearsonr(df_max_esc02[u'Efi_Teorica'].values, df_max_esc02[u'T'].values)
corr_BM_max_esc03, p_value_BM_max_esc03 = pearsonr(df_max_esc03[u'Efi_Teorica'].values, df_max_esc03[u'BM'].values)
corr_T_max_esc03, p_value_BM_max_esc03 = pearsonr(df_max_esc03[u'Efi_Teorica'].values, df_max_esc03[u'T'].values)
## -- Gráfico con la temperatura
jet = plt.get_cmap('jet')
fig = plt.figure(figsize=[11, 10])
ax1 = fig.add_subplot(1, 3, 1)
sc1 = ax1.scatter(df_max_esc01['radiacion'], df_max_esc01['Efi_Teorica'], s=10, c=df_max_esc01['T'], cmap=jet)
cbar1 = ax1.figure.colorbar(sc1)
cbar1.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar1.ax.tick_params(pad=-15, labelsize=6)
ax1.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax1.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax1.set_title("Escenario del mes:" + str(df_max_esc01.index.month[0]), fontsize=10)
#ax1.text(max(df_max_esc01['radiacion']), max(df_max_esc01['Efi_Teorica']), 'Corr Coef: '+ str(corr_T_max_esc01.round(2)) + ' $P value$: '+ str(p_value_BM_max_esc01.round(2)), style='italic', ha="center", bbox={'facecolor':'#D6EAF8', 'alpha':0.5, 'pad':-1})
ax2 = fig.add_subplot(1, 3, 2)
sc2 = ax2.scatter(df_max_esc02['radiacion'], df_max_esc02['Efi_Teorica'], s=10, c=df_max_esc02['T'], cmap=jet)
cbar2 = plt.colorbar(sc2)
cbar2.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar2.ax.tick_params(pad=-15, labelsize=6)
ax2.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax2.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax2.set_title("Escenario del mes:" + str(df_max_esc02.index.month[0]), fontsize=10)
ax3 = fig.add_subplot(1, 3, 3)
sc3 = ax3.scatter(df_max_esc03['radiacion'], df_max_esc03['Efi_Teorica'], s=10, c=df_max_esc03['T'], cmap=jet)
cbar3 = plt.colorbar(sc3)
cbar3.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar3.ax.tick_params(pad=-15, labelsize=6)
ax3.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax3.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax3.set_title("Escenario del mes:" + str(df_max_esc03.index.month[0]), fontsize=10)
plt.subplots_adjust( wspace=0.3, hspace=1)
fig.suptitle(u"Relación de las eficiencias máximas", fontsize=11, fontweight = "bold", fontproperties = prop)
plt.show()
#plt.savefig('/home/nacorreasa/Escritorio/EScenario1.png')
plt.close()
## -- Escenarios mínimos
fh_i_min = ['2018-02-01 00:00:00', '2018-03-01 00:00:00', '2018-04-01 00:00:00', '2018-05-01 00:00:00', '2018-11-01 00:00:00']
fh_f_min = ['2018-02-28 23:59:00', '2018-03-30 23:59:00', '2018-04-30 23:59:00', '2018-05-31 23:59:00', '2018-11-30 23:59:00']
df_min_esc01 = df1_rad_dia[(df1_rad_dia.index >= fh_i_min[0]) & (df1_rad_dia.index <= fh_f_min[0])]
df_min_esc02 = df1_rad_dia[(df1_rad_dia.index >= fh_i_min[1]) & (df1_rad_dia.index <= fh_f_min[1])]
df_min_esc03 = df1_rad_dia[(df1_rad_dia.index >= fh_i_min[2]) & (df1_rad_dia.index <= fh_f_min[2])]
df_min_esc04 = df1_rad_dia[(df1_rad_dia.index >= fh_i_min[3]) & (df1_rad_dia.index <= fh_f_min[3])]
df_min_esc05 = df1_rad_dia[(df1_rad_dia.index >= fh_i_min[4]) & (df1_rad_dia.index <= fh_f_min[4])]
## -- Acotando los fuera de rango
df_min_esc01 = df_min_esc01[df_min_esc01[u'Efi_Teorica'] < 2.5]
df_min_esc02 = df_min_esc02[df_min_esc02[u'Efi_Teorica'] < 2.5]
df_min_esc03 = df_min_esc03[df_min_esc03[u'Efi_Teorica'] < 2.5]
df_min_esc04 = df_min_esc04[df_min_esc04[u'Efi_Teorica'] < 2.5]
df_min_esc05 = df_min_esc05[df_min_esc05[u'Efi_Teorica'] < 2.5]
## -- Correlaciones de la eficiencia teórica
corr_BM_min_esc01, p_value_BM_min_esc01 = pearsonr(df_min_esc01[u'Efi_Teorica'].values, df_min_esc01[u'BM'].values)
corr_T_min_esc01, p_value_BM_min_esc01 = pearsonr(df_min_esc01[u'Efi_Teorica'].values, df_min_esc01[u'T'].values)
corr_BM_min_esc02, p_value_BM_min_esc02 = pearsonr(df_min_esc02[u'Efi_Teorica'].values, df_min_esc02[u'BM'].values)
corr_T_min_esc02, p_value_BM_min_esc02 = pearsonr(df_min_esc02[u'Efi_Teorica'].values, df_min_esc02[u'T'].values)
corr_BM_min_esc03, p_value_BM_min_esc03 = pearsonr(df_min_esc03[u'Efi_Teorica'].values, df_min_esc03[u'BM'].values)
corr_T_min_esc03, p_value_BM_min_esc03 = pearsonr(df_min_esc03[u'Efi_Teorica'].values, df_min_esc03[u'T'].values)
corr_BM_min_esc04, p_value_BM_min_esc04 = pearsonr(df_min_esc04[u'Efi_Teorica'].values, df_min_esc04[u'BM'].values)
corr_T_min_esc04, p_value_BM_min_esc04 = pearsonr(df_min_esc04[u'Efi_Teorica'].values, df_min_esc04[u'T'].values)
corr_BM_min_esc05, p_value_BM_min_esc05 = pearsonr(df_min_esc05[u'Efi_Teorica'].values, df_min_esc05[u'BM'].values)
corr_T_min_esc05, p_value_BM_min_esc05 = pearsonr(df_min_esc05[u'Efi_Teorica'].values, df_min_esc05[u'T'].values)
## -- Gráfico
jet = plt.get_cmap('jet')
fig = plt.figure(figsize=[11, 10])
ax1 = fig.add_subplot(2, 3, 1)
sc1 = ax1.scatter(df_min_esc01['radiacion'], df_min_esc01['Efi_Teorica'], s=10, c=df_min_esc01['T'], cmap=jet)
cbar1 = ax1.figure.colorbar(sc1)
cbar1.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar1.ax.tick_params(pad=-15, labelsize=6)
ax1.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax1.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax1.set_title("Escenario del mes:" + str(df_min_esc01.index.month[0]), fontsize=10)
#ax1.text(min(df_min_esc01['radiacion']), min(df_min_esc01['Efi_Teorica']), 'Corr Coef: '+ str(corr_T_min_esc01.round(2)) + ' $P value$: '+ str(p_value_BM_min_esc01.round(2)), style='italic', ha="center", bbox={'facecolor':'#D6EAF8', 'alpha':0.5, 'pad':-1})
ax2 = fig.add_subplot(2, 3, 2)
sc2 = ax2.scatter(df_min_esc02['radiacion'], df_min_esc02['Efi_Teorica'], s=10, c=df_min_esc02['T'], cmap=jet)
cbar2 = plt.colorbar(sc2)
cbar2.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar2.ax.tick_params(pad=-15, labelsize=6)
ax2.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax2.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax2.set_title("Escenario del mes:" + str(df_min_esc02.index.month[0]), fontsize=10)
ax3 = fig.add_subplot(2, 3, 3)
sc3 = ax3.scatter(df_min_esc03['radiacion'], df_min_esc03['Efi_Teorica'], s=10, c=df_min_esc03['T'], cmap=jet)
cbar3 = plt.colorbar(sc3)
cbar3.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar3.ax.tick_params(pad=-15, labelsize=6)
ax3.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax3.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax3.set_title("Escenario del mes:" + str(df_min_esc03.index.month[0]), fontsize=10)
ax4 = fig.add_subplot(2, 3, 4)
sc4 = ax4.scatter(df_min_esc04['radiacion'], df_min_esc04['Efi_Teorica'], s=10, c=df_min_esc04['T'], cmap=jet)
cbar4 = plt.colorbar(sc4)
cbar4.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar4.ax.tick_params(pad=-15, labelsize=6)
ax4.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax4.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax4.set_title("Escenario del mes:" + str(df_min_esc04.index.month[0]), fontsize=10)
ax5 = fig.add_subplot(2, 3, 5)
sc5 = ax5.scatter(df_min_esc05['radiacion'], df_min_esc05['Efi_Teorica'], s=10, c=df_min_esc05['T'], cmap=jet)
cbar5 = plt.colorbar(sc5)
cbar5.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar5.ax.tick_params(pad=-15, labelsize=6)
ax5.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax5.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax5.set_title("Escenario del mes:" + str(df_min_esc05.index.month[0]), fontsize=10)
plt.subplots_adjust( wspace=0.3, hspace=1)
fig.suptitle(u"Relación de las eficiencias mínimas", fontsize=11, fontweight = "bold", fontproperties = prop)
plt.show()
#plt.savefig('/home/nacorreasa/Escritorio/EScenario1.png')
plt.close()
## -- Escenarios medios
fh_i_mean = ['2018-01-01 00:00:00', '2018-07-01 00:00:00', '2018-09-01 00:00:00', '2018-10-01 00:00:00']
fh_f_mean = ['2018-01-31 23:59:00', '2018-07-31 23:59:00', '2018-09-30 23:59:00', '2018-10-31 23:59:00']
df_mean_esc01 = df1_rad_dia[(df1_rad_dia.index >= fh_i_mean[0]) & (df1_rad_dia.index <= fh_f_mean[0])]
df_mean_esc02 = df1_rad_dia[(df1_rad_dia.index >= fh_i_mean[1]) & (df1_rad_dia.index <= fh_f_mean[1])]
df_mean_esc03 = df1_rad_dia[(df1_rad_dia.index >= fh_i_mean[2]) & (df1_rad_dia.index <= fh_f_mean[2])]
df_mean_esc04 = df1_rad_dia[(df1_rad_dia.index >= fh_i_mean[3]) & (df1_rad_dia.index <= fh_f_mean[3])]
## -- Acotando los fuera de rango
df_mean_esc01 = df_mean_esc01[df_mean_esc01[u'Efi_Teorica'] < 2.5]
df_mean_esc02 = df_mean_esc02[df_mean_esc02[u'Efi_Teorica'] < 2.5]
df_mean_esc03 = df_mean_esc03[df_mean_esc03[u'Efi_Teorica'] < 2.5]
df_mean_esc04 = df_mean_esc04[df_mean_esc04[u'Efi_Teorica'] < 2.5]
## -- Correlaciones de la eficiencia teórica
corr_BM_mean_esc01, p_value_BM_mean_esc01 = pearsonr(df_mean_esc01[u'Efi_Teorica'].values, df_mean_esc01[u'BM'].values)
corr_T_mean_esc01, p_value_BM_mean_esc01 = pearsonr(df_mean_esc01[u'Efi_Teorica'].values, df_mean_esc01[u'T'].values)
corr_BM_mean_esc02, p_value_BM_mean_esc02 = pearsonr(df_mean_esc02[u'Efi_Teorica'].values, df_mean_esc02[u'BM'].values)
corr_T_mean_esc02, p_value_BM_mean_esc02 = pearsonr(df_mean_esc02[u'Efi_Teorica'].values, df_mean_esc02[u'T'].values)
corr_BM_mean_esc03, p_value_BM_mean_esc03 = pearsonr(df_mean_esc03[u'Efi_Teorica'].values, df_mean_esc03[u'BM'].values)
corr_T_mean_esc03, p_value_BM_mean_esc03 = pearsonr(df_mean_esc03[u'Efi_Teorica'].values, df_mean_esc03[u'T'].values)
corr_BM_mean_esc04, p_value_BM_mean_esc04 = pearsonr(df_mean_esc04[u'Efi_Teorica'].values, df_mean_esc04[u'BM'].values)
corr_T_mean_esc04, p_value_BM_mean_esc04 = pearsonr(df_mean_esc04[u'Efi_Teorica'].values, df_mean_esc04[u'T'].values)
## -- Gráfico
jet = plt.get_cmap('jet')
fig = plt.figure(figsize=[11, 10])
ax1 = fig.add_subplot(2, 2, 1)
sc1 = ax1.scatter(df_mean_esc01['radiacion'], df_mean_esc01['Efi_Teorica'], s=10, c=df_mean_esc01['T'], cmap=jet)
cbar1 = ax1.figure.colorbar(sc1)
cbar1.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar1.ax.tick_params(pad=-15, labelsize=6)
ax1.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax1.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax1.set_title("Escenario del mes:" + str(df_mean_esc01.index.month[0]), fontsize=10)
#ax1.text(mean(df_mean_esc01['radiacion']), mean(df_mean_esc01['Efi_Teorica']), 'Corr Coef: '+ str(corr_T_mean_esc01.round(2)) + ' $P value$: '+ str(p_value_BM_mean_esc01.round(2)), style='italic', ha="center", bbox={'facecolor':'#D6EAF8', 'alpha':0.5, 'pad':-1})
ax2 = fig.add_subplot(2, 2, 2)
sc2 = ax2.scatter(df_mean_esc02['radiacion'], df_mean_esc02['Efi_Teorica'], s=10, c=df_mean_esc02['T'], cmap=jet)
cbar2 = plt.colorbar(sc2)
cbar2.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar2.ax.tick_params(pad=-15, labelsize=6)
ax2.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax2.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax2.set_title("Escenario del mes:" + str(df_mean_esc02.index.month[0]), fontsize=10)
ax3 = fig.add_subplot(2, 2, 3)
sc3 = ax3.scatter(df_mean_esc03['radiacion'], df_mean_esc03['Efi_Teorica'], s=10, c=df_mean_esc03['T'], cmap=jet)
cbar3 = plt.colorbar(sc3)
cbar3.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar3.ax.tick_params(pad=-15, labelsize=6)
ax3.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax3.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax3.set_title("Escenario del mes:" + str(df_mean_esc03.index.month[0]), fontsize=10)
ax4 = fig.add_subplot(2, 2, 4)
sc4 = ax4.scatter(df_mean_esc04['radiacion'], df_mean_esc04['Efi_Teorica'], s=10, c=df_mean_esc04['T'], cmap=jet)
cbar4 = plt.colorbar(sc4)
cbar4.ax.set_ylabel(u"Temperatura $[°C]$", rotation=270, fontsize=6, fontproperties=prop_1, labelpad=15)
cbar4.ax.tick_params(pad=-15, labelsize=6)
ax4.set_xlabel(r"Intensidad de la radicion $[W/m^{2}]$", fontsize=10, fontproperties=prop_1)
ax4.set_ylabel("Eficiencia ", fontsize=10, fontproperties=prop_1)
ax4.set_title("Escenario del mes:" + str(df_mean_esc04.index.month[0]), fontsize=10)
plt.subplots_adjust( wspace=0.3, hspace=0.9)
fig.suptitle(u"Relación de las eficiencias medias", fontsize=11, fontweight = "bold", fontproperties = prop)
plt.show()
#plt.savefig('/home/nacorreasa/Escritorio/EScenario1.png')
plt.close()
## ---RELACIÓN CON LA BANDA DE AEROSOLES DE GOES--- ##
ds = Dataset('/home/nacorreasa/Maestria/Datos_Tesis/GOES/GOES_nc_CREADOS/GOES_VA_C12018.nc')
a_esun = Dataset('/home/nacorreasa/Maestria/Datos_Tesis/GOES/OR_ABI-L1b-RadF-M3C02_G16_s20180150000423_e20180150011190_c20180150011226.nc')
esun = a_esun.variables['esun'][:]
d = (a_esun.variables['earth_sun_distance_anomaly_in_AU'][:])**2
lat = ds.variables['lat'][:, :]
lon = ds.variables['lon'][:, :] + 360
Rad = ds.variables['Radiancias'][:,:,:]
# fr = (Rad*np.pi*d)/esun
# fr[fr[:, :] < 0] = 0
# fr[fr[:, :] > 1] = 1
# fr = np.sqrt(fr)
# fr = fr * 100.0
## -- Selección de un pixel de las radiancias
lat_index = np.where((lat[:, 0] > 6.25) & (lat[:, 0] < 6.26))[0][0]
lon_index = np.where((lon[0, :] < -75.58 + 360) & (lon[0, :] > -75.59 + 360))[0][0]  # lon was shifted by +360 above
Rad_pixel = Rad[:, lat_index, lon_index]
## -- Obtener el tiempo para cada valor
tiempo = ds.variables['time']
fechas_horas = nc.num2date(tiempo[:], units=tiempo.units)
for i in range(len(fechas_horas)):
fechas_horas[i] = fechas_horas[i].strftime('%Y-%m-%d %H:%M')
## -- Creación de dataframe de radiancias
Rad_df = pd.DataFrame()
Rad_df['Fecha_Hora'] = fechas_horas
Rad_df['Radiacias'] = Rad_pixel
Rad_df['Fecha_Hora'] = pd.to_datetime(Rad_df['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df.index = Rad_df['Fecha_Hora']
Rad_df = Rad_df.drop(['Fecha_Hora'], axis=1)
## -- Normalización de las radiancias
#
# min_Rad = min(Rad_df['Radiacias'].values)
# max_Rad = max(Rad_df['Radiacias'].values)
# range_Rad = max_Rad - min_Rad
# Rad_norm = (Rad_df['Radiacias'].values- min_Rad)/range_Rad
# Rad_df['Rad_norm'] = Rad_norm
#
Rad_df_h = Rad_df.groupby(pd.Grouper(freq="H")).mean()
Rad_df_h = Rad_df_h.between_time('06:00', '18:00') ##--> Seleccionar solo los datos de horas del dia
## -- Ciclo diurno de el anio para los aerololes en este punto
Rad_df_CD = Rad_df_h.groupby(by=[Rad_df_h.index.hour]).mean()
##----------------------------------- Ciclo Hidrologico -----------------------------------##
## -- Obteniendo los 4 DF para el ciclo hidrológico
Rad_dfh_DEF = Rad_df_h[(Rad_df_h.index.month == 1) | (Rad_df_h.index.month == 12) | (Rad_df_h.index.month == 2)]
Rad_dfh_MAM = Rad_df_h[(Rad_df_h.index.month == 3) | (Rad_df_h.index.month == 4) | (Rad_df_h.index.month == 5)]
Rad_dfh_JJA = Rad_df_h[(Rad_df_h.index.month == 6) | (Rad_df_h.index.month == 7) | (Rad_df_h.index.month == 8)]
Rad_dfh_SON = Rad_df_h[(Rad_df_h.index.month == 9) | (Rad_df_h.index.month == 10) | (Rad_df_h.index.month == 11)]
## -- Ciclo diurno horario de cada trimestre
Rad_dfh_DEF_CD = Rad_dfh_DEF.groupby(by=[Rad_dfh_DEF.index.hour]).mean()
Rad_dfh_MAM_CD = Rad_dfh_MAM.groupby(by=[Rad_dfh_MAM.index.hour]).mean()
Rad_dfh_JJA_CD = Rad_dfh_JJA.groupby(by=[Rad_dfh_JJA.index.hour]).mean()
Rad_dfh_SON_CD = Rad_dfh_SON.groupby(by=[Rad_dfh_SON.index.hour]).mean()
##--Grafico CD horario de cada trimestre
x_pos = np.arange(len(Rad_dfh_DEF_CD.index))
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(2, 2, 1)
ax1.bar(x_pos, Rad_dfh_DEF_CD['Radiacias'], align='center', alpha=0.5)
ax1.set_xticks(np.arange(0, 13))
ax1.set_xticklabels(Rad_dfh_DEF_CD.index.values)
ax1.set_ylabel(u'Radiancias', fontproperties=prop_1)
ax1.set_xlabel(u'Horas del dia', fontproperties=prop_1)
ax1.set_title('DEF', fontweight = "bold", fontproperties = prop)
ax2 = fig.add_subplot(2, 2, 2)
ax2.bar(x_pos, Rad_dfh_MAM_CD['Radiacias'], align='center', alpha=0.5)
ax2.set_xticks(np.arange(0, 13))
ax2.set_xticklabels(Rad_dfh_MAM_CD.index.values)
ax2.set_ylabel(u'Radiancias', fontproperties=prop_1)
ax2.set_xlabel(u'Horas del dia', fontproperties=prop_1)
ax2.set_title(r'MAM', fontweight = "bold", fontproperties = prop)
ax3 = fig.add_subplot(2, 2, 3)
ax3.bar(x_pos, Rad_dfh_JJA_CD['Radiacias'], align='center', alpha=0.5)
ax3.set_xticks(np.arange(0, 13))
ax3.set_xticklabels(Rad_dfh_JJA_CD.index.values)
ax3.set_ylabel(u'Radiancias', fontproperties=prop_1)
ax3.set_xlabel(u'Horas del dia', fontproperties=prop_1)
ax3.set_title(u'JJA', fontweight = "bold", fontproperties = prop)
ax4 = fig.add_subplot(2, 2, 4)
ax4.bar(x_pos, Rad_dfh_SON_CD['Radiacias'], align='center', alpha=0.5)
ax4.set_xticks(np.arange(0, 13))
ax4.set_xticklabels(Rad_dfh_SON_CD.index.values)
ax4.set_ylabel(u'Radiancias', fontproperties=prop_1)
ax4.set_xlabel(u'Horas del dia', fontproperties=prop_1)
ax4.set_title(u'SON', fontweight = "bold", fontproperties = prop)
plt.subplots_adjust( wspace=0.3, hspace=0.4)
fig.suptitle(u"CD Trimestral en: " + EstacionLoc, fontsize=15, fontweight = "bold", fontproperties = prop)
plt.savefig('/home/nacorreasa/Maestria/Semestre2/Curso_Rad/CD_C1_Trimestre_'+EstacionLoc+'.png')
plt.show()
## -- Histograma de cada trimestre
# use 'Radiacias' here: the normalized column 'Rad_norm' is only created in the commented-out block above
HistDEF = np.histogram(Rad_dfh_DEF['Radiacias'].values[~np.isnan(Rad_dfh_DEF['Radiacias'].values)])
HistMAM = np.histogram(Rad_dfh_MAM['Radiacias'].values[~np.isnan(Rad_dfh_MAM['Radiacias'].values)])
HistJJA = np.histogram(Rad_dfh_JJA['Radiacias'].values[~np.isnan(Rad_dfh_JJA['Radiacias'].values)])
HistSON = np.histogram(Rad_dfh_SON['Radiacias'].values[~np.isnan(Rad_dfh_SON['Radiacias'].values)])
#x_pos = np.arange(len(HistDEF[1])-1)
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(2, 2, 1)
ax1.hist(Rad_dfh_DEF['Radiacias'].values[~np.isnan(Rad_dfh_DEF['Radiacias'].values)], bins='auto')
# ax1.bar(x_pos, HistDEF[0], align='center', alpha=0.5)
# ax1.set_xticks(np.arange(0, 13))
# ax1.set_xticklabels(HistDEF[1].round(2), rotation= 45, fontproperties=prop_1, fontsize= 10)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Radiancia', fontproperties=prop_1)
ax1.set_title('DEF', fontweight = "bold", fontproperties = prop)
ax2 = fig.add_subplot(2, 2, 2)
ax2.hist(Rad_dfh_MAM['Radiacias'].values[~np.isnan(Rad_dfh_MAM['Radiacias'].values)], bins='auto')
# ax2.bar(x_pos, HistMAM[0], align='center', alpha=0.5)
# ax2.set_xticks(np.arange(0, 13))
# ax2.set_xticklabels(HistMAM[1].round(2), rotation= 45)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Radiancia', fontproperties=prop_1)
ax2.set_title(r'MAM', fontweight = "bold", fontproperties = prop)
ax3 = fig.add_subplot(2, 2, 3)
ax3.hist(Rad_dfh_JJA['Radiacias'].values[~np.isnan(Rad_dfh_JJA['Radiacias'].values)], bins='auto')
# ax3.bar(x_pos, HistJJA[0], align='center', alpha=0.5)
# ax3.set_xticks(np.arange(0, 13))
# ax3.set_xticklabels(HistJJA[1].round(2), rotation= 45)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Radiancia', fontproperties=prop_1)
ax3.set_title(u'JJA', fontweight = "bold", fontproperties = prop)
ax4 = fig.add_subplot(2, 2, 4)
ax4.hist(Rad_dfh_SON['Radiacias'].values[~np.isnan(Rad_dfh_SON['Radiacias'].values)], bins='auto')
# ax4.bar(x_pos, HistSON[0], align='center', alpha=0.5)
# ax4.set_xticks(np.arange(0, 13))
# ax4.set_xticklabels(HistSON[1].round(2), rotation= 45)
ax4.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax4.set_xlabel(u'Radiancia', fontproperties=prop_1)
ax4.set_title(u'SON', fontweight = "bold", fontproperties = prop)
plt.subplots_adjust( wspace=0.3, hspace=0.4)
fig.suptitle(u"Histograma Trimestral de aerosoles en: " + EstacionLoc, fontsize=15, fontweight = "bold", fontproperties = prop)
plt.savefig('/home/nacorreasa/Maestria/Semestre2/Curso_Rad/Hist_C1_Trimestre_'+EstacionLoc+'.png')
plt.show()
##----------------------------------- Escenarios de los percentiles -----------------------------------##
## -- Obteniendo los 3 DF relacionados a los escenarios
Rad_dfh_MAX = Rad_df_h[(Rad_df_h.index.month == 5)| (Rad_df_h.index.month == 6)| (Rad_df_h.index.month == 7) |
(Rad_df_h.index.month == 8) | (Rad_df_h.index.month == 9)]
Rad_dfh_MIN = Rad_df_h[(Rad_df_h.index.month == 10) | (Rad_df_h.index.month == 11) | (Rad_df_h.index.month == 12)]
Rad_dfh_MEAN = Rad_df_h[(Rad_df_h.index.month == 1) | (Rad_df_h.index.month == 2) | (Rad_df_h.index.month == 3)
| (Rad_df_h.index.month == 4)]
## -- Ciclo diurno horario de cada grupo de meses
Rad_dfh_MAX_CD = Rad_dfh_MAX.groupby(by=[Rad_dfh_MAX.index.hour]).mean()
Rad_dfh_MIN_CD = Rad_dfh_MIN.groupby(by=[Rad_dfh_MIN.index.hour]).mean()
Rad_dfh_MEAN_CD = Rad_dfh_MEAN.groupby(by=[Rad_dfh_MEAN.index.hour]).mean()
x_pos = np.arange(len(Rad_dfh_MAX_CD.index))
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.bar(x_pos, Rad_dfh_MAX_CD ['Radiacias'], align='center', alpha=0.5)
ax1.set_xticks(np.arange(0, 13))
ax1.set_xticklabels(Rad_dfh_MAX_CD .index.values, rotation = 45)
ax1.set_ylabel(u'Radiancia', fontproperties=prop_1)
ax1.set_xlabel(u'Horas del dia', fontproperties=prop_1)
ax1.set_title('MAX', fontweight = "bold", fontproperties = prop)
ax2 = fig.add_subplot(1, 3, 2)
ax2.bar(x_pos, Rad_dfh_MIN_CD['Radiacias'], align='center', alpha=0.5)
ax2.set_xticks(np.arange(0, 13))
ax2.set_xticklabels(Rad_dfh_MIN_CD.index.values, rotation = 45)
ax2.set_ylabel(u'Radiancia', fontproperties=prop_1)
ax2.set_xlabel(u'Horas del dia', fontproperties=prop_1)
ax2.set_title(r'MIN', fontweight = "bold", fontproperties = prop)
ax3 = fig.add_subplot(1, 3, 3)
ax3.bar(x_pos, Rad_dfh_MEAN_CD['Radiacias'], align='center', alpha=0.5)
ax3.set_xticks(np.arange(0, 13))
ax3.set_xticklabels(Rad_dfh_MEAN_CD.index.values, rotation = 45)
ax3.set_ylabel(u'Radiancia', fontproperties=prop_1)
ax3.set_xlabel(u'Horas del dia', fontproperties=prop_1)
ax3.set_title(u'MEAN', fontweight = "bold", fontproperties = prop)
plt.subplots_adjust( wspace=0.3, hspace=0.4)
fig.suptitle(u"CD por escenarios en: " + EstacionLoc, fontsize=15, fontweight = "bold", fontproperties = prop)
plt.savefig('/home/nacorreasa/Maestria/Semestre2/Curso_Rad/CD_C1_Escenarios_'+EstacionLoc+'.png')
plt.show()
## -- Histograma de cada grupo de meses
# use 'Radiacias' here as well; 'Rad_norm' is never created because the normalization block is commented out
HistMAX = np.histogram(Rad_dfh_MAX['Radiacias'].values[~np.isnan(Rad_dfh_MAX['Radiacias'].values)])
HistMIN = np.histogram(Rad_dfh_MIN['Radiacias'].values[~np.isnan(Rad_dfh_MIN['Radiacias'].values)])
HistMEAN = np.histogram(Rad_dfh_MEAN['Radiacias'].values[~np.isnan(Rad_dfh_MEAN['Radiacias'].values)])
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.hist(Rad_dfh_MAX['Radiacias'].values[~np.isnan(Rad_dfh_MAX['Radiacias'].values)], bins='auto')
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Radiancias', fontproperties=prop_1)
ax1.set_title(u'Máximo', fontweight = "bold", fontproperties = prop)
ax2 = fig.add_subplot(1, 3, 2)
ax2.hist(Rad_dfh_MIN['Radiacias'].values[~np.isnan(Rad_dfh_MIN['Radiacias'].values)], bins='auto')
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Radiancias', fontproperties=prop_1)
ax2.set_title(u'Mínimo', fontweight = "bold", fontproperties = prop)
ax3 = fig.add_subplot(1, 3, 3)
ax3.hist(Rad_dfh_MEAN['Radiacias'].values[~np.isnan(Rad_dfh_MEAN['Radiacias'].values)], bins='auto')
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Radiancias', fontproperties=prop_1)
ax3.set_title(u'Medio', fontweight = "bold", fontproperties = prop)
plt.subplots_adjust( wspace=0.3, hspace=0.4)
fig.suptitle(u"Histograma escenarios de aerosoles en: " + EstacionLoc, fontsize=15, fontweight = "bold", fontproperties = prop)
plt.savefig('/home/nacorreasa/Maestria/Semestre2/Curso_Rad/Hist_C1_Escenarios_'+EstacionLoc+'.png')
plt.show()
## -- Histograma diurno de los aerosoles en este punto
fig = plt.figure(figsize=(14, 18))
for i in range(1, 13):
A = Rad_df_h[Rad_df_h.index.hour == (i + 5)]['Radiacias']
ax = fig.add_subplot(4, 3, i )
ax.set_title('Hora '+str(A.index.hour[0]), fontsize=6)
ax.hist(A.values[~np.isnan(A.values)], bins='auto')
ax.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax.set_xlabel(u'Radiancia', fontproperties=prop_1)
plt.subplots_adjust( wspace=0.3, hspace=0.4)
fig.suptitle(u"Histograma horario 2018 de aerosoles en: " + EstacionLoc, fontsize=15, fontweight = "bold", fontproperties = prop)
plt.savefig('/home/nacorreasa/Maestria/Semestre2/Curso_Rad/HistHora_C1_Trimestre_'+EstacionLoc+'.png')
plt.show()
|
{"hexsha": "cbbb4ca34fa8722dd42e1e6ca687460874a08b2d", "size": 61000, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tesis_Eficiencia_Teorica.py", "max_stars_repo_name": "cmcuervol/Estefania", "max_stars_repo_head_hexsha": "13b564261dfc786b93c77fbc442a568018f87cc9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-13T07:55:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-21T13:36:23.000Z", "max_issues_repo_path": "Tesis_Eficiencia_Teorica.py", "max_issues_repo_name": "cmcuervol/Estefania", "max_issues_repo_head_hexsha": "13b564261dfc786b93c77fbc442a568018f87cc9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tesis_Eficiencia_Teorica.py", "max_forks_repo_name": "cmcuervol/Estefania", "max_forks_repo_head_hexsha": "13b564261dfc786b93c77fbc442a568018f87cc9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.5648854962, "max_line_length": 266, "alphanum_fraction": 0.7201311475, "include": true, "reason": "import numpy,from scipy", "num_tokens": 20404}
|
import sys
import numpy as np
import wave
import pyaudio
import os
import os.path
class Distribution(dict):
def __missing__(self, key):
# if missing, return 0
return 0
def renormalize(self):
normalization_constant = sum(self.values())
assert normalization_constant > 0, "Sum of probabilities is 0"
for key in self.keys():
self[key] /= normalization_constant
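# Usage sketch (illustrative, with made-up observation labels): count observations with the
# defaultdict-like behaviour of Distribution, then renormalize so the values sum to 1.
def example_distribution_usage(observations=('C4', 'E4', 'C4', 'G4')):
    dist = Distribution()
    for obs in observations:
        dist[obs] += 1  # __missing__ makes unseen keys start at 0
    dist.renormalize()
    return dist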
def load_wav(filepath, t_start=0, t_end=sys.maxsize, only_22k=True):
"""Load a wave file, which must be 22050Hz and 16bit and must be either
mono or stereo.
Inputs:
filepath: audio file
t_start, t_end: (optional) subrange of file to load (in seconds)
    only_22k: if True (default), the file is expected to be 22050 Hz (the assert below is currently disabled).
Returns:
a numpy floating-point array with a range of [-1, 1]
"""
wf = wave.open(filepath)
num_channels, sampwidth, fs, end, comptype, compname = wf.getparams()
# for now, we will only accept 16 bit files at 22k
assert(sampwidth == 2)
# assert(fs == 22050)
# start frame, end frame, and duration in frames
f_start = int(t_start * fs)
f_end = min(int(t_end * fs), end)
frames = f_end - f_start
wf.setpos(f_start)
raw_bytes = wf.readframes(frames)
# convert raw data to numpy array, assuming int16 arrangement
    samples = np.frombuffer(raw_bytes, dtype=np.int16)
# convert from integer type to floating point, and scale to [-1, 1]
    samples = samples.astype(np.float64)
samples *= (1 / 32768.0)
if num_channels == 1:
return samples
elif num_channels == 2:
return 0.5 * (samples[0::2] + samples[1::2])
else:
        raise ValueError('Can only handle mono or stereo wave files')
def play_wav(filepath):
# define stream chunk
chunk = 1024
# open a wav format music
f = wave.open(filepath, "rb")
# instantiate PyAudio
p = pyaudio.PyAudio()
# open stream
stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
channels=f.getnchannels(),
rate=f.getframerate(),
output=True)
# read data
data = f.readframes(chunk)
# play stream
while data:
stream.write(data)
data = f.readframes(chunk)
# stop stream
stream.stop_stream()
stream.close()
# close PyAudio
p.terminate()
def play_wav_data(wav_data):
    wav_data = np.array(wav_data).astype(np.float32).tobytes()
# define stream chunk
chunk = 1024
offset = 0
# instantiate PyAudio
p = pyaudio.PyAudio()
# open stream
stream = p.open(format=pyaudio.paFloat32,
channels=1,
rate=22050,
output=True)
# read data
data = wav_data[offset:offset+chunk]
# play stream
while offset+chunk <= len(wav_data)-1:
stream.write(data)
offset += chunk
data = wav_data[offset:offset+chunk]
# stop stream
stream.stop_stream()
stream.close()
# close PyAudio
p.terminate()
def get_directory_files(dirpath, file_ext=None):
'''Return all files in a directory
Inputs:
dirpath: directory name
file_ext: (optional) only return files ending with that extension.
'''
files = sorted(os.listdir(dirpath))
return [os.path.join(dirpath, f) for f in files if file_ext == None or f.endswith(file_ext)]
def kl_div(p, q):
"""Kullback-Leibler divergence D(P || Q) for discrete distributions
Parameters
----------
p, q : array-like, dtype=float, shape=n
Discrete probability distributions.
"""
    p = np.asarray(p, dtype=float) + 1e-5
    q = np.asarray(q, dtype=float) + 1e-5
return np.sum(p * np.log(p / q))
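# Added illustrative check (not from the original file): D(P || Q) is zero when
# the two distributions match and positive otherwise.
def _demo_kl_div():
    p = [0.5, 0.5]
    q = [0.9, 0.1]
    assert kl_div(p, p) < 1e-6   # identical distributions -> ~0
    assert kl_div(p, q) > 0      # different distributions -> positive
    return kl_div(p, q)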
if __name__ == '__main__':
# path = "/home/josh/Music/cant_sleep_love_pentatonix.wav"
path = "/home/josh/Documents/school/senior/6.804/project/music-perception-mcmc/resources/keys_wav/60.wav"
play_wav(path)
|
{"hexsha": "b57b52c33d88d0288953db4d53a68e049ef3dd6b", "size": 4015, "ext": "py", "lang": "Python", "max_stars_repo_path": "util.py", "max_stars_repo_name": "jhell96/music-perception-mcmc", "max_stars_repo_head_hexsha": "327ad9d15ccfda72c25efd370041fedf28686141", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-14T01:36:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-12T10:06:57.000Z", "max_issues_repo_path": "util.py", "max_issues_repo_name": "jhell96/music-perception-mcmc", "max_issues_repo_head_hexsha": "327ad9d15ccfda72c25efd370041fedf28686141", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util.py", "max_forks_repo_name": "jhell96/music-perception-mcmc", "max_forks_repo_head_hexsha": "327ad9d15ccfda72c25efd370041fedf28686141", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4144736842, "max_line_length": 109, "alphanum_fraction": 0.6264009963, "include": true, "reason": "import numpy", "num_tokens": 1043}
|
[STATEMENT]
lemma authKeysI:
"Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key K, Agent Tgs, Number Ta\<rbrace>,
Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key K, Number Ta\<rbrace> \<rbrace> \<in> set evs
\<Longrightarrow> K \<in> authKeys evs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key K, Agent Tgs, Number Ta\<rbrace>, Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key K, Number Ta\<rbrace>\<rbrace> \<in> set evs \<Longrightarrow> K \<in> authKeys evs
[PROOF STEP]
by (auto simp add: authKeys_def)
|
{"llama_tokens": 227, "file": null, "length": 1}
|
# Autogenerated wrapper script for Zellij_jll for i686-linux-gnu
export zellij
JLLWrappers.@generate_wrapper_header("Zellij")
JLLWrappers.@declare_executable_product(zellij)
function __init__()
JLLWrappers.@generate_init_header()
JLLWrappers.@init_executable_product(
zellij,
"bin/zellij",
)
JLLWrappers.@generate_init_footer()
end # __init__()
|
{"hexsha": "28761b1f615726d82097573b4c02a60e56f24a24", "size": 380, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrappers/i686-linux-gnu.jl", "max_stars_repo_name": "JuliaBinaryWrappers/Zellij_jll.jl", "max_stars_repo_head_hexsha": "0730f1730fc6707c6a401a9968b2d46cdb24be7e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-09T06:31:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T06:31:20.000Z", "max_issues_repo_path": "src/wrappers/i686-linux-gnu.jl", "max_issues_repo_name": "JuliaBinaryWrappers/Zellij_jll.jl", "max_issues_repo_head_hexsha": "0730f1730fc6707c6a401a9968b2d46cdb24be7e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrappers/i686-linux-gnu.jl", "max_forks_repo_name": "JuliaBinaryWrappers/Zellij_jll.jl", "max_forks_repo_head_hexsha": "0730f1730fc6707c6a401a9968b2d46cdb24be7e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3333333333, "max_line_length": 64, "alphanum_fraction": 0.7552631579, "num_tokens": 97}
|
# implementation of iWare-E for PAWS
# Lily Xu
# May 2019
import sys
import time
import pickle
import pandas as pd
import numpy as np
from scipy.optimize import minimize
from sklearn import metrics
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn import tree
from sklearn.svm import LinearSVC, SVC
from sklearn.gaussian_process import GaussianProcessRegressor
# from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from imblearn.ensemble import BalancedBaggingClassifier
from sklearn.gaussian_process.kernels import RBF
from itertools import product
from gpc import GaussianProcessClassifier
NUM_COLS_TO_SKIP = 6 # number of extraneous columns in 'x' features CSV file
POSITIVE_LABEL = 1 # how a positive label is encoded in the data
RANDOM_SEED = None # could be None
N_JOBS = 1 # -1 to use max
# parameters for bagging classifier
NUM_ESTIMATORS = 32 #32 #50
MAX_SAMPLES = 0.8
MAX_FEATURES = .5
# verbose output if == 1
VERBOSE = 0
###########################################################
# modify GPR to serve as a classifier
# and offer variance results
###########################################################
def gpr_predict_proba(self, x, return_var=False):
mean_r, std = self.predict(x, return_std=True)
prob = 1 / (1 + np.exp(mean_r - 0.5))
prob = prob.reshape(-1, 1)
# form array with predictions for both classes
predictions = np.concatenate((prob, 1 - prob), axis=1)
if return_var:
        var = [s**2 for s in std]  # squared std devs (loop variable renamed to avoid shadowing the argument x)
return predictions, var
else:
return predictions
# def gpr_get_var(self, x):
# _, std = self.predict(x, return_std=True)
#
# return [x**2 for x in std]
GaussianProcessRegressor.predict_proba = gpr_predict_proba
# GaussianProcessRegressor.get_var = gpr_get_var
def rf_predict_proba(self, x, return_var=False, train_x=None):
predictions = self.predict_proba_orig(x)
import forestci as fci
if return_var:
assert train_x is not None
var = fci.random_forest_error(self, train_x, x)
return predictions, var
else:
return predictions
RandomForestClassifier.predict_proba_orig = RandomForestClassifier.predict_proba
RandomForestClassifier.predict_proba = rf_predict_proba
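# Added note (illustration only, not in the original): the monkey-patched GPR
# predict_proba above squashes the regressor's mean prediction through a logistic
# centred at 0.5 to obtain pseudo-probabilities. A standalone sketch of that mapping:
def _demo_mean_to_proba(mean_r):
    """Mirror gpr_predict_proba's squash: returns columns (prob, 1 - prob)."""
    mean_r = np.asarray(mean_r, dtype=float)
    prob = 1.0 / (1.0 + np.exp(mean_r - 0.5))
    return np.column_stack((prob, 1.0 - prob))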
###########################################################
# utility functions
###########################################################
# given training and testing sets, normalize data to zero mean, unit variance
def normalize_data(train, test):
scaler = StandardScaler()
# fit only on training data
scaler.fit(train)
# apply normalization to training and test data
train = scaler.transform(train)
test = scaler.transform(test)
return train, test
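# Added usage sketch (synthetic arrays, not project data): the scaler is fit on
# the training block only and then applied to both blocks.
def _demo_normalize_data():
    train = np.array([[0.0, 10.0], [2.0, 20.0], [4.0, 30.0]])
    test = np.array([[1.0, 15.0]])
    train_n, test_n = normalize_data(train, test)
    return train_n.mean(axis=0), train_n.std(axis=0), test_n  # column means ~0, stds ~1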
# by maximizing F1 score?
def determine_threshold(label, predict_test_pos_probs, num_thresholds=50):
# TODO: previously, used tpr-(1-fpr)
# fpr, tpr, thresholds = metrics.roc_curve(label, predict_test_pos_probs, pos_label=POSITIVE_LABEL)
# or maybe scaled, like 2*tpr - (1-fpr)?
thresholds = np.linspace(0, 1, num_thresholds)
f1 = np.zeros(thresholds.size)
precision = np.zeros(thresholds.size)
recall = np.zeros(thresholds.size)
auprc = np.zeros(thresholds.size)
for i in range(num_thresholds):
predict_labels = predict_test_pos_probs > thresholds[i]
predict_labels = predict_labels.astype(int)
f1[i] = metrics.f1_score(label, predict_labels)
precision[i] = metrics.precision_score(label, predict_labels, pos_label=POSITIVE_LABEL)
recall[i] = metrics.recall_score(label, predict_labels, pos_label=POSITIVE_LABEL)
precision_vals, recall_vals, _ = metrics.precision_recall_curve(label, predict_test_pos_probs, pos_label=POSITIVE_LABEL)
auprc[i] = metrics.auc(recall_vals, precision_vals)
if VERBOSE:
print('threshold: {:.4f} | f1: {:.4f}, precision: {:.4f}, recall: {:.4f}, AUPRC: {:.4f}'.format(thresholds[i], f1[i], precision[i], recall[i], auprc[i]))
# opt = np.argmax(f1)
opt = np.argmax(auprc)
print('optimal threshold {:.4f}, with f1 {:.4f}, precision {:.4f}, recall {:.4f}, AUPRC {:.4f}'.format(thresholds[opt], f1[opt], precision[opt], recall[opt], auprc[opt]))
return thresholds[opt]
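# Added simplified sketch (synthetic labels/scores assumed): sweep candidate
# thresholds and keep the one with the best F1, a stripped-down version of the
# search performed above.
def _demo_threshold_sweep(labels, scores, num_thresholds=11):
    scores = np.asarray(scores, dtype=float)
    thresholds = np.linspace(0, 1, num_thresholds)
    f1 = [metrics.f1_score(labels, (scores > t).astype(int)) for t in thresholds]
    return thresholds[int(np.argmax(f1))]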
# evaluate the ML model on the test set by print all relevant metrics for the test set
def evaluate_results(test_y, predict_test_pos_probs):
output = []
# compute optimal threshold and determine labels
opt_threshold = determine_threshold(test_y, predict_test_pos_probs)
predict_test = (predict_test_pos_probs > opt_threshold).astype(int)
predict_test_neg_probs = np.ones(predict_test_pos_probs.shape) - predict_test_pos_probs
predict_test_probs = np.concatenate((predict_test_neg_probs.reshape(1,-1), predict_test_pos_probs.reshape(1,-1)), axis=0).transpose()
# select the prediction column with probability of assigned label
predict_test_label_probs = predict_test_probs[[i for i in range(predict_test.shape[0])], tuple(predict_test)]
fpr, tpr, _ = metrics.roc_curve(test_y, predict_test_pos_probs, pos_label=POSITIVE_LABEL)
output.append('AUC: {:.5f}'.format(metrics.auc(fpr, tpr)))
precision_vals, recall_vals, _ = metrics.precision_recall_curve(test_y, predict_test_pos_probs, pos_label=POSITIVE_LABEL)
output.append('AUPRC: {:.5f}'.format(metrics.auc(recall_vals, precision_vals))) # area under precision-recall curve
#output.append('average precision score: {:.5f}'.format(metrics.average_precision_score(test_y, predict_test_pos_probs, pos_label=POSITIVE_LABEL)))
output.append('precision: {:.5f}'.format(metrics.precision_score(test_y, predict_test, pos_label=POSITIVE_LABEL)))
recall = metrics.recall_score(test_y, predict_test, pos_label=POSITIVE_LABEL)
output.append('recall: {:.5f}'.format(recall))
output.append('F1 score: {:.5f}'.format(metrics.f1_score(test_y, predict_test, pos_label=POSITIVE_LABEL)))
percent_positive = np.where(predict_test == POSITIVE_LABEL)[0].shape[0] / predict_test.shape[0]
l_and_l = recall ** 2 / percent_positive
max_ll = 1 / (test_y.sum() / test_y.shape[0])
output.append('L&L %: {:.5f} ({:.5f} / {:.5f})'.format(100 * (l_and_l / max_ll), l_and_l, max_ll))
output.append('cross entropy: {:.5f}'.format(metrics.log_loss(test_y, predict_test_probs)))
output.append('average prediction probability: {:.5f}'.format(predict_test_label_probs.mean()))
output.append('accuracy: {:.5f}'.format(metrics.accuracy_score(test_y, predict_test)))
output.append('cohen\'s kappa: {:.5f}'.format(metrics.cohen_kappa_score(test_y, predict_test))) # measures inter-annotator agreement
output.append('F-beta score: {:.5f}'.format(metrics.fbeta_score(test_y, predict_test, 2, pos_label=POSITIVE_LABEL))) # commonly .5, 1, or 2. if 1, then same as f1
return '\n'.join(output)
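# Added worked example (synthetic numbers): the L&L score reported above is
# recall**2 divided by the fraction of cells predicted positive; max_ll rescales
# it by the base rate of positives in the labels.
def _demo_l_and_l(recall=0.6, percent_positive=0.3, base_rate=0.2):
    l_and_l = recall ** 2 / percent_positive   # 0.36 / 0.3 = 1.2
    max_ll = 1 / base_rate                     # 5.0
    return 100 * (l_and_l / max_ll)            # 24.0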
###########################################################
# setup data
###########################################################
def setup_data(x_filename, y_filename):
# read in features
features_raw = pd.read_csv(x_filename)
features_raw.drop(columns=features_raw.columns[0], inplace=True)
patrol_effort = features_raw['current_patrol_effort'].values
section_col = features_raw['section'].values
year_col = features_raw['year'].values
# features_raw.drop(columns=['temp', 'precip'], inplace=True)
# don't use current_patrol_effort as a feature
features_raw.drop(columns='current_patrol_effort', inplace=True)
# read in labels
labels_raw = pd.read_csv(y_filename)
labels_raw.drop(columns=labels_raw.columns[0], inplace=True)
features = features_raw.values[:, NUM_COLS_TO_SKIP:]
labels = labels_raw.values[:, NUM_COLS_TO_SKIP]
feature_names = list(features_raw.columns)[NUM_COLS_TO_SKIP:]
print('feature names {}'.format(feature_names))
return features_raw, features, feature_names, labels, patrol_effort, section_col, year_col
###########################################################
# iWare-E class
###########################################################
class iWare:
def __init__(self, method, num_classifiers, park, year):
self.method = method
self.num_classifiers = num_classifiers
self.park = park
self.year = year
self.patrol_thresholds = None
self.classifiers = None
self.weights = None # weights for classifiers
self.train_x_norm = None # normalized numpy array of train_x
# get classifier used as base estimator in bagging classifier
def get_base_estimator(self):
if self.method == 'gp':
# kernel = ConstantKernel(1e-20, (1e-25, 1e-15))* RBF(length_scale=1)
kernel = 1.0 * RBF(length_scale=1.0)
#kernel = 1.0 * RBF(length_scale=20.0)
# look at Matern kernel?
# ********
# Aaron suggests printing out length scale
#kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-05, 1e5))
# optimizer=None to keep kernel parameters in place
# n_restarts_optimizer=5,
base_estimator = GaussianProcessClassifier(kernel=kernel, random_state=RANDOM_SEED, warm_start=True, max_iter_predict=100, n_jobs=-1)
# base_estimator = GaussianProcessRegressor(kernel=kernel, random_state=RANDOM_SEED, n_restarts_optimizer=0, normalize_y=True)
elif self.method == 'svm':
base_estimator = SVC(gamma='auto', random_state=RANDOM_SEED)
elif self.method == 'linear-svc':
base_estimator = LinearSVC(max_iter=5000, random_state=RANDOM_SEED)
elif self.method == 'dt':
base_estimator = tree.DecisionTreeClassifier(random_state=RANDOM_SEED)
else:
raise Exception('method \'{}\' not recognized'.format(self.method))
return base_estimator
# get overall classifier to use
def get_classifier(self, use_balanced):
if self.method == 'rf':
return RandomForestClassifier(n_estimators=NUM_ESTIMATORS,
criterion='gini', max_depth=None, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.0,
max_features=MAX_FEATURES, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
bootstrap=True, oob_score=False, n_jobs=N_JOBS,
random_state=RANDOM_SEED, verbose=VERBOSE,
warm_start=False, class_weight=None)
# return RandomForestRegressor(n_estimators=NUM_ESTIMATORS,
# criterion='mse', max_depth=None, min_samples_split=2,
# min_samples_leaf=1, min_weight_fraction_leaf=0.0,
# max_features=MAX_FEATURES, max_leaf_nodes=None,
# min_impurity_decrease=0.0, min_impurity_split=None,
# bootstrap=True, oob_score=False,
# n_jobs=N_JOBS, random_state=RANDOM_SEED,
# verbose=VERBOSE, warm_start=False)
base_estimator = self.get_base_estimator()
# GPs don't need a bagging classifier
# return base_estimator
if self.method == 'gp':
return base_estimator
# balanced bagging classifier used for datasets with strong label imbalance
# (e.g. SWS in Cambodia)
elif use_balanced:
return BalancedBaggingClassifier(base_estimator=base_estimator,
n_estimators=NUM_ESTIMATORS, max_samples=MAX_SAMPLES,
max_features=MAX_FEATURES,
bootstrap=True, bootstrap_features=False,
oob_score=False, warm_start=False,
sampling_strategy='majority', #sampling_strategy=0.8,
replacement=True, n_jobs=N_JOBS,
random_state=RANDOM_SEED, verbose=VERBOSE)
# non-balanced bagging classifier used for other datasets
else:
return BaggingClassifier(base_estimator=base_estimator,
n_estimators=NUM_ESTIMATORS, max_samples=MAX_SAMPLES,
max_features=MAX_FEATURES,
bootstrap=True, bootstrap_features=False,
oob_score=False, warm_start=False, n_jobs=N_JOBS,
random_state=RANDOM_SEED, verbose=VERBOSE)
###########################################################
# classification
###########################################################
def get_patrol_thresholds(self, train_effort):
patrol_threshold_percentile = np.linspace(0, 100, self.num_classifiers, endpoint=False)
patrol_thresholds = np.percentile(train_effort, patrol_threshold_percentile)
print('percentiles {}'.format(patrol_threshold_percentile))
print('patrol thresholds {}'.format(patrol_thresholds))
return patrol_thresholds
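    # Added illustration (not part of the original class; synthetic effort values):
    # with num_classifiers = 4, the thresholds are the 0th, 25th, 50th and 75th
    # percentiles of the training patrol effort.
    def _demo_patrol_thresholds(self, effort):
        percentiles = np.linspace(0, 100, self.num_classifiers, endpoint=False)
        return np.percentile(effort, percentiles)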
def get_vote_matrix(self):
vote_power = np.identity(self.num_classifiers) # identity matrix
# vote_power = np.tril(np.ones((self.num_classifiers, self.num_classifiers))) # lower triangle
# vote_power = np.triu(np.ones((self.num_classifiers, self.num_classifiers))) # upper triangle
# build triangular vote qualification matrix
# vote_qual = np.triu(np.ones((self.num_classifiers, self.num_classifiers)))
vote_qual = np.ones((self.num_classifiers, self.num_classifiers))
# create combined vote matrix
vote_combine = np.multiply(vote_power, vote_qual)
        # normalize row-wise so each row of classifier weights sums to 1
vote_combine = vote_combine / vote_combine.sum(1)[:,None]
return vote_combine
# train a set of classifiers using provided data
def train_classifiers(self, train_x, train_y, train_effort, use_balanced):
classifiers = []
for i in range(self.num_classifiers):
#### filter data
# get training data for this threshold
idx = np.where(np.logical_or(train_effort >= self.patrol_thresholds[i], train_y == POSITIVE_LABEL))[0]
if i > 0 and self.patrol_thresholds[i] == self.patrol_thresholds[i-1]:
print('threshold {} same as previous, value {}. skipping'.format(i, self.patrol_thresholds[i]))
classifiers.append(None)
continue
if idx.size == 0:
print('no training points found for classifier {}, threshold = {}'.format(i, self.patrol_thresholds[i]))
classifiers.append(None)
continue
train_x_filter = train_x[idx, :]
train_y_filter = train_y[idx]
print('filtered data: {}. num positive labels {}'.format(train_x_filter.shape, np.sum(train_y_filter)))
if np.sum(train_y_filter) == 0:
print('no positive labels in this subset of the training data. skipping classifier {}'.format(i))
classifiers.append(None)
continue
# select and train a classifier
classifier = self.get_classifier(use_balanced)
print('classifier {}, threshold {}, num x {}'.format(i, self.patrol_thresholds[i], train_x_filter.shape))
start_time = time.time()
# fit training data
classifier.fit(train_x_filter, train_y_filter)
print(' train time: {:.2f} seconds, score: {:.5f}'.format(
time.time() - start_time,
classifier.score(train_x_filter, train_y_filter)))
classifiers.append(classifier)
print('-------------------------------------------')
return classifiers
# given a set of trained classifiers, compute optimal weights
def get_classifier_weights(self, classifiers, reserve_x, reserve_y):
# test classifiers on all data points
predictions = []
for i in range(self.num_classifiers):
if classifiers[i] is None:
predictions.append(np.zeros(reserve_y.shape))
continue
curr_predictions = classifiers[i].predict(reserve_x)
predictions.append(curr_predictions)
predictions = np.array(predictions).transpose()
# define loss function
def evaluate_ensemble(weights):
# ensure we don't get NaN values
if np.isnan(weights).any():
return 1e9
weighted_predictions = np.multiply(predictions, weights)
weighted_predictions = np.sum(weighted_predictions, axis=1)
score = metrics.log_loss(reserve_y, weighted_predictions)
return score
# evaluate score
# auprc = metrics.average_precision_score(reserve_y, weighted_predictions, pos_label=POSITIVE_LABEL)
#
# # pass in negative to minimize
# return -auprc
# constrain weights to sum to 1
cons = ({'type': 'eq', 'fun': lambda w: 1 - sum(w)})
# bound weights to be between 0 and 1
bounds = [(0,1)] * self.num_classifiers
# random restarts with random initial weights
#best_weights = np.ones(self.num_classifiers) / self.num_classifiers # default: equal weights
best_weights = None
best_score = 1e9
# ensure we have enough positive samples
unique_vals, unique_counts = np.unique(reserve_y, return_counts=True)
unique_dict = dict(zip(unique_vals, unique_counts))
if VERBOSE:
print(unique_dict)
if 1 not in unique_dict or unique_dict[1] < 5:
print(' not enough positive labels. skipping')
return best_weights
for _ in range(10):
w = np.random.rand(self.num_classifiers)
w = w / np.sum(w)
res = minimize(evaluate_ensemble, w, method='SLSQP', bounds=bounds, constraints=cons)
if res.fun < best_score:
best_weights = res.x
best_score = res.fun
if VERBOSE:
print('best score {}, weights {}'.format(best_score, np.around(best_weights, 3)))
return best_weights
def train_iware(self, all_train_x, all_train_y, all_train_effort, use_balanced=False, nsplits=5):
self.patrol_thresholds = self.get_patrol_thresholds(all_train_effort)
print('shape x', all_train_x.shape)
print('shape y', all_train_y.shape)
print('shape train_effort', all_train_effort.shape)
# print('k-fold cross validation, k = {}'.format(nsplits))
# skf = StratifiedKFold(nsplits, shuffle=True)
#
# all_weights = np.zeros((nsplits, self.num_classifiers, self.num_classifiers))
#
#
#
# # reserve some test data as validation set
# # to assign weights to classifiers
# k = 0
# for train_index, reserve_index in skf.split(all_train_x, all_train_y):
# train_x = all_train_x[train_index]
# train_y = all_train_y[train_index]
# train_effort = all_train_effort[train_index]
#
# reserve_x = all_train_x[reserve_index]
# reserve_y = all_train_y[reserve_index]
# reserve_effort = all_train_effort[reserve_index]
#
#
# print('-------------------------------------------')
# print('training classifiers with limited train data, k = {}'.format(k))
# print('-------------------------------------------')
#
# classifiers = self.train_classifiers(train_x, train_y, train_effort, use_balanced)
#
#
# print('-------------------------------------------')
# print('finding weights for classifiers')
# print('-------------------------------------------')
#
# # ----------------------------------------------
# # find appropriate weights for classifiers
# # ----------------------------------------------
# for i in range(self.num_classifiers):
# #### filter data
# # find points within specified threshold
# if i == 0:
# idx = np.where(reserve_effort < self.patrol_thresholds[i+1])[0]
# elif i == self.num_classifiers - 1:
# idx = np.where(self.patrol_thresholds[i] <= reserve_effort)[0]
# else:
# idx = np.where(np.logical_and(self.patrol_thresholds[i] <= reserve_effort, reserve_effort < self.patrol_thresholds[i+1]))[0]
#
# filter_x = reserve_x[idx]
# filter_y = reserve_y[idx]
# print('classifier {}: {} points, {} positive'.format(i, filter_x.shape[0], np.count_nonzero(filter_y == POSITIVE_LABEL)))
# weights = self.get_classifier_weights(classifiers, filter_x, filter_y)
#
# # if weights were not set, assign classifier weight to just 1 at classifier location (corresponding to the matrix diagonal)
# if weights is None:
# weights = np.zeros(self.num_classifiers)
# weights[i] = 1
#
# all_weights[k, i, :] = weights
#
# k += 1
#
# # average all classifier weights
# self.weights = all_weights.mean(0)
# print('weights: ', np.around(self.weights, 4))
# self.weights = np.eye(self.num_classifiers)
self.weights = self.get_vote_matrix()
print('-------------------------------------------')
print('training classifiers with all train data')
print('-------------------------------------------')
self.classifiers = self.train_classifiers(all_train_x, all_train_y, all_train_effort, use_balanced)
# TODO: does this need to be moved?
# need train_x later for random forest variance
if self.method == 'rf':
self.train_x_norm = np.copy(all_train_x)
def test_iware(self, test_x, test_y, test_effort, output_path):
        if self.patrol_thresholds is None:
            raise ValueError('No patrol thresholds. train_iware() may not have been called.')
        if self.classifiers is None:
            raise ValueError('No classifiers. train_iware() may not have been called.')
        if self.weights is None:
            raise ValueError('No weights. train_iware() may not have been called.')
for i in range(len(self.weights)):
print('classifier {}, weights {}, sum {}'.format(i, np.around(self.weights[i], 4), self.weights[i].sum()))
# # test classifiers on all data points
# predictions = []
# for i in range(self.num_classifiers):
# if self.classifiers[i] is None:
# # predictions.append(None)
# predictions.append(np.zeros(test_x.shape))
# continue
#
# curr_predictions = self.classifiers[i].predict(test_x)
# predictions.append(curr_predictions)
# predictions = np.array(predictions).transpose()
#
# weighted_predictions = np.multiply(predictions, self.weights)
# weighted_predictions = np.sum(weighted_predictions, axis=1)
#
# evaluate_results(test_y, weighted_predictions)
#
# return
###########
# predicted probability of illegal activity observation on each data point
num_test = test_y.shape[0]
weighted_predictions = np.zeros(num_test)
weighted_variances = np.zeros(num_test)
all_predictions = np.zeros((num_test, self.num_classifiers))
if self.method == 'gp' or self.method == 'rf':
all_variances = np.zeros((num_test, self.num_classifiers))
# TODO: can i do this portion more efficiently?
# compute the classification interval for each point
classification = np.zeros(num_test)
for i in range(num_test):
smaller_thresholds = np.where(test_effort[i] > self.patrol_thresholds)[0]
# patrol effort at this point may be less than all threshold values
if len(smaller_thresholds) == 0:
classification[i] = 0
else:
classification[i] = smaller_thresholds[-1]
classification = classification.astype(int)
for i in range(self.num_classifiers):
if self.classifiers[i] is None:
print('classifier {} is none; skipping'.format(i))
continue
start_time = time.time()
# compute variance
if self.method == 'gp' or self.method == 'rf':
if self.method == 'rf':
assert self.train_x_norm is not None
curr_predictions, curr_variances = self.classifiers[i].predict_proba(test_x, return_var=True, train_x=self.train_x_norm)
elif self.method == 'gp':
# curr_predictions, curr_variances = self.classifiers[i].predict_proba(test_x, return_var=True)
curr_predictions = self.classifiers[i].predict_proba(test_x)
curr_variances = self.classifiers[i].predict_var(test_x)
# curr_variances = curr_variances[:, 1]
# normalize variance values
curr_variances = curr_variances - np.min(curr_variances)
curr_variances = curr_variances / np.max(curr_variances)
all_variances[:, i] = curr_variances
# this method doesn't allow variance :(
else:
curr_predictions = self.classifiers[i].predict_proba(test_x)
curr_predictions = curr_predictions[:, 1] # probability of positive label
all_predictions[:, i] = curr_predictions
# TODO: make more efficient!
multiplier = np.zeros(num_test)
for j in range(num_test):
multiplier[j] = self.weights[classification[j]][i]
# scale increase by the multiplier for each data point
weighted_predictions += np.multiply(curr_predictions, multiplier)
if self.method == 'gp' or self.method == 'rf':
weighted_variances += np.multiply(curr_variances, multiplier)
print(' classifier {}, test time {:.3f}'.format(i, time.time() - start_time))
# save out predictions to CSV
print(' save out predictions...')
predictions_df = pd.DataFrame(data=all_predictions, columns=['threshold={}'.format(thresh) for thresh in self.patrol_thresholds])
predictions_out = '{}/{}_{}_predictions.csv'.format(output_path, self.method, self.num_classifiers)
print(' {}'.format(predictions_out))
predictions_df.to_csv(predictions_out)
# save out variances to CSV
if self.method == 'gp' or self.method == 'rf':
print(' save out variances...')
variances_df = pd.DataFrame(data=all_variances, columns=['threshold={}'.format(thresh) for thresh in self.patrol_thresholds])
variances_df.to_csv('{}/{}_{}_variances.csv'.format(output_path, self.method, self.num_classifiers))
combined_df = pd.DataFrame({'predictions': weighted_predictions, 'variances': weighted_variances})
combined_df.to_csv('{}/{}_{}_weighted_pred_var.csv'.format(output_path, self.method, self.num_classifiers))
### evaluate
results = evaluate_results(test_y, weighted_predictions)
print(results)
f = open('{}/{}_{}.txt'.format(output_path, self.method, self.num_classifiers), 'w')
f.write('park {}, test year {}\n'.format(self.park, self.year))
f.write('method {}, num_classifiers {}\n'.format(self.method, self.num_classifiers))
f.write('thresholds {}\n'.format(self.patrol_thresholds))
f.write('\n\n')
f.write(results)
f.write('\n\n\n')
f.write('weights\n{}\n'.format(np.around(self.weights, 5)))
f.close()
pickle_data = {'park': self.park,
'num_classifiers': self.num_classifiers, 'method': self.method,
#'classifiers': self.classifiers,
'weights': self.weights,
'thresholds': self.patrol_thresholds,
'predictions': weighted_predictions,
'results': results
}
pickle.dump(pickle_data, open('{}/{}_{}.p'.format(output_path, self.method, self.num_classifiers), 'wb'))
# # display performance on only first classifier
# # only using the first is the same as no iWare-E ensembling
# print('-------------------------------------------')
# print('testing - only first classifier')
# print('-------------------------------------------')
#
# predict_test_probs = classifiers[0].predict_proba(test_x)
# predict_test_pos_probs = predict_test_probs[:,1]
# evaluate_results(test_y, predict_test_pos_probs)
#
# # write out predictions to file
# predict_out = pd.DataFrame(data={'predictions': predict_test_pos_probs, 'labels': test_y})
# predict_out.to_csv('output/test_predictions_{}_{}_method_{}.csv'.format(self.park, TEST_YEAR, self.method))
###########################################################
# run train/test code to evaluate predictive models
###########################################################
# prepare data: split into train/test and normalize
def train_test_split_by_year(self, features, labels, patrol_effort, year_col, test_year, test_section=None, section_col=None):
# specifying the section is optional
if test_section is not None:
assert section_col is not None
if test_section:
# just one section of test data
train_idx = np.where(np.logical_or(year_col < test_year, section_col < test_section))[0]
test_idx = np.where(np.logical_and(year_col == test_year, section_col == test_section))[0]
else:
# full year of test data
train_idx = np.where(year_col < test_year)[0]
test_idx = np.where(year_col == test_year)[0]
train_x = features[train_idx, :]
train_y = labels[train_idx]
train_effort = patrol_effort[train_idx]
test_x = features[test_idx, :]
test_y = labels[test_idx]
test_effort = patrol_effort[test_idx]
train_x, test_x = normalize_data(train_x, test_x)
print('train x, y', train_x.shape, train_y.shape)
print('test x, y ', test_x.shape, test_y.shape)
print('patrol effort train, test ', train_effort.shape, test_effort.shape)
return train_x, test_x, train_y, test_y, train_effort, test_effort
###########################################################
# iWare-E for predicting future risk
###########################################################
# use all provided data to make predictions
def make_predictions(self, predict_section, features_raw, features, feature_names,
labels, patrol_effort, section_col, input_static_feats, output_path,
test_temp=None, test_precip=None, gpp_filename=None):
print('time to make some predictions!')
predict_year = self.year
# ----------------------------------------------
# get training data
# ----------------------------------------------
# use all data before specified (predict_year, predict_section)
train_idx = np.where(np.logical_or(features_raw['year'] < predict_year,
np.logical_and(features_raw['year'] == predict_year, features_raw['section'] < predict_section)))[0]
print(' features shape', features_raw.shape)
        print('  train_idx   ', len(train_idx), train_idx)
train_x = features[train_idx, :]
train_y = labels[train_idx]
train_patrol_effort = patrol_effort[train_idx]
# ----------------------------------------------
# get data to predict on
# ----------------------------------------------
if predict_section == 0:
prev_year = predict_year - 1
num_section = np.max(section_col)
print(' num section', num_section)
prev_section = num_section
else:
prev_year = predict_year
prev_section = predict_section - 1
print(' test section: year {}, section {}'.format(predict_year, predict_section))
print(' prev section: year {}, section {}'.format(prev_year, prev_section))
# ----------------------------------------------
# set up data arrays
# ----------------------------------------------
# get past patrol effort for the test section
prev_section_idx = np.where(np.logical_and(features_raw['year'] == prev_year, features_raw['section'] == prev_section))
past_patrol_effort = patrol_effort[prev_section_idx]
prev_section_spatial_id = features_raw['spatial_id'].values[prev_section_idx]
patrol_effort_df = pd.DataFrame({'spatial_id': prev_section_spatial_id,
'past_patrol_effort': past_patrol_effort})
# get all static features
static_features = pd.read_csv(input_static_feats)
static_features.drop(columns=static_features.columns[0], inplace=True)
# create features array and add in past_patrol_effort
predict_x_df = static_features.join(patrol_effort_df.set_index('spatial_id'), on='spatial_id', how='left')
predict_x_df['past_patrol_effort'].fillna(0, inplace=True)
# add climate info
if test_temp is not None and test_precip is not None:
predict_x_df['temp'] = test_temp * np.ones(static_features.shape[0])
predict_x_df['precip'] = test_precip * np.ones(static_features.shape[0])
# add GPP info
if gpp_filename is not None:
            new_gpp = pd.read_csv(gpp_filename)  # use the provided GPP file rather than a hard-coded path
predict_x_df['gpp'] = new_gpp['2019-0']
# arrange columns to match training data
store_columns = predict_x_df[['spatial_id', 'x', 'y']]
predict_x_df.drop(columns=['spatial_id', 'x', 'y'], inplace=True)
predict_x_df = predict_x_df[feature_names]
predict_x = predict_x_df.values
# normalize data
train_x, predict_x = normalize_data(train_x, predict_x)
# ----------------------------------------------
# train classifiers
# ----------------------------------------------
print('training classifiers on {} points...'.format(train_x.shape))
train_start_time = time.time()
        self.train_iware(train_x, train_y, train_patrol_effort)  # trained classifiers are stored on self.classifiers
total_train_time = time.time() - train_start_time
print('total train time {:.3f}'.format(total_train_time))
# ----------------------------------------------
# run classifiers to get set of predictions
# ----------------------------------------------
        # initialize array to store predictions from each classifier
print('making predictions on year {} section {}... {} points'.format(predict_year, predict_section, predict_x.shape))
final_predictions = np.zeros((predict_x.shape[0], self.num_classifiers))
if self.method == 'gp' or self.method == 'rf':
final_variances = np.zeros((predict_x.shape[0], self.num_classifiers))
# make predictions with each classifier
for i in range(self.num_classifiers):
start_time = time.time()
# this classifier had no training points, so we skip it
if self.classifiers[i] is None:
final_predictions[:, i] = np.zeros((final_predictions.shape[0]))
continue
if self.method == 'gp' or self.method == 'rf':
if self.method == 'rf':
curr_predictions, curr_variances = self.classifiers[i].predict_proba(predict_x, return_var=True, train_x=train_x)
else:
# curr_predictions, curr_variances = self.classifiers[i].predict_proba(predict_x, return_var=True)
curr_predictions = self.classifiers[i].predict_proba(predict_x)
curr_variances = self.classifiers[i].predict_var(predict_x)
# curr_variances = curr_variances[:, 1]
print('variance min {} max {}'.format(np.min(curr_variances), np.max(curr_variances)))
# normalize variance values
# curr_variances = curr_variances - np.min(curr_variances)
# curr_variances = curr_variances / np.max(curr_variances)
final_variances[:, i] = curr_variances
else:
curr_predictions = self.classifiers[i].predict_proba(predict_x)
curr_predictions = curr_predictions[:, 1] # probability of positive label
final_predictions[:, i] = curr_predictions
# predict_x_df.to_csv('predict_x.csv', encoding='utf-16')
# np.savetxt('predict_x_norm.csv', predict_x, delimiter=',', encoding='utf-16', fmt='%.3f')
# np.savetxt('train_x.csv', self.train_x_norm, delimiter=',', encoding='utf-16', fmt='%.3e')
# np.savetxt('train_x_float.csv', self.train_x_norm, delimiter=',', encoding='utf-16', fmt='%.3f')
# save out predictions to CSV
print(' save out predictions...')
predictions_df = pd.DataFrame(data=final_predictions, columns=['threshold={}'.format(thresh) for thresh in self.patrol_thresholds])
predictions_df = pd.concat([store_columns, predictions_df], axis=1)
predictions_filename = '{}/predictions_{}_{}_method_{}_{}.csv'.format(output_path, self.park, predict_year, self.method, self.num_classifiers)
print(' {}'.format(predictions_filename))
predictions_df.to_csv(predictions_filename)
# save out variances to CSV
if self.method == 'gp' or self.method == 'rf':
print(' save out variances...')
variances_df = pd.DataFrame(data=final_variances, columns=['threshold={}'.format(thresh) for thresh in self.patrol_thresholds])
variances_df = pd.concat([store_columns, variances_df], axis=1)
variances_df.to_csv('{}/variances_{}_{}_method_{}_{}.csv'.format(output_path, self.park, predict_year, self.method, self.num_classifiers))
return predictions_df
# used for post-hoc analysis of field test data
# (we want to ignore the true data and pretend we don't know it)
    def field_test_make_predictions(self, predict_year, predict_section, features_raw, features, labels,
                                    patrol_effort, input_static_feats, feature_names):
print('time to make some predictions!')
# ----------------------------------------------
# GET TRAINING DATA
# ----------------------------------------------
# get last quarter of patrol effort
predict_mask = np.logical_and(features_raw['year'] == predict_year, features_raw['section'] == predict_section)
predict_train_idx = np.where(np.logical_not(predict_mask))[0]
train_x = features[predict_train_idx, :]
train_patrol_effort = patrol_effort[predict_train_idx]
train_y = labels[predict_train_idx]
# ----------------------------------------------
# GET DATA FOR PREDICTIONS
# ----------------------------------------------
# get indices from available cells at the specified time interval
predict_idx = np.where(predict_mask)[0]
# get past patrol effort for those available cells
predict_spatial_id = features_raw['spatial_id'].values[predict_idx]
predict_patrol_effort = patrol_effort[predict_idx]
patrol_effort_df = pd.DataFrame({'spatial_id': predict_spatial_id, 'past_patrol_effort': predict_patrol_effort})
# get all static features
static_features = pd.read_csv(input_static_feats)
# create features array
predict_x_df = static_features.join(patrol_effort_df.set_index('spatial_id'), on='Var1', how='left')
predict_x_df['past_patrol_effort'].fillna(0, inplace=True)
predict_x_df.drop(columns=['Var1', 'x', 'y'], inplace=True)
# arrange columns to match training data
predict_x_df = predict_x_df[feature_names]
predict_x = predict_x_df.values
# ----------------------------------------------
# normalize data
# ----------------------------------------------
train_x, predict_x = normalize_data(train_x, predict_x)
# ----------------------------------------------
# train classifiers
# ----------------------------------------------
        # print('training classifiers on {} points...'.format(train_x.shape))
        train_start_time = time.time()
        self.train_iware(train_x, train_y, train_patrol_effort)
total_train_time = time.time() - train_start_time
print('total train time {:.3f}'.format(total_train_time))
# ----------------------------------------------
# run classifiers to get set of predictions
# ----------------------------------------------
        # initialize array to store predictions from each classifier
print('making predictions on year {} section {}... {} points'.format(predict_year, predict_section, predict_x.shape))
final_predictions = np.zeros((predict_x.shape[0], self.num_classifiers))
# make predictions with each classifier
for i in range(self.num_classifiers):
start_time = time.time()
# this classifier had no training points, so we skip it
            if self.classifiers[i] is None:
final_predictions[:, i] = np.zeros((final_predictions.shape[0]))
continue
            curr_predictions = self.classifiers[i].predict_proba(predict_x)
curr_predictions = curr_predictions[:, 1] # probability of positive label
final_predictions[:, i] = curr_predictions
# save out predictions to CSV
print('save out predictions...')
        predictions_df = pd.DataFrame(data=final_predictions, columns=['threshold={}'.format(thresh) for thresh in self.patrol_thresholds])
# start indexing from 1 to be consistent with other files
predictions_df.index = np.arange(1, len(predictions_df) + 1)
predictions_df.to_csv('predictions_{}_{}_method_{}.csv'.format(self.park, predict_year, self.method)) #float_format='%.4f',
###########################################################
# variation attempts for filtering data
###########################################################
#### filter data
# get training data for this threshold
# # MY MODIFIED APPROACH
# # makes things run faster, and sometimes get decent results
# # only points within threshold interval
# if i == 0:
# idx = np.where(train_effort < patrol_thresholds[i+1])[0]
# elif i == num_classifiers - 1:
# idx = np.where(train_effort >= patrol_thresholds[i])[0]
# else:
# idx = np.where(np.logical_and(train_effort >= patrol_thresholds[i], train_effort < patrol_thresholds[i+1]))[0]
# # points within threshold interval AND all positive points
# if i == 0:
# idx = np.where(np.logical_or(train_effort < patrol_thresholds[i+1], train_y == POSITIVE_LABEL))[0]
# elif i == num_classifiers - 1:
# idx = np.where(np.logical_or(train_effort >= patrol_thresholds[i], train_y == POSITIVE_LABEL))[0]
# else:
# idx = np.where(np.logical_or(np.logical_and(train_effort >= patrol_thresholds[i], train_effort < patrol_thresholds[i+1]), train_y == POSITIVE_LABEL))[0]
# ------------------------------------------------------------------
# this is the original iWare-E approach
# all points above threshold
# if PARK == 'SWS':
# # don't keep positive labels for SWS because of the strong label imbalance
# idx = np.where(train_effort >= patrol_thresholds[i])[0]
# else:
# # AND POINTS WHERE LABEL IS POSITIVE
# idx = np.where(np.logical_or(train_effort >= patrol_thresholds[i], train_y == POSITIVE_LABEL))[0]
###########################################################
# calibration curves
###########################################################
# from calibration_curves import *
# run_all_calibration_curves(train_x, train_y, test_x, test_y)
# sys.exit(0)
|
{"hexsha": "fd924bc61dfe40c5d9c56a1cc38c70fdec605c48", "size": 45033, "ext": "py", "lang": "Python", "max_stars_repo_path": "iware/iware.py", "max_stars_repo_name": "lily-x/PAWS-public", "max_stars_repo_head_hexsha": "32f79d10a1187686f301be447de9f4d0e83cf127", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-01-20T12:56:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-04T19:39:15.000Z", "max_issues_repo_path": "iware/iware.py", "max_issues_repo_name": "lily-x/PAWS-public", "max_issues_repo_head_hexsha": "32f79d10a1187686f301be447de9f4d0e83cf127", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "iware/iware.py", "max_forks_repo_name": "lily-x/PAWS-public", "max_forks_repo_head_hexsha": "32f79d10a1187686f301be447de9f4d0e83cf127", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-17T02:01:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-17T02:01:01.000Z", "avg_line_length": 43.8490749757, "max_line_length": 174, "alphanum_fraction": 0.605089601, "include": true, "reason": "import numpy,from scipy", "num_tokens": 9585}
|
SUBROUTINE DHC (P,PA,PB,XI,NAT,IF,IM,IL,JF,JM,JL,
1NORBS,DENER)
IMPLICIT DOUBLE PRECISION (A-H,O-Z)
DIMENSION P(*), PA(*), PB(*)
DIMENSION XI(3,*),NFIRST(2),NMIDLE(2),NLAST(2),NAT(*)
C***********************************************************************
C
C DHC CALCULATES THE ENERGY CONTRIBUTIONS FROM THOSE PAIRS OF ATOMS
C THAT HAVE BEEN MOVED BY SUBROUTINE DERIV.
C
C***********************************************************************
COMMON /KEYWRD/ KEYWRD
1 /ONELEC/ USS(107),UPP(107),UDD(107)
COMMON /EULER / TVEC(3,3), ID
COMMON /NUMCAL/ NUMCAL
CHARACTER*80 KEYWRD
LOGICAL UHF
DIMENSION H(171), SHMAT(9,9), F(171),
1 WJ(100), E1B(10), E2A(10), WK(100), W(100)
DATA ICALCN /0/
IF( ICALCN.NE.NUMCAL) THEN
ICALCN=NUMCAL
WLIM=4.D0
IF(ID.EQ.0)WLIM=0.D0
UHF=(INDEX(KEYWRD,'UHF') .NE. 0)
ENDIF
NFIRST(1)=1
NMIDLE(1)=IM-IF+1
NLAST(1)=IL-IF+1
NFIRST(2)=NLAST(1)+1
NMIDLE(2)=NFIRST(2)+JM-JF
NLAST(2)=NFIRST(2)+JL-JF
LINEAR=(NLAST(2)*(NLAST(2)+1))/2
DO 10 I=1,LINEAR
F(I)=0.D0
10 H(I)=0.0D00
DO 20 I=1,LINEAR
20 F(I)=H(I)
JA=NFIRST(2)
JB=NLAST(2)
JC=NMIDLE(2)
IA=NFIRST(1)
IB=NLAST(1)
IC=NMIDLE(1)
JT=JB*(JB+1)/2
J=2
I=1
NJ=NAT(2)
NI=NAT(1)
CALL H1ELEC(NI,NJ,XI(1,1),XI(1,2),SHMAT)
IF(NAT(1).EQ.102.OR.NAT(2).EQ.102) THEN
K=(JB*(JB+1))/2
DO 30 J=1,K
30 H(J)=0.D0
ELSE
J1=0
DO 40 J=JA,JB
JJ=J*(J-1)/2
J1=J1+1
I1=0
DO 40 I=IA,IB
JJ=JJ+1
I1=I1+1
H(JJ)=SHMAT(I1,J1)
F(JJ)=SHMAT(I1,J1)
40 CONTINUE
ENDIF
KR=1
IF(ID.EQ.0)THEN
CALL ROTATE (NJ,NI,XI(1,2),XI(1,1),W(KR),KR,E2A,E1B,ENUCLR,100.
1D0)
ELSE
CALL SOLROT (NJ,NI,XI(1,2),XI(1,1),WJ,WK,KR,E2A,E1B,ENUCLR,100.
1D0)
ENDIF
IF(WJ(1).LT.WLIM)THEN
DO 50 I=1,KR-1
50 WK(I)=0.D0
ENDIF
C
C * ENUCLR IS SUMMED OVER CORE-CORE REPULSION INTEGRALS.
C
I2=0
DO 60 I1=IA,IC
II=I1*(I1-1)/2+IA-1
DO 60 J1=IA,I1
II=II+1
I2=I2+1
H(II)=H(II)+E1B(I2)
60 F(II)=F(II)+E1B(I2)
DO 70 I1=IC+1,IB
II=(I1*(I1+1))/2
F(II)=F(II)+E1B(1)
70 H(II)=H(II)+E1B(1)
I2=0
DO 80 I1=JA,JC
II=I1*(I1-1)/2+JA-1
DO 80 J1=JA,I1
II=II+1
I2=I2+1
H(II)=H(II)+E2A(I2)
80 F(II)=F(II)+E2A(I2)
DO 90 I1=JC+1,JB
II=(I1*(I1+1))/2
F(II)=F(II)+E2A(1)
90 H(II)=H(II)+E2A(1)
CALL FOCK2D(F,P,PA,W, WJ, WK,2,NFIRST,NMIDLE,NLAST)
EE=HELECT(NLAST(2),PA,H,F)
IF( UHF ) THEN
DO 100 I=1,LINEAR
100 F(I)=H(I)
CALL FOCK2D(F,P,PB,W, WJ, WK,2,NFIRST,NMIDLE,NLAST)
EE=EE+HELECT(NLAST(2),PB,H,F)
ELSE
EE=EE*2.D0
ENDIF
DENER=EE+ENUCLR
RETURN
C
END
|
{"hexsha": "ff9e739df7bc6bdd8e0004ff235b568b1af42bb0", "size": 3159, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "1989_MOPAC5/dhc.f", "max_stars_repo_name": "openmopac/MOPAC-archive", "max_stars_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-16T20:53:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T20:54:11.000Z", "max_issues_repo_path": "1989_MOPAC5/dhc.f", "max_issues_repo_name": "openmopac/MOPAC-archive", "max_issues_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "1989_MOPAC5/dhc.f", "max_forks_repo_name": "openmopac/MOPAC-archive", "max_forks_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1074380165, "max_line_length": 72, "alphanum_fraction": 0.4561570117, "num_tokens": 1369}
|
from os import system
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
####################################################################
def initTheta(X,degree):
size=getThetaSizeFromDegree(X,degree)
return np.zeros((size, 1))
####################################################################
def listToArray(xlist):
return np.array(xlist)
####################################################################
def addBiasVector(X):
r=np.column_stack((np.ones((X.shape[0],1)),X))
return r
def concatenateVectors(X,Y):
r=np.column_stack((X,Y))
return r
####################################################################
def clearScreen():
system('cls')
return
####################################################################
def loadData(fileName):
data= np.loadtxt(fileName, delimiter=',')
if (len(data.shape)==1):
data.shape=(data.shape[0],1)
return data
####################################################################
def plotPlane(theta,X,y):
degree=getDegreeFromTheta(theta,X)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#plt.subplot(122)
aX=X[:,0]
aY=X[:,1]
aZ=y
ax.scatter(aX,aY,aZ,marker="o",color="r")
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
u = np.linspace(x_min, x_max,20)
v = np.linspace(y_min, y_max, 20)
z = np.zeros(( len(u), len(v) ))
U,V=np.meshgrid(u,v)
for i in range(len(u)):
for j in range(len(v)):
uv= concatenateVectors(np.array([[u[i]]]),np.array([[v[j]]]))
z[i,j] =np.sum( np.matmul(mapFeature(uv,degree),theta) )
z = np.transpose(z)
ax.scatter(U,V,z,marker="+")
plt.show()
####################################################################
def plotLine3d(theta1,theta,X,y):
degree=getDegreeFromTheta(theta,X)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#plt.subplot(122)
aX=X[:,0]
aY=X[:,1]
aZ=y
ax.scatter(aX,aY,aZ,marker="o",color="r")
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
u = np.linspace(x_min, x_max,50)
u.shape=(len(u),1)
v=predict(theta1,u)
z = np.zeros( len(u))
for i in range(len(u)):
uv= concatenateVectors(np.array([[u[i]]]),np.array([[v[i]]]))
z[i] =np.sum( np.matmul(mapFeature(uv,degree),theta) )
z = np.transpose(z)
ax.plot(u,v,z)
plt.show()
####################################################################
def plotLine(theta,X,y):
plt.subplot(122)
plt.scatter(X,y)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
u = np.linspace(x_min, x_max, 100)
u.shape=(len(u),1)
v=predict(theta,u)
plt.plot(u, v,color='r')
plt.show()
####################################################################
def getDegreeFromTheta(theta,X):
sz=theta.shape[0]
if (X.shape[1]==2):
degree=(np.sqrt(sz*8+1)-3)/2
degree=int(degree)
else:
degree=sz-1
return degree
####################################################################
def getThetaSizeFromDegree(X,degree):
sz=X.shape[1]
if (sz==2):
sz=(degree+1)*(degree+2)/2
sz=int(sz)
else:
sz=degree+1
return sz
####################################################################
def predict(theta,X):
degree=getDegreeFromTheta(theta,X)
X=mapFeature(X,degree)
Py=np.matmul(X,theta) #Hypothesis
return Py
####################################################################
def accurracy(Y1,Y2):
m=np.mean(Y1==Y2)
return m*100
####################################################################
def computeCost(theta,X,y):
m = X.shape[0]
h= X @ theta #Hypothesis
h.shape=y.shape
err=h-y
errSqr=np.multiply(err,err)
J=(1.0/(2.0*m))* np.sum(errSqr)
return J
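####################################################################
# Added worked example (synthetic numbers, not from the original file):
# with h = X @ theta = [1, 2] and y = [0, 0], J = (1/(2*2)) * (1 + 4) = 1.25.
def demoComputeCost():
    X = np.array([[1.0], [2.0]])
    theta = np.array([[1.0]])
    y = np.array([[0.0], [0.0]])
    return computeCost(theta, X, y)   # expected 1.25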
####################################################################
def mapFeature(X,degree):
sz=getThetaSizeFromDegree(X,degree)
out=np.ones((X.shape[0],sz))
sz=X.shape[1]
if (sz==2):
X1=X[:, 0:1]
X2=X[:, 1:2]
col=1
for i in range(1,degree+1):
for j in range(0,i+1):
out[:,col:col+1]= np.multiply(np.power(X1,i-j),np.power(X2,j))
col+=1
return out
else:
for i in range(1,degree+1):
out[:,i:i+1]= np.power(X,i)
return out
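####################################################################
# Added sketch (not in the original): for a two-column X and degree 2, mapFeature
# expands each row [x1, x2] into [1, x1, x2, x1^2, x1*x2, x2^2], i.e.
# (degree+1)(degree+2)/2 = 6 columns.
def demoMapFeature():
    X = np.array([[2.0, 3.0]])
    return mapFeature(X, 2)   # expected [[1, 2, 3, 4, 6, 9]]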
####################################################################
def computeGradient(theta,X,y):
m,n = X.shape
theta.shape = (n,1)
h=np.matmul( X,theta) #Hypothesis
h.shape=y.shape
err=h-y
d=np.dot(err.T,X)
g= (1.0/m)*d
return g.flatten()
####################################################################
def optimizedGradientDescent(X, y, theta,degree):
oldShape=theta.shape
X=mapFeature(X,degree)
myargs=(X, y[:,0])
Result = op.minimize(fun = computeCost, x0 = theta.flatten(), args =myargs, method = 'TNC',jac = computeGradient)
theta = Result.x
#theta = op.fmin(computeCost, x0=theta, args=myargs)
#theta,_,_,_,_,_,_= op.fmin_bfgs(computeCost, x0=theta, args=myargs, full_output=True)
theta.shape=oldShape
return theta
|
{"hexsha": "ea86414736b62f298a610f507789bb655fd57eaa", "size": 5481, "ext": "py", "lang": "Python", "max_stars_repo_path": "06_LinearRegression_Line3d/linearRegressionPlane.py", "max_stars_repo_name": "ManMohan291/PyProgram", "max_stars_repo_head_hexsha": "edcaa927bd70676bd14355acad7262ae2d32b8e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-09-07T17:44:54.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-07T17:44:57.000Z", "max_issues_repo_path": "06_LinearRegression_Line3d/linearRegressionPlane.py", "max_issues_repo_name": "ManMohan291/PyProgram", "max_issues_repo_head_hexsha": "edcaa927bd70676bd14355acad7262ae2d32b8e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "06_LinearRegression_Line3d/linearRegressionPlane.py", "max_forks_repo_name": "ManMohan291/PyProgram", "max_forks_repo_head_hexsha": "edcaa927bd70676bd14355acad7262ae2d32b8e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3989637306, "max_line_length": 118, "alphanum_fraction": 0.4469987229, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1438}
|
import numpy as np
import pandas as pd
from plotnine import ggplot, aes, geom_bar, geom_col, geom_histogram
from plotnine import after_stat, theme, scale_x_sqrt, geom_text
from plotnine.tests import layer_data
n = 10 # Some even number greater than 2
# ladder: 1 appears once, 2 appears twice, 3 appears three times, ...
df = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)),
'z': np.repeat(range(n//2), range(3, n*2, 4))})
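# Added illustration (not part of the upstream tests): np.repeat with a sequence
# of counts builds the "ladder" above, i.e. value k is repeated k times.
def _demo_ladder(m=4):
    return np.repeat(range(m), range(m))   # -> [1, 2, 2, 3, 3, 3] for m=4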
_theme = theme(subplots_adjust={'right': 0.85})
def test_bar_count():
p = ggplot(df, aes('x')) + geom_bar(aes(fill='factor(z)'))
assert p + _theme == 'bar-count'
def test_col():
# The color indicates reveals the edges and the stacking
# that is going on.
p = (ggplot(df) +
geom_col(aes('x', 'z', fill='factor(z)'), color='black'))
assert p + _theme == 'col'
def test_histogram_count():
p = (ggplot(df, aes('x')) +
geom_histogram(aes(fill='factor(z)'), bins=n))
assert p + _theme == 'histogram-count'
def test_scale_transformed_breaks():
df = pd.DataFrame({'x': np.repeat(range(1, 5), range(1, 5))})
p = ggplot(df, aes('x')) + geom_histogram(breaks=[1, 2.5, 4])
out1 = layer_data(p)
out2 = layer_data(p + scale_x_sqrt())
np.testing.assert_allclose(out1.xmin, [1, 2.5])
np.testing.assert_allclose(out2.xmin, np.sqrt([1, 2.5]))
def test_stat_count_int():
df = pd.DataFrame({'x': ['a', 'b'], 'weight': [1, 2]})
p = (ggplot(df)
+ aes(x='x', weight='weight', fill='x')
+ geom_bar()
+ geom_text(aes(label=after_stat('count')), stat='count')
)
assert p + _theme == 'stat-count-int'
def test_stat_count_float():
df = pd.DataFrame({'x': ['a', 'b'], 'weight': [1.5, 2.5]})
p = (ggplot(df)
+ aes(x='x', weight='weight', fill='x')
+ geom_bar()
+ geom_text(aes(label=after_stat('count')), stat='count')
)
assert p + _theme == 'stat-count-float'
|
{"hexsha": "0ed2a64289678f47000780443817cbebdd35ba99", "size": 1950, "ext": "py", "lang": "Python", "max_stars_repo_path": "venv/Lib/site-packages/plotnine/tests/test_geom_bar_col_histogram.py", "max_stars_repo_name": "EkremBayar/bayar", "max_stars_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "venv/Lib/site-packages/plotnine/tests/test_geom_bar_col_histogram.py", "max_issues_repo_name": "EkremBayar/bayar", "max_issues_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "venv/Lib/site-packages/plotnine/tests/test_geom_bar_col_histogram.py", "max_forks_repo_name": "EkremBayar/bayar", "max_forks_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4647887324, "max_line_length": 68, "alphanum_fraction": 0.5923076923, "include": true, "reason": "import numpy", "num_tokens": 592}
|
# Improved GUI version (YOLO can be run on images)
from tkinter import *
import tkinter.ttk as ttk
import tkinter.filedialog
import os
from PIL import Image, ImageTk
from key_frame import get_keyframe
from key_frame import detect_cloth_by_yolo
import numpy as np
import cv2
class Tab1(ttk.Frame):
def __init__(self,mode, master=None, new_parameter=None, width=None, height=None):
super().__init__(master=master, width=width, height=height)
        # keep the frame at a fixed size instead of shrinking to fit its children
self.grid_propagate(0)
self.pack()
        # parameters that are ultimately returned to the caller
self.n_para = new_parameter
self.mode = mode
        # image display area
self.canvas1 = Canvas(self,bg="black", width=100, height=100)
self.canvas1.grid(row=0, column=0)
self.canvas1.photo = None
        self.image_on_canvas = self.canvas1.create_image( # place the image on the canvas
            0, # x coordinate
            0, # y coordinate
            image=self.canvas1.photo, # image object to display
            tag="illust", # identify the canvas item with a tag
            anchor=NW # anchor the placement at the top-left corner
        )
)
self.button1 = Button(self, text="Open Explorer", command=self.open_explorer1)
self.button1.grid(row=1, column=0)
def open_explorer1(self):
# fTyp = [("jpg","*.jpg"),("mp4","*.mp4;")]
fTyp = [("","*")]
iDir = os.path.abspath(os.path.dirname(__file__))
file = tkinter.filedialog.askopenfilename(filetypes = fTyp,initialdir = iDir)
self.n_para.parameter["mode"] = self.mode
self.n_para.parameter[self.mode]["query1"]["path"] = file
self.set_image(file)
def set_image(self, file):
if file.split(".")[-1] == "jpg":
img = Image.open(open(file, "rb"))
img.thumbnail((100,100), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
else:
self.n_para.parameter[self.mode]["query1"]["video"] = True
img = Image.open(open("動画アイコン.jpg", "rb"))
img.thumbnail((100,100), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
self.canvas1.photo = img
self.canvas1.itemconfig(self.image_on_canvas, image=self.canvas1.photo)
class Tab4(ttk.Frame):
def __init__(self,mode, master=None, new_parameter=None, width=None, height=None):
super().__init__(master=master, width=width, height=height)
self.grid_propagate(0)
self.pack()
self.n_para = new_parameter
self.mode = mode
        self.label1 = Label(self, text="色画像")  # "color image"
        self.label1.grid(row=0,column=0)
        self.label2 = Label(self, text="形状画像")  # "shape image"
        self.label2.grid(row=0,column=1)
self.canvas1 = Canvas(self,bg="black", width=100, height=100)
self.canvas1.grid(row=1, column=0)
self.canvas1.photo = None
self.image_on_canvas1 = self.canvas1.create_image(0,0,image=self.canvas1.photo,tag="illust",anchor=NW)
self.canvas2 = Canvas(self,bg="black", width=100, height=100)
self.canvas2.grid(row=1, column=1)
self.canvas2.photo = None
self.image_on_canvas2 = self.canvas2.create_image(0,0,image=self.canvas2.photo,tag="illust",anchor=NW)
self.button1 = Button(self, text="Open Explorer", command=self.open_explorer1)
self.button1.grid(row=2, column=0)
self.button2 = Button(self, text="Open Explorer", command=self.open_explorer2)
self.button2.grid(row=2, column=1)
def open_explorer1(self):
# fTyp = [("jpg","*.jpg"),("mp4","*.mp4")]
fTyp = [("","*")]
iDir = os.path.abspath(os.path.dirname(__file__))
file = tkinter.filedialog.askopenfilename(filetypes = fTyp,initialdir = iDir)
self.n_para.parameter["mode"] = self.mode
self.n_para.parameter[self.mode]["query1"]["path"] = file
self.set_image1(file)
def open_explorer2(self):
fTyp = [("","*")]
iDir = os.path.abspath(os.path.dirname(__file__))
file = tkinter.filedialog.askopenfilename(filetypes = fTyp,initialdir = iDir)
self.n_para.parameter["mode"] = self.mode
self.n_para.parameter[self.mode]["query2"]["path"] = file
self.set_image2(file)
def set_image1(self, file):
if file.split(".")[-1] == "jpg":
img = Image.open(open(file, "rb"))
img.thumbnail((100,100), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
else:
self.n_para.parameter[self.mode]["query1"]["video"] = True
img = Image.open(open("動画アイコン.jpg", "rb"))
img.thumbnail((100,100), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
self.canvas1.photo = img
self.canvas1.itemconfig(self.image_on_canvas1, image=self.canvas1.photo)
def set_image2(self, file):
if file.split(".")[-1] == "jpg":
img = Image.open(open(file, "rb"))
img.thumbnail((100,100), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
else:
self.n_para.parameter[self.mode]["query2"]["video"] = True
img = Image.open(open("動画アイコン.jpg", "rb"))
img.thumbnail((100,100), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
self.canvas2.photo = img
self.canvas2.itemconfig(self.image_on_canvas2, image=self.canvas2.photo)
class NotebookSample(ttk.Frame):
def __init__(self, master, new_parameter):
super().__init__(master)
self.create_widgets(new_parameter)
self.pack()
def create_widgets(self, new_parameter):
note = ttk.Notebook(self)
note.pack()
note1 = Tab1("normal",note,new_parameter,width=300,height=200)
note2 = Tab1("color",note,new_parameter,width=300,height=200)
note3 = Tab1("type",note,new_parameter,width=300,height=200)
note4 = Tab4("concat",note,new_parameter,width=300,height=200)
        note.add(note1,text="通常")  # "normal"
        note.add(note2,text="色")  # "color"
        note.add(note3,text="形状")  # "shape"
        note.add(note4,text="混合")  # "combined"
class New_parameter():
def __init__(self):
self.parameter = {
"mode":None,
"normal":{
"query1":{
"video":False,
"path":None,
"feature":None
}
},
"color":{
"query1":{
"video":False,
"path":None,
"feature":None
}
},
"type":{
"query1":{
"video":False,
"path":None,
"feature":None
}
},
"concat":{
"query1":{
"video":False,
"path":None,
"feature":None
},
"query2":{
"video":False,
"path":None,
"feature":None
}
}
}
#######################################################################################
# End of the standard settings screens
#######################################################################################
class Key_select(ttk.Frame):
def __init__(self, master=None, keyframes=None):
super().__init__(master=master)
self.index = None
self.var = IntVar()
self.var.set(0)
for i, image in enumerate(keyframes):
            # label
label = Label(self, text=str(i))
label.grid(row=int(i/5), column=i%5)
            # image
canvas = Canvas(self,bg="black", width=100, height=100)
canvas.grid(row=int(i/5)+1, column=i%5)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
image = cv2pil(image)
image.thumbnail((100,100), Image.ANTIALIAS)
image = ImageTk.PhotoImage(image)
canvas.photo = image
canvas.create_image(0,0,image=canvas.photo,tag="illust",anchor=NW)
            # radio button
radio = Radiobutton(self, value=i, variable=self.var)
radio.grid(row=int(i/5)+2, column=i%5)
def get_index(self):
return self.var.get()
class Image_select(ttk.Frame):
def __init__(self, master=None, keyframes=None):
super().__init__(master=master)
self.index = None
self.var = IntVar()
self.var.set(0)
for i, image in enumerate(keyframes):
            # label
label = Label(self, text=str(i))
label.grid(row=int(i/5), column=i%5)
            # image
canvas = Canvas(self,bg="black", width=100, height=100)
canvas.grid(row=int(i/5)+1, column=i%5)
# image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
image = cv2pil(image)
image.thumbnail((100,100), Image.ANTIALIAS)
image = ImageTk.PhotoImage(image)
canvas.photo = image
canvas.create_image(0,0,image=canvas.photo,tag="illust",anchor=NW)
            # radio button
radio = Radiobutton(self, value=i, variable=self.var)
radio.grid(row=int(i/5)+2, column=i%5)
def get_index(self):
return self.var.get()
def cv2pil(image):
    ''' Convert an OpenCV image (BGR ndarray) to a PIL image '''
    new_image = image.copy()
    if new_image.ndim == 2:  # grayscale
        pass
    elif new_image.shape[2] == 3:  # color (BGR -> RGB)
        new_image = new_image[:, :, ::-1]
    elif new_image.shape[2] == 4:  # with alpha channel (BGRA -> RGBA)
new_image = new_image[:, :, [2, 1, 0, 3]]
new_image = Image.fromarray(new_image)
return new_image
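# A minimal usage sketch for cv2pil (the image path "sample.jpg" is a
# hypothetical placeholder for illustration, not a file shipped with this project).
def _cv2pil_example(path="sample.jpg"):
    bgr = cv2.imread(path)      # OpenCV reads images as BGR uint8 arrays
    pil_img = cv2pil(bgr)       # cv2pil flips the channel order to RGB for PIL
    return pil_img.size         # (width, height) reported by PIL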
def select_key_frame(new_parameter, query_num, system_parameter):
    # pull the query path out of the parameters
input_video_path = new_parameter.parameter[new_parameter.parameter["mode"]][query_num]["path"]
    # feed the video into yolo (keyframe extraction)
keyframes, key_features = get_keyframe(input_video_path, system_parameter)
    # let the user pick one of the output images in the GUI
if len(keyframes) == 1:
img = keyframes[0]
feature = key_features[0]
else:
root = Tk()
# root.geometry("400x400")
x = Key_select(root, keyframes)
x.pack()
        Button(root, text="実行", command=root.destroy).pack()  # "実行" = "Run"
root.mainloop()
img = keyframes[x.get_index()]
feature = key_features[x.get_index()]
    # save the selected image
os.makedirs("key_query", exist_ok=True)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img = cv2pil(img)
img.save("key_query/"+query_num+".jpg")
    # update the parameters
new_parameter.parameter[new_parameter.parameter["mode"]][query_num]["path"] = "key_query/"+query_num+".jpg"
new_parameter.parameter[new_parameter.parameter["mode"]][query_num]["feature"] = feature
return new_parameter
def yolo_for_image(new_parameter, query_num, system_parameter):
    # pull the query path out of the parameters
input_image_path = new_parameter.parameter[new_parameter.parameter["mode"]][query_num]["path"]
    # load the image
images = []
images.append(cv2.imread(input_image_path))
    # feed the image into yolo
detected_images = detect_cloth_by_yolo(images)
    # let the user pick one of the detected images in the GUI
if len(detected_images) == 1:
img = detected_images[0]
feature = detected_images[0]
else:
root = Tk()
# root.geometry("400x400")
x = Image_select(root, detected_images)
x.pack()
        Button(root, text="実行", command=root.destroy).pack()  # "実行" = "Run"
root.mainloop()
img = detected_images[x.get_index()]
    # save the selected image
os.makedirs("key_query", exist_ok=True)
# img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img = cv2pil(img)
img.save("key_query/"+query_num+".jpg")
    # update the parameters
new_parameter.parameter[new_parameter.parameter["mode"]][query_num]["path"] = "key_query/"+query_num+".jpg"
return new_parameter
def select_image_and_video(system_parameter):
new_parameter = New_parameter()
master = Tk()
    master.title("検索設定")  # "search settings"
master.geometry("400x400")
NotebookSample(master, new_parameter)
    Button(master, text="実行", command=master.destroy).pack()  # "実行" = "Run"
master.mainloop()
if system_parameter["gui"]["use_yolo"] == True:
        # select the query image with the help of yolo
if new_parameter.parameter["mode"] != "concat":
            # single-query search modes (not the combined search)
if new_parameter.parameter[new_parameter.parameter["mode"]]["query1"]["video"] == True:
new_parameter = select_key_frame(new_parameter, "query1", system_parameter)
else:
                # yolo processing on a still image
new_parameter = yolo_for_image(new_parameter, "query1", system_parameter)
else:
if new_parameter.parameter[new_parameter.parameter["mode"]]["query1"]["video"] == True:
new_parameter = select_key_frame(new_parameter, "query1", system_parameter)
else:
                # yolo processing on a still image
new_parameter = yolo_for_image(new_parameter, "query1", system_parameter)
if new_parameter.parameter[new_parameter.parameter["mode"]]["query2"]["video"] == True:
new_parameter = select_key_frame(new_parameter, "query2", system_parameter)
else:
                # yolo processing on a still image
new_parameter = yolo_for_image(new_parameter, "query2", system_parameter)
else:
        # yolo disabled: only video queries need keyframe selection
if new_parameter.parameter["mode"] != "concat":
if new_parameter.parameter[new_parameter.parameter["mode"]]["query1"]["video"] == True:
new_parameter = select_key_frame(new_parameter, "query1", system_parameter)
else:
if new_parameter.parameter[new_parameter.parameter["mode"]]["query1"]["video"] == True:
new_parameter = select_key_frame(new_parameter, "query1", system_parameter)
if new_parameter.parameter[new_parameter.parameter["mode"]]["query2"]["video"] == True:
new_parameter = select_key_frame(new_parameter, "query2", system_parameter)
return new_parameter.parameter
if __name__ == '__main__':
from parameters import set_parameters
system_parameter = set_parameters()
a = select_image_and_video(system_parameter)
print(a)
|
{"hexsha": "c0c8857b1ffbf7555cbea4121f087c5d66af6d96", "size": 14347, "ext": "py", "lang": "Python", "max_stars_repo_path": "FIRS/gui.py", "max_stars_repo_name": "yuichikano/Fashion-Image-Retrieval-System", "max_stars_repo_head_hexsha": "5d712a4e400716e84337defe08f51c2165d44ade", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FIRS/gui.py", "max_issues_repo_name": "yuichikano/Fashion-Image-Retrieval-System", "max_issues_repo_head_hexsha": "5d712a4e400716e84337defe08f51c2165d44ade", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FIRS/gui.py", "max_forks_repo_name": "yuichikano/Fashion-Image-Retrieval-System", "max_forks_repo_head_hexsha": "5d712a4e400716e84337defe08f51c2165d44ade", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-30T10:20:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-30T10:20:13.000Z", "avg_line_length": 38.3609625668, "max_line_length": 112, "alphanum_fraction": 0.5677842058, "include": true, "reason": "import numpy", "num_tokens": 3605}
|
# File: test_bayesian_optimization.py
# File Created: Tuesday, 5th November 2019 10:00:04 am
# Author: Steven Atkinson (212726320@ge.com)
import os
import sys
import numpy as np
import torch
import pytest
base_path = os.path.join(os.path.dirname(__file__), "..")
if not base_path in sys.path:
sys.path.append(base_path)
from src import bayesian_optimization
torch_dtype = torch.double
class MockBase(bayesian_optimization.Base):
pass
class TestBase(object):
pass
class _TBase(object):
"""
Set up reusable parts of the testing
"""
@staticmethod
def _mock_train_function():
pass
@staticmethod
def _mock_append_function():
pass
@staticmethod
def _mock_predict_function(x, diag=True):
n = x.shape[0]
mean = x @ torch.ones(2, 1, dtype=torch_dtype)
if diag:
var = torch.ones(n, 1, dtype=torch_dtype)
return mean, var
else:
cov = 0.99 * torch.ones(n, n) + 0.01 * torch.eye(n, dtype=torch_dtype)
return mean, cov
class TestStaticDataset(_TBase):
@classmethod
def setup_class(cls):
cls.x_all = np.array([[1.0, 2.0], [20.0, 30.0]])
cls.y_all = np.array([[3.0], [4.0]])
def test_init(self):
bo = bayesian_optimization.StaticDataset(
self.x_all,
self.y_all,
self._mock_train_function,
self._mock_predict_function,
self._mock_append_function
)
def test_get_p_best(self):
"""
Note: because the prediction function is linear with a strong slope in
the (+, +) direction and the covariance is comparatively small, we
expect that the first input in x_all will be predicted as lower
basically all the time.
So, p_best should probably be exactly [1, 0].
A snapshot test (2019-11-05) shows this to be the case.
"""
bo = bayesian_optimization.StaticDataset(
self.x_all,
self.y_all,
self._mock_train_function,
self._mock_predict_function,
self._mock_append_function
)
# RNG seeds to ensure that this test replicates
np.random.seed(0)
torch.manual_seed(0)
x = bo.x_all
p_best = bo._get_p_best(x)
assert isinstance(p_best, np.ndarray)
assert p_best.ndim == 1
assert p_best.size == x.shape[0]
# Super rare that this wouldn't be the case (and shouldn't happen at all
# since I seeded the RNG and checked).
assert p_best[0] == 1.0, "p(best, 0) = %f? (should be 1.0)" % p_best[0]
assert p_best[1] == 0.0, "p(best, 1) = %f? (should be 0.0)" % p_best[1]
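# A small, hedged numeric sketch (not part of the original tests): with the
# mock predict function above, the means at x_all are [3., 50.] and each
# variance is 1, so draws for the first input are essentially always the lower
# ones, which is why p_best is expected to be [1, 0].
def _p_best_intuition(n_samples=10000):
    x = torch.tensor([[1.0, 2.0], [20.0, 30.0]], dtype=torch_dtype)
    mean, var = _TBase._mock_predict_function(x)
    draws = mean + var.sqrt() * torch.randn(n_samples, 2, 1, dtype=torch_dtype)
    best = draws.argmin(dim=1).squeeze()
    return torch.bincount(best, minlength=2).double() / n_samples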
|
{"hexsha": "e2fb843b920235d6ce31be21d450adfe7711f417", "size": 2747, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_bayesian_optimization.py", "max_stars_repo_name": "212726320/BEBO-1", "max_stars_repo_head_hexsha": "2909b3a00161b2e29fad667add30392abc11a968", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_bayesian_optimization.py", "max_issues_repo_name": "212726320/BEBO-1", "max_issues_repo_head_hexsha": "2909b3a00161b2e29fad667add30392abc11a968", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_bayesian_optimization.py", "max_forks_repo_name": "212726320/BEBO-1", "max_forks_repo_head_hexsha": "2909b3a00161b2e29fad667add30392abc11a968", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.47, "max_line_length": 82, "alphanum_fraction": 0.6072078631, "include": true, "reason": "import numpy", "num_tokens": 713}
|
import numpy as np
import pytest
from pandas._libs import join as libjoin
from pandas._libs.join import inner_join, left_outer_join
import pandas._testing as tm
class TestIndexer:
@pytest.mark.parametrize(
"dtype", ["int32", "int64", "float32", "float64", "object"]
)
def test_outer_join_indexer(self, dtype):
indexer = libjoin.outer_join_indexer
left = np.arange(3, dtype=dtype)
right = np.arange(2, 5, dtype=dtype)
empty = np.array([], dtype=dtype)
result, lindexer, rindexer = indexer(left, right)
assert isinstance(result, np.ndarray)
assert isinstance(lindexer, np.ndarray)
assert isinstance(rindexer, np.ndarray)
tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype))
exp = np.array([0, 1, 2, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, 0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(empty, right)
tm.assert_numpy_array_equal(result, right)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(left, empty)
tm.assert_numpy_array_equal(result, left)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
def test_cython_left_outer_join(self):
left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = np.array([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
ls, rs = left_outer_join(left, right, max_group)
exp_ls = left.argsort(kind="mergesort")
exp_rs = right.argsort(kind="mergesort")
exp_li = np.array([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10])
exp_ri = np.array(
[0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 4, 5, 4, 5, 4, 5, -1, -1]
)
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_right_outer_join(self):
left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = np.array([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
rs, ls = left_outer_join(right, left, max_group)
exp_ls = left.argsort(kind="mergesort")
exp_rs = right.argsort(kind="mergesort")
# 0 1 1 1
exp_li = np.array(
[
0,
1,
2,
3,
4,
5,
3,
4,
5,
3,
4,
5,
# 2 2 4
6,
7,
8,
6,
7,
8,
-1,
]
)
exp_ri = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_inner_join(self):
left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = np.array([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
max_group = 5
ls, rs = inner_join(left, right, max_group)
exp_ls = left.argsort(kind="mergesort")
exp_rs = right.argsort(kind="mergesort")
exp_li = np.array([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8])
exp_ri = np.array([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 4, 5, 4, 5, 4, 5])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
@pytest.mark.parametrize("readonly", [True, False])
def test_left_join_indexer_unique(readonly):
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
if readonly:
# GH#37312, GH#37264
a.setflags(write=False)
b.setflags(write=False)
result = libjoin.left_join_indexer_unique(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_left_outer_join_bug():
left = np.array(
[
0,
1,
0,
1,
1,
2,
3,
1,
0,
2,
1,
2,
0,
1,
1,
2,
3,
2,
3,
2,
1,
1,
3,
0,
3,
2,
3,
0,
0,
2,
3,
2,
0,
3,
1,
3,
0,
1,
3,
0,
0,
1,
0,
3,
1,
0,
1,
0,
1,
1,
0,
2,
2,
2,
2,
2,
0,
3,
1,
2,
0,
0,
3,
1,
3,
2,
2,
0,
1,
3,
0,
2,
3,
2,
3,
3,
2,
3,
3,
1,
3,
2,
0,
0,
3,
1,
1,
1,
0,
2,
3,
3,
1,
2,
0,
3,
1,
2,
0,
2,
],
dtype=np.int64,
)
right = np.array([3, 1], dtype=np.int64)
max_groups = 4
lidx, ridx = libjoin.left_outer_join(left, right, max_groups, sort=False)
exp_lidx = np.arange(len(left), dtype=np.int64)
exp_ridx = -np.ones(len(left), dtype=np.int64)
exp_ridx[left == 1] = 1
exp_ridx[left == 3] = 0
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = libjoin.inner_join_indexer(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([2, 4], dtype=np.int64)
bexp = np.array([1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = libjoin.inner_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = libjoin.outer_join_indexer(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int64)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = libjoin.outer_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = libjoin.left_join_indexer(a, b)
tm.assert_almost_equal(index, a)
aexp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
bexp = np.array([-1, -1, 1, -1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = libjoin.left_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer2():
idx = np.array([1, 1, 2, 5], dtype=np.int64)
idx2 = np.array([1, 2, 5, 7, 9], dtype=np.int64)
res, lidx, ridx = libjoin.left_join_indexer(idx2, idx)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_outer_join_indexer2():
idx = np.array([1, 1, 2, 5], dtype=np.int64)
idx2 = np.array([1, 2, 5, 7, 9], dtype=np.int64)
res, lidx, ridx = libjoin.outer_join_indexer(idx2, idx)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_inner_join_indexer2():
idx = np.array([1, 1, 2, 5], dtype=np.int64)
idx2 = np.array([1, 2, 5, 7, 9], dtype=np.int64)
res, lidx, ridx = libjoin.inner_join_indexer(idx2, idx)
exp_res = np.array([1, 1, 2, 5], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
|
{"hexsha": "37e1cf4dbc733f7bf1c967e42cf3a83a9892acc0", "size": 11296, "ext": "py", "lang": "Python", "max_stars_repo_path": "mypython/Lib/site-packages/pandas/tests/libs/test_join.py", "max_stars_repo_name": "lilianatang/data-modelling-with-postgresql", "max_stars_repo_head_hexsha": "4b5d057d23c346cc36695dc0548f11908aeb5431", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mypython/Lib/site-packages/pandas/tests/libs/test_join.py", "max_issues_repo_name": "lilianatang/data-modelling-with-postgresql", "max_issues_repo_head_hexsha": "4b5d057d23c346cc36695dc0548f11908aeb5431", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mypython/Lib/site-packages/pandas/tests/libs/test_join.py", "max_forks_repo_name": "lilianatang/data-modelling-with-postgresql", "max_forks_repo_head_hexsha": "4b5d057d23c346cc36695dc0548f11908aeb5431", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-26T22:41:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-26T22:41:56.000Z", "avg_line_length": 29.1134020619, "max_line_length": 89, "alphanum_fraction": 0.4958392351, "include": true, "reason": "import numpy", "num_tokens": 3635}
|
[STATEMENT]
lemma knows_Spy_Inputs_secureM_sr:
"\<lbrakk> A \<noteq> Spy; evs \<in>sr \<rbrakk> \<Longrightarrow> knows Spy (Inputs A C X # evs) = knows Spy evs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>A \<noteq> Spy; evs \<in> sr\<rbrakk> \<Longrightarrow> knows Spy (Inputs A C X # evs) = knows Spy evs
[PROOF STEP]
apply (simp (no_asm_simp))
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 191, "file": null, "length": 2}
|
[STATEMENT]
lemma UGroupHomI:
assumes "\<And>g g'. T (g + g') = T g + T g'"
shows "UGroupHom T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. UGroupHom T
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
T (?g + ?g') = T ?g + T ?g'
goal (1 subgoal):
1. UGroupHom T
[PROOF STEP]
by unfold_locales auto
|
{"llama_tokens": 150, "file": "Buildings_Algebra", "length": 2}
|
from swarms.lib.agent import Agent
# from swarms.objects import Sites
from swarms.lib.model import Model
from swarms.lib.time import SimultaneousActivation
from swarms.lib.space import Grid
from unittest import TestCase
from swarms.utils.bt import BTConstruct
import py_trees
from py_trees import Blackboard
import numpy as np
# import xml.etree.ElementTree as ET
from swarms.behaviors.sbehaviors import ( # noqa: F401
IsCarryable, IsSingleCarry, SingleCarry,
NeighbourObjects, IsMultipleCarry, IsInPartialAttached,
InitiateMultipleCarry, IsEnoughStrengthToCarry,
Move, GoTo, IsMotionTrue, RandomWalk, IsMoveable,
MultipleCarry, Away, Towards, DoNotMove
)
from ponyge.operators.initialisation import initialisation
from ponyge.fitness.evaluation import evaluate_fitness
from ponyge.operators.crossover import crossover
from ponyge.operators.mutation import mutation
from ponyge.operators.replacement import replacement
from ponyge.operators.selection import selection
# Global variables for width and height
width = 100
height = 100
class GEBTAgent(Agent):
""" An minimalistic GE agent """
def __init__(self, name, model):
super().__init__(name, model)
self.location = ()
self.direction = model.random.rand() * (2 * np.pi)
self.speed = 2
self.radius = 3
# self.exchange_time = model.random.randint(2, 4)
        # This doesn't help. Maybe only perform genetic operations when
        # an agent meets 10% of the total population
# """
self.operation_threshold = 2
self.genome_storage = []
# Define a BTContruct object
self.bt = BTConstruct(None, self)
self.blackboard = Blackboard()
self.blackboard.shared_content = dict()
self.shared_content = dict()
# Grammatical Evolution part
from ponyge.algorithm.parameters import Parameters
parameter = Parameters()
parameter_list = ['--parameters', 'swarm.txt']
# Comment when different results is desired.
# Else set this for testing purpose
parameter.params['RANDOM_SEED'] = name
# np.random.randint(1, 99999999)
parameter.params['POPULATION_SIZE'] = self.operation_threshold // 2
parameter.set_params(parameter_list)
self.parameter = parameter
individual = initialisation(self.parameter, 1)
individual = evaluate_fitness(individual, self.parameter)
self.individual = individual
self.bt.xmlstring = self.individual[0].phenotype
self.bt.construct()
def step(self):
# """
        # Doing this is equivalent to using a behavior tree with four classes
        # in this order: Move, HasMoney, NeighbourCondition, ShareMoney
# self.move()
# execute BT
py_trees.logging.level = py_trees.logging.Level.DEBUG
# output = py_trees.display.ascii_tree(self.bt.behaviour_tree.root)
# print ('bt tree', output, self.individual[0].phenotype)
self.bt.behaviour_tree.tick()
cellmates = self.model.grid.get_objects_from_grid(
'GEBTAgent', self.location)
# print (cellmates)
if len(self.genome_storage) >= self.operation_threshold:
self.exchange_chromosome(cellmates)
self.bt.xmlstring = self.individual[0].phenotype
self.bt.construct()
if len(cellmates) > 1:
self.store_genome(cellmates)
def advance(self):
pass
def move(self):
new_location = ()
x = int(self.location[0] + np.cos(self.direction) * self.speed)
y = int(self.location[1] + np.sin(self.direction) * self.speed)
new_location, direction = self.model.grid.check_limits(
(x, y), self.direction)
self.model.grid.move_object(self.location, self, new_location)
self.location = new_location
self.direction = direction
def store_genome(self, cellmates):
# cellmates.remove(self)
self.genome_storage += [agent.individual[0] for agent in cellmates]
def exchange_chromosome(self, cellmates):
print('from exchange', self.name)
individuals = self.genome_storage
parents = selection(self.parameter, individuals)
cross_pop = crossover(self.parameter, parents)
new_pop = mutation(self.parameter, cross_pop)
new_pop = evaluate_fitness(new_pop, self.parameter)
individuals = replacement(self.parameter, new_pop, individuals)
individuals.sort(reverse=False)
self.individual = [individuals[0]]
self.genome_storage = []
class GEEnvironmentModel(Model):
""" A environemnt to model swarms """
def __init__(self, N, width, height, grid=10, seed=None):
if seed is None:
super(GEEnvironmentModel, self).__init__(seed=None)
else:
super(GEEnvironmentModel, self).__init__(seed)
self.num_agents = N
self.grid = Grid(width, height, grid)
self.schedule = SimultaneousActivation(self)
for i in range(self.num_agents):
a = GEBTAgent(i, self)
self.schedule.add(a)
# Add the agent to a random grid cell
# x = self.random.randint(
# -self.grid.width / 2, self.grid.width / 2)
x = 0
# y = self.random.randint(
# -self.grid.height / 2, self.grid.height / 2)
y = 0
a.location = (x, y)
self.grid.add_object_to_grid((x, y), a)
a.operation_threshold = 2 # self.num_agents // 10
def step(self):
self.schedule.step()
class TestGEBTSmallGrid(TestCase):
def setUp(self):
self.environment = GEEnvironmentModel(10, 100, 100, 10, 123)
for i in range(2):
self.environment.step()
# for agent in self.environment.schedule.agents:
# self.target_phenotype = agent.individual[0].phenotype
# self.target_fitness = agent.individual[0].fitness
# print(
# 'Step', i, agent.name, agent.individual[0].fitness,
# agent.location)
# def test_target_string(self):
# self.assertEqual('<?xml version="1.0" encoding="UTF-8"?><Sequence><Sequence><Sequence><cond>IsMoveable</cond><cond>IsMupltipleCarry</cond><act>RandomWalk</act></Sequence> <Sequence><cond>IsMotionTrue</cond><cond>IsMoveable</cond><cond>IsMotionTrue</cond><act>SingleCarry</act></Sequence></Sequence> <Selector><cond>IsMotionTrue</cond><cond>IsCarryable</cond><cond>IsMupltipleCarry</cond><act>GoTo</act></Selector></Sequence>', self.target_phenotype)
    def test_one_target(self):
self.assertEqual(14.285714285714285, self.environment.schedule.agents[0].individual[0].fitness)
|
{"hexsha": "cd66d75e56e445f8bb285108337d374765a279c2", "size": 6782, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_full_bt.py", "max_stars_repo_name": "aadeshnpn/swarm", "max_stars_repo_head_hexsha": "873e5d90de4a3b3f69d4edc8de55eb9311226c2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-03-26T22:22:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T20:45:27.000Z", "max_issues_repo_path": "test/test_full_bt.py", "max_issues_repo_name": "aadeshnpn/swarm", "max_issues_repo_head_hexsha": "873e5d90de4a3b3f69d4edc8de55eb9311226c2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-06T12:45:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-12T07:21:53.000Z", "max_forks_repo_path": "test/test_full_bt.py", "max_forks_repo_name": "aadeshnpn/swarm", "max_forks_repo_head_hexsha": "873e5d90de4a3b3f69d4edc8de55eb9311226c2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-04-22T00:27:09.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-22T00:27:09.000Z", "avg_line_length": 37.4696132597, "max_line_length": 458, "alphanum_fraction": 0.6585078148, "include": true, "reason": "import numpy", "num_tokens": 1576}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 7 14:57:18 2018
@author: root
"""
import numpy as np
from matplotlib import pyplot as plt
def load_band(file: str):
band=np.loadtxt(file, skiprows=1,delimiter=",")
return band
def open_figure(rows,cols):
fig,ax=plt.subplots(rows,cols)
return fig,ax
def test_plot(band,style,subplot):
subplot.plot(band[...,0],band[...,1],style)
def fwhm(band):
wavelength=band[...,0]
watts=band[...,1]
half=(np.max(watts))/2
left=np.where(watts>=half)[0].min()-1
b=np.where((watts<=half))[0]
right=b[b>left].min()+1
cut_watt=watts[left:right]
cut_watt=np.resize(cut_watt,(len(cut_watt),1))
cut_wl=wavelength[left:right]
cut_wl=np.resize(cut_wl,(len(cut_wl),1))
return np.hstack((cut_wl,cut_watt))
def convolve(bandRSR,spec_sign):
"""
    inputs
    bandRSR (type: numpy.array(), dtype: float, shape(x,2))
        x is the bandwidth in nanometers (i.e. the number of bands provided in the official RSR of that multispectral band).
        the columns contain
        0: the wavelengths,
        1: the watts
    spec_sign (type: numpy.array(), dtype: float, shape(y,z))
        y is the resolution of the hyperspectral spectral signature (i.e. the number of bands recorded)
        the columns contain
        0: the wavelengths
        1,...,z: the recorded reflectance of each of the spectral signatures
"""
hyper_pos=np.empty((0),dtype=int)
watt_pos=np.empty((0),dtype=int)
index=spec_sign[...,[0]]
cbi=spec_sign[:,1:]
indB=bandRSR[:,[0]]
valB=bandRSR[:,[1]]
for i,element in enumerate(indB):
for n,l in enumerate(index):
if element==l:
hyper_pos=np.append(hyper_pos,n)#print (i, element, n, l)
watt_pos=np.append(watt_pos,i)
selcbi=cbi[hyper_pos]
selband=valB[watt_pos]
bandvalue=(np.sum(np.multiply(selcbi,selband),axis=0))/np.sum(selband)
return bandvalue
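# A minimal, hedged usage sketch (the file name "band_rsr.csv" and the synthetic
# spectral signature below are assumptions for illustration, not project data).
def _convolution_example(rsr_file="band_rsr.csv"):
    rsr = load_band(rsr_file)      # columns: wavelength [nm], watts
    rsr_cut = fwhm(rsr)            # keep only the FWHM window of the RSR
    # synthetic hyperspectral signature sampled at 1 nm from 400 to 2400 nm
    wl = np.arange(400.0, 2401.0).reshape(-1, 1)
    refl = np.random.rand(len(wl), 1)
    spec_sign = np.hstack((wl, refl))
    return convolve(rsr_cut, spec_sign)   # band-averaged reflectance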
|
{"hexsha": "6b388da8c6235c2f6b3e4ae921f729a0d7eb053b", "size": 2060, "ext": "py", "lang": "Python", "max_stars_repo_path": "methods.py", "max_stars_repo_name": "Massetting/Spectral_Convolution", "max_stars_repo_head_hexsha": "f0e86d707d3ab64f39a24ab0181d7c280356da60", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "methods.py", "max_issues_repo_name": "Massetting/Spectral_Convolution", "max_issues_repo_head_hexsha": "f0e86d707d3ab64f39a24ab0181d7c280356da60", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "methods.py", "max_forks_repo_name": "Massetting/Spectral_Convolution", "max_forks_repo_head_hexsha": "f0e86d707d3ab64f39a24ab0181d7c280356da60", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-08T00:41:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-09T22:34:01.000Z", "avg_line_length": 34.3333333333, "max_line_length": 126, "alphanum_fraction": 0.617961165, "include": true, "reason": "import numpy", "num_tokens": 573}
|
"""
Class for the gravity and magnetic field 'gradient' tensors.
"""
import numpy as _np
import matplotlib as _mpl
import matplotlib.pyplot as _plt
import copy as _copy
from scipy.linalg import eigvalsh as _eigvalsh
import xarray as _xr
from .shgrid import SHGrid as _SHGrid
class Tensor(object):
"""
Generic class for gravity and magnetic field tensors. To initialize the
class, use the method tensor() of an SHGravCoeffs or SHMagCoeffs
class instance.
"""
def __init__(self):
"""Unused constructor of the main class."""
print('Initialize the class using one of the two methods:\n'
'>>> pyshtools.SHGravCoeffs.tensor\n'
'>>> pyshtools.SHMagCoeffs.tensor\n')
def compute_invar(self):
"""
Compute the three invariants (I0, I1, I2) of the tensor, as well as
the quantity I = -(I2/2)**2 / (I1/3)**3.
"""
self.i0 = self.vxx + self.vyy + self.vzz
self.i1 = (self.vxx*self.vyy + self.vyy*self.vzz + self.vxx*self.vzz -
self.vxy**2 - self.vyz**2 - self.vxz**2)
self.i2 = (self.vxx*(self.vyy*self.vzz - self.vyz**2) +
self.vxy*(self.vyz*self.vxz - self.vxy*self.vzz) +
self.vxz*(self.vxy*self.vyz - self.vxz*self.vyy))
self.i = (-1.) * (self.i2 / 2.)**2
self.i.data[1:self.nlat-self.extend, :] /= \
(self.i1.data[1:self.nlat-self.extend, :] / 3.)**3
def compute_eig(self):
"""
        Compute the three eigenvalues of the tensor: eig1, eig2, eig3.
"""
self.eig1 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
self.eig2 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
self.eig3 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
for i in range(self.nlat):
for j in range(self.nlon):
a = _np.array([[self.vxx.data[i, j],
self.vxy.data[i, j],
self.vxz.data[i, j]],
[self.vyx.data[i, j],
self.vyy.data[i, j],
self.vyz.data[i, j]],
[self.vzx.data[i, j],
self.vzy.data[i, j],
self.vzz.data[i, j]]])
eigs = _eigvalsh(a)
self.eig1.data[i, j] = eigs[2]
self.eig2.data[i, j] = eigs[1]
self.eig3.data[i, j] = eigs[0]
def compute_eigh(self):
"""
Compute the two horizontal eigenvalues of the tensor (eigh1, and
eigh2), as well as the combined maximum absolute value of the two
(eighh).
"""
self.eigh1 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
self.eigh2 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
self.eighh = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
for i in range(self.nlat):
for j in range(self.nlon):
a = _np.array([[self.vxx.data[i, j],
self.vxy.data[i, j]],
[self.vyx.data[i, j],
self.vyy.data[i, j]]])
eigs = _eigvalsh(a)
self.eigh1.data[i, j] = eigs[1]
self.eigh2.data[i, j] = eigs[0]
if abs(eigs[0]) >= abs(eigs[1]):
self.eighh.data[i, j] = eigs[0]
else:
self.eighh.data[i, j] = eigs[1]
def copy(self):
"""
Return a deep copy of the class instance.
Usage
-----
copy = x.copy()
"""
return _copy.deepcopy(self)
def info(self):
"""
        Print a summary of the data stored in the Tensor class instance.
Usage
-----
x.info()
"""
print(repr(self))
def plot_vxx(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vxx component of the tensor.
Usage
-----
x.plot_vxx([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{xx}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vxx_label
return self.vxx.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vyy(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vyy component of the tensor.
Usage
-----
x.plot_vyy([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{yy}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vyy_label
return self.vyy.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vzz(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vzz component of the tensor.
Usage
-----
x.plot_vzz([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{zz}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vzz_label
return self.vzz.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vxy(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
        Plot the Vxy component of the tensor.
Usage
-----
x.plot_vxy([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{xy}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vxy_label
return self.vxy.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
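    # A hedged usage sketch (not part of the class): `tensor` stands for any
    # instance of this class with gridded components. With
    # cmap_limits = [-50., 50., 10.], the colormap is split into
    # (50 - (-50)) / 10 = 10 discrete bands, as described in the docstring
    # above.
    #
    #     tensor.plot_vxy(cmap_limits=[-50., 50., 10.],
    #                     cb_tick_interval=20.,
    #                     colorbar='bottom')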
def plot_vyx(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vyx component of the tensor.
Usage
-----
x.plot_vyx([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{yx}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vyx_label
return self.vyx.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vxz(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vxz component of the tensor.
Usage
-----
x.plot_vxz([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{xz}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vxz_label
return self.vxz.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vzx(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vzx component of the tensor.
Usage
-----
x.plot_vzx([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{zx}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vzx_label
return self.vzx.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vyz(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vyz component of the tensor.
Usage
-----
x.plot_vyz([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{yz}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vyz_label
return self.vyz.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_vzy(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the Vzy component of the tensor.
Usage
-----
x.plot_vzy([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$V_{zy}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._vzy_label
return self.vzy.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot(self, projection=None, tick_interval=[90, 90],
minor_tick_interval=[30, 30], xlabel='', ylabel='',
colorbar='bottom', cmap='viridis', cmap_limits=None,
cmap_reverse=False, cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=8,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=8, show=True,
ax=None, fname=None):
"""
Plot the 9 components of the tensor.
Usage
-----
x.plot([projection, tick_interval, minor_tick_interval, ticks, xlabel,
ylabel, colorbar, cmap, cmap_limits, cmap_reverse,
cb_triangles, cb_label, cb_ylabel, cb_tick_interval,
cb_minor_tick_interval, cb_offset, cb_width, grid,
axes_labelsize, tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [90, 90]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = ''
Label for the longitude axis.
ylabel : str, optional, default = ''
Label for the latitude axis.
colorbar : str, optional, default = 'bottom'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = None
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
axes_labelsize : int, optional, default = 8
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = 8
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if colorbar is not None:
            if colorbar in ('bottom', 'top'):
scale = 0.9
else:
scale = 0.45
else:
scale = 0.55
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, ax = _plt.subplots(3, 3, figsize=figsize)
self.plot_vxx(projection=projection, ax=ax.flat[0],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vxy(projection=projection, ax=ax.flat[1],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vxz(projection=projection, ax=ax.flat[2],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vyx(projection=projection, ax=ax.flat[3],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vyy(projection=projection, ax=ax.flat[4],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vyz(projection=projection, ax=ax.flat[5],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vzx(projection=projection, ax=ax.flat[6],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vzy(projection=projection, ax=ax.flat[7],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_vzz(projection=projection, ax=ax.flat[8],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
fig.tight_layout(pad=0.5)
if fname is not None:
fig.savefig(fname)
return fig, ax
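    # A hedged usage sketch (not part of the class): plot all nine components
    # on a 3x3 grid of maps and save the figure; `tensor` stands for an
    # instance of this class.
    #
    #     fig, ax = tensor.plot(colorbar='bottom', cmap='RdBu_r',
    #                           fname='tensor_components.png')
    #
    # The returned `ax` is the 3x3 array of matplotlib axes created above.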
def plot_i0(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None, fname=None):
"""
        Plot the first invariant I0 (the trace) of the tensor,
        I0 = vxx + vyy + vzz,
        which should be identically zero for a potential that satisfies
        Laplace's equation.
Usage
-----
x.plot_i0([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = 'Tr $V_{ij}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._i0_label
if self.i0 is None:
self.compute_invar()
return self.i0.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
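    # A hedged sketch (not part of the class) of the trace formed from the
    # gridded components, assuming each component exposes a numpy array via
    # its `.data` attribute:
    #
    #     i0 = tensor.vxx.data + tensor.vyy.data + tensor.vzz.data
    #
    # For a harmonic potential this should vanish to numerical precision.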
def plot_i1(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None, fname=None):
"""
Plot the second invariant I1 of the tensor:
I1 = vxx*vyy + vyy*vzz + vxx*vzz - vxy**2 - vyz**2 - vxz**2
Usage
-----
x.plot_i1([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$I_1$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._i1_label
if self.i1 is None:
self.compute_invar()
return self.i1.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
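    # A hedged sketch (not part of the class) of the second invariant, using
    # the expression given in the docstring and `.data` numpy arrays:
    #
    #     i1 = (tensor.vxx.data*tensor.vyy.data
    #           + tensor.vyy.data*tensor.vzz.data
    #           + tensor.vxx.data*tensor.vzz.data
    #           - tensor.vxy.data**2 - tensor.vyz.data**2 - tensor.vxz.data**2)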
def plot_i2(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None, fname=None):
"""
Plot the third invariant I2 (the determinant) of the tensor:
I2 = vxx*(vyy*vzz - vyz**2) + vxy*(vyz*vxz - vxy*vzz)
+ vxz*(vxy*vyz - vxz*vyy)
Usage
-----
x.plot_i2([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = 'det $V_{ij}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._i2_label
if self.i2 is None:
self.compute_invar()
return self.i2.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
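    # A hedged sketch (not part of the class) of the determinant, expanding it
    # exactly as written in the docstring (`.data` numpy arrays assumed):
    #
    #     vxx, vyy, vzz = tensor.vxx.data, tensor.vyy.data, tensor.vzz.data
    #     vxy, vyz, vxz = tensor.vxy.data, tensor.vyz.data, tensor.vxz.data
    #     i2 = (vxx*(vyy*vzz - vyz**2) + vxy*(vyz*vxz - vxy*vzz)
    #           + vxz*(vxy*vyz - vxz*vyy))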
def plot_i(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None, fname=None):
"""
        Plot the dimensionless quantity I of Pedersen and Rasmussen (1990),
        I = -(I2/2)**2 / (I1/3)**3,
        which is bounded by 0 and 1.
Usage
-----
x.plot_i([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
        minor_tick_interval : list or tuple, optional, default = [None, None]
            Intervals to use when plotting the minor x and y ticks. If set to
            None, minor ticks will not be plotted.
        ticks : str, optional, default = 'WSen'
            Specify which axes should have ticks drawn and annotated. Capital
            letters plot the ticks and annotations, whereas small letters plot
            only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
            and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$-(I_2/2)^{2} / (I_1/3)^{3}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._i_label
if self.i is None:
self.compute_invar()
return self.i.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
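    # A hedged sketch (not part of the class) of Pedersen and Rasmussen's I,
    # guarding against points where I1 vanishes (i1 and i2 as in the sketches
    # above):
    #
    #     import numpy as np
    #     with np.errstate(divide='ignore', invalid='ignore'):
    #         i = -(i2 / 2.)**2 / (i1 / 3.)**3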
def plot_invar(self, projection=None, tick_interval=[60, 60],
minor_tick_interval=[30, 30], xlabel='',
ylabel='', colorbar='bottom', cmap='viridis',
cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=9,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=8, show=True,
ax=None, fname=None):
"""
Plot the three invariants of the tensor and the derived quantity I.
Usage
-----
x.plot_invar([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, axes_labelsize, tick_labelsize, ax, show,
fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [60, 60]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = ''
Label for the longitude axis.
ylabel : str, optional, default = ''
Label for the latitude axis.
colorbar : str, optional, default = 'bottom'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = None
Text label for the colorbar.
cb_ylabel : str, optional, default = None
            Text label for the y axis of the colorbar.
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
axes_labelsize : int, optional, default = 9
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = 8
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if colorbar is not None:
            if colorbar in ('bottom', 'top'):
scale = 0.8
else:
scale = 0.5
else:
scale = 0.6
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, ax = _plt.subplots(2, 2, figsize=figsize)
self.plot_i0(projection=projection, ax=ax.flat[0],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_i1(projection=projection, ax=ax.flat[1],
tick_interval=tick_interval, cb_offset=cb_offset,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_i2(projection=projection, ax=ax.flat[2],
tick_interval=tick_interval, cb_offset=cb_offset,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_i(projection=projection, ax=ax.flat[3],
tick_interval=tick_interval, cb_offset=cb_offset,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
fig.tight_layout(pad=0.5)
if fname is not None:
fig.savefig(fname)
return fig, ax
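    # A hedged usage sketch (not part of the class): the individual plot_i*
    # methods call compute_invar() on demand, so the invariants do not need to
    # be computed beforehand.
    #
    #     fig, ax = tensor.plot_invar(colorbar='bottom',
    #                                 fname='invariants.png')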
def plot_eig1(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the first eigenvalue of the tensor.
Usage
-----
x.plot_eig1([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\lambda_1$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eig1_label
if self.eig1 is None:
self.compute_eig()
return self.eig1.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eig2(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the second eigenvalue of the tensor.
Usage
-----
x.plot_eig2([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\lambda_2$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eig2_label
if self.eig1 is None:
self.compute_eig()
return self.eig2.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eig3(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None, cb_tick_interval=None,
grid=False, axes_labelsize=None, tick_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, show=True, ax=None,
fname=None):
"""
Plot the third eigenvalue of the tensor.
Usage
-----
x.plot_eig3([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\lambda_3$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eig3_label
if self.eig1 is None:
self.compute_eig()
return self.eig3.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eigs(self, projection=None, tick_interval=[60, 60],
minor_tick_interval=[30, 30], xlabel='',
ylabel='', colorbar='bottom', cmap='viridis',
cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=9,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=8, show=True,
ax=None, fname=None):
"""
Plot the three eigenvalues of the tensor.
Usage
-----
x.plot_eigs([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, colorbar, cmap, cmap_limits, cmap_reverse,
cb_triangles, cb_label, cb_ylabel, cb_tick_interval,
cb_minor_tick_interval, cb_offset, cb_width, grid,
axes_labelsize, tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [60, 60]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = ''
Label for the longitude axis.
ylabel : str, optional, default = ''
Label for the latitude axis.
colorbar : str, optional, default = 'bottom'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = None
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
axes_labelsize : int, optional, default = 9
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = 8
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if colorbar is not None:
if colorbar in set(['bottom', 'top']):
scale = 2.3
else:
scale = 1.4
else:
scale = 1.65
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, ax = _plt.subplots(3, 1, figsize=figsize)
self.plot_eig1(projection=projection, ax=ax.flat[0],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_eig2(projection=projection, ax=ax.flat[1],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_eig3(projection=projection, ax=ax.flat[2],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
fig.tight_layout(pad=0.5)
if fname is not None:
fig.savefig(fname)
return fig, ax
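    # A hedged sketch of how the composite eigenvalue figure above is
    # typically produced (`tensor` is assumed to be an existing Tensor
    # instance; the file name and colorbar placement are illustrative). Note
    # that plot_eig1/2/3 call compute_eig() automatically when the
    # eigenvalues have not yet been computed:
    #
    #     fig, ax = tensor.plot_eigs(colorbar='bottom', show=False,
    #                                fname='eigenvalues.png')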
def plot_eigh1(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=None,
show=True, ax=None, fname=None):
"""
Plot the first eigenvalue of the horizontal tensor.
Usage
-----
x.plot_eigh1([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize,
tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\lambda_{h1}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eigh1_label
if self.eigh1 is None:
self.compute_eigh()
return self.eigh1.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eigh2(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=None,
show=True, ax=None, fname=None):
"""
Plot the second eigenvalue of the horizontal tensor.
Usage
-----
x.plot_eigh2([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize,
tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\lambda_{h2}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eigh2_label
if self.eigh1 is None:
self.compute_eigh()
return self.eigh2.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eighh(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=None,
show=True, ax=None, fname=None):
"""
Plot the maximum absolute value eigenvalue of the horizontal tensor.
Usage
-----
x.plot_eighh([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize,
tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\lambda_{hh}$'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if cb_label is None:
cb_label = self._eighh_label
if self.eigh1 is None:
self.compute_eigh()
return self.eighh.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot_eigh(self, projection=None, tick_interval=[60, 60],
minor_tick_interval=[30, 30], xlabel='',
ylabel='', colorbar='bottom', cmap='viridis',
cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label=None,
cb_tick_interval=None, grid=False, axes_labelsize=9,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_offset=None, cb_width=None, tick_labelsize=8, show=True,
ax=None, fname=None):
"""
Plot the two eigenvalues and maximum absolute value eigenvalue of the
horizontal tensor.
Usage
-----
x.plot_eigh([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, colorbar, cmap, cmap_limits, cmap_reverse,
cb_triangles, cb_label, cb_ylabel, cb_tick_interval,
cb_minor_tick_interval, cb_offset, cb_width, grid,
axes_labelsize, tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [60, 60]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = ''
Label for the longitude axis.
ylabel : str, optional, default = ''
Label for the latitude axis.
colorbar : str, optional, default = 'bottom'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = None
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
axes_labelsize : int, optional, default = 9
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = 8
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
"""
if colorbar is not None:
if colorbar in set(['bottom', 'top']):
scale = 2.3
else:
scale = 1.4
else:
scale = 1.65
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, ax = _plt.subplots(3, 1, figsize=figsize)
self.plot_eigh1(projection=projection, ax=ax.flat[0],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_eigh2(projection=projection, ax=ax.flat[1],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
self.plot_eighh(projection=projection, ax=ax.flat[2],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_label=cb_label, cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks, cb_offset=cb_offset,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, tick_labelsize=tick_labelsize,
show=show)
fig.tight_layout(pad=0.5)
if fname is not None:
fig.savefig(fname)
return fig, ax
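    # As with plot_eigs() above, a single call is usually sufficient for the
    # horizontal-eigenvalue maps (illustrative keyword values; `tensor` is
    # assumed to exist):
    #
    #     fig, ax = tensor.plot_eigh(ticks='wsen', show=False,
    #                                fname='eigh.png')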
def to_xarray(self, title='', description='',
comment='pyshtools grid'):
"""
Return all tensor gridded data as an xarray DataSet.
Usage
-----
x.to_xarray([title, description, comment])
Parameters
----------
title : str, optional, default = ''
Title of the dataset.
description : str, optional, default = ''
Description of the dataset ('Remark' in gmt grd files).
comment : str, optional, default = 'pyshtools grid'
Additional information about how the data were generated.
"""
attrs = {'title': title,
'description': description,
'comment': comment,
'nlat': self.nlat,
'nlon': self.nlon,
'lmax': self.lmax,
'lmax_calc': self.lmax_calc,
'sampling': self.sampling,
'grid': self.grid,
'a': self.a,
'f': self.f,
'n': self.n,
'extend': repr(self.extend)
}
if isinstance(self, SHGravTensor):
attrs['gm'] = self.gm
if self.epoch is not None:
attrs['epoch'] = self.epoch
desc = 'gravity tensor component '
else:
if self.year is not None:
attrs['year'] = self.year
desc = 'magnetic field tensor component '
_vxx = self.vxx.to_xarray(title=desc+'(Vxx)', long_name='$V_{xx}$',
units=self._vii_units)
_vxy = self.vxy.to_xarray(title=desc+'(Vxy)', long_name='$V_{xy}$',
units=self._vii_units)
_vxz = self.vxz.to_xarray(title=desc+'(Vxz)', long_name='$V_{xz}$',
units=self._vii_units)
_vyx = self.vyx.to_xarray(title=desc+'(Vyx)', long_name='$V_{yx}$',
units=self._vii_units)
_vyy = self.vyy.to_xarray(title=desc+'(Vyy)', long_name='$V_{yy}$',
units=self._vii_units)
_vyz = self.vyz.to_xarray(title=desc+'(Vyz)', long_name='$V_{yz}$',
units=self._vii_units)
_vzx = self.vzx.to_xarray(title=desc+'(Vzx)', long_name='$V_{zx}$',
units=self._vii_units)
_vzy = self.vzy.to_xarray(title=desc+'(Vzy)', long_name='$V_{zy}$',
units=self._vii_units)
_vzz = self.vzz.to_xarray(title=desc+'(Vzz)', long_name='$V_{zz}$',
units=self._vii_units)
dataset = _xr.Dataset({'vxx': _vxx, 'vxy': _vxy, 'vxz': _vxz,
'vyx': _vyx, 'vyy': _vyy, 'vyz': _vyz,
'vzx': _vzx, 'vzy': _vzy, 'vzz': _vzz},
attrs=attrs)
if self.i0 is not None:
if isinstance(self, SHGravTensor):
desc0 = 'First invariant of the gravity tensor'
desc1 = 'Second invariant of the gravity tensor'
desc2 = 'Third invariant of the gravity tensor'
desc = 'Unitless invariant of the gravity tensor'
else:
desc0 = 'First invariant of the magnetic field tensor'
desc1 = 'Second invariant of the magnetic field tensor'
desc2 = 'Third invariant of the magnetic field tensor'
desc = 'Unitless invariant of the magnetic field tensor'
_i0 = self.i0.to_xarray(title=desc0,
long_name='$I_0$, Tr $V_{ii}$',
units=self._i0_units)
_i1 = self.i1.to_xarray(title=desc1, long_name='$I_1$',
units=self._i1_units)
_i2 = self.i2.to_xarray(title=desc2,
long_name='$I_2$, det $V_{ij}$',
units=self._i2_units)
_i = self.i.to_xarray(title=desc,
long_name='$-(I_2/2)^{2} / ' +
'(I_1/3)^{3}$',
units='none')
dataset['i0'] = _i0
dataset['i1'] = _i1
dataset['i2'] = _i2
dataset['i'] = _i
if self.eig1 is not None:
if isinstance(self, SHGravTensor):
desc1 = 'First eigenvalue of the gravity tensor'
desc2 = 'Second eigenvalue of the gravity tensor'
desc3 = 'Third eigenvalue of the gravity tensor'
else:
desc1 = 'First eigenvalue of the magnetic field tensor'
desc2 = 'Second eigenvalue of the magnetic field tensor'
desc3 = 'Third eigenvalue of the magnetic field tensor'
            _eig1 = self.eig1.to_xarray(title=desc1,
                                        long_name=r'${\lambda}_1$',
                                        units=self._vii_units)
            _eig2 = self.eig2.to_xarray(title=desc2,
                                        long_name=r'${\lambda}_2$',
                                        units=self._vii_units)
            _eig3 = self.eig3.to_xarray(title=desc3,
                                        long_name=r'${\lambda}_3$',
                                        units=self._vii_units)
dataset['eig1'] = _eig1
dataset['eig2'] = _eig2
dataset['eig3'] = _eig3
if self.eighh is not None:
if isinstance(self, SHGravTensor):
desc1 = 'First horizontal eigenvalue of the gravity tensor'
desc2 = 'Second horizontal eigenvalue of the gravity tensor'
desc3 = 'Combined horizontal eigenvalue of the gravity tensor'
else:
desc1 = 'First horizontal eigenvalue of the magnetic ' \
+ 'field tensor'
desc2 = 'Second horizontal eigenvalue of the magnetic ' \
+ 'field tensor'
desc3 = 'Combined horizontal eigenvalue of the magnetic ' \
+ 'field tensor'
            _eigh1 = self.eigh1.to_xarray(title=desc1,
                                          long_name=r'${\lambda}_{h1}$',
                                          units=self._vii_units)
            _eigh2 = self.eigh2.to_xarray(title=desc2,
                                          long_name=r'${\lambda}_{h2}$',
                                          units=self._vii_units)
            _eighh = self.eighh.to_xarray(title=desc3,
                                          long_name=r'${\lambda}_{hh}$',
                                          units=self._vii_units)
dataset['eigh1'] = _eigh1
dataset['eigh2'] = _eigh2
dataset['eighh'] = _eighh
return dataset
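    # Possible export path for the Dataset returned above (a sketch assuming
    # the optional xarray/netCDF dependencies are installed; the file name is
    # illustrative):
    #
    #     ds = tensor.to_xarray(title='tensor components')
    #     ds['vzz'].plot()             # quick-look map via xarray
    #     ds.to_netcdf('tensor.nc')    # write all stored grids to one file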
class SHGravTensor(Tensor):
"""
Class for the gravity field tensor and eigenvalues. The class is
initialized from a class instance of SHGravCoeffs using the method
tensor().
Attributes:
    vxx, vxy, vxz, : The 9 components of the gravity tensor.
vyx, vyy, vyz,
vzx, vzy, vzz
i0, i1, i2, i : The three invariants of the gravity tensor and a
derived quantity that is bounded between 0 and 1.
These are computed by a call to compute_invar().
eig1, eig2, eig3 : The three eigenvalues of the gravity tensor, which are
computed by a call to compute_eig().
eigh1, eigh2, : The horizontal eigenvalues of the gravity tensor, which
eighh are computed by a call to compute_eigh().
gm : The gravitational constant times the mass of the body.
a : Semimajor axis of the reference ellipsoid.
f : Flattening of the reference ellipsoid, f=(a-b)/a.
lmax : The maximum spherical harmonic degree resolvable by the
grids.
lmax_calc : The maximum spherical harmonic degree of the
gravitational potential used in creating the grids.
units : The units of the gridded data.
epoch : The epoch time of the gravity model.
nlat, nlon : The number of latitude and longitude bands in the grids.
n : The number of samples in latitude.
sampling : The longitudinal sampling for Driscoll and Healy grids.
Either 1 for equally sampled grids (nlat=nlon) or 2 for
equally spaced grids in degrees.
extend : True if the grid contains the redundant column for
360 E and the unnecessary row for 90 S.
Methods:
plot() : Plot all 9 components of the gravity tensor.
plot_vxx() : Plot the vxx component of the gravity tensor.
plot_vxy() : Plot the vxy component of the gravity tensor.
plot_vxz() : Plot the vxz component of the gravity tensor.
plot_vyx() : Plot the vyx component of the gravity tensor.
plot_vyy() : Plot the vyy component of the gravity tensor.
plot_vyz() : Plot the vyz component of the gravity tensor.
plot_vzx() : Plot the vzx component of the gravity tensor.
plot_vzy() : Plot the vzy component of the gravity tensor.
plot_vzz() : Plot the vzz component of the gravity tensor.
compute_invar() : Compute the invariants of the gravity tensor.
plot_i0() : Plot the first invariant I0 of the gravity tensor.
plot_i1() : Plot the second invariant I1 of the gravity tensor.
plot_i2() : Plot the third invariant I2 of the gravity tensor.
plot_i() : Plot the derived quantity I = -(I2/2)**2 / (I1/3)**3.
compute_eig() : Compute the three eigenvalues of the gravity tensor.
    plot_eigs() : Plot the three eigenvalues of the gravity tensor.
plot_eig1() : Plot the first eigenvalue of the gravity tensor.
plot_eig2() : Plot the second eigenvalue of the gravity tensor.
plot_eig3() : Plot the third eigenvalue of the gravity tensor.
compute_eigh() : Compute the horizontal eigenvalues of the gravity tensor.
plot_eigh() : Plot the two horizontal eigenvalues and the combined
maximum absolute eigenvalue of the gravity tensor.
plot_eigh1() : Plot the first horizontal eigenvalue of the gravity
tensor.
plot_eigh2() : Plot the second horizontal eigenvalue of the gravity
tensor.
plot_eighh() : Plot the combined maximum absolute eigenvalue of the
gravity tensor.
to_xarray() : Return an xarray DataSet of all gridded data.
copy() : Return a copy of the class instance.
info() : Print a summary of the data stored in the SHGravTensor
instance.
"""
def __init__(self, vxx, vyy, vzz, vxy, vxz, vyz, gm, a, f, lmax,
lmax_calc, units='Eötvös', epoch=None):
"""
Initialize the SHGravTensor class.
"""
self.vxx = _SHGrid.from_array(vxx, grid='DH', units=units)
self.vyy = _SHGrid.from_array(vyy, grid='DH', units=units)
self.vzz = _SHGrid.from_array(vzz, grid='DH', units=units)
self.vxy = _SHGrid.from_array(vxy, grid='DH', units=units)
self.vxz = _SHGrid.from_array(vxz, grid='DH', units=units)
self.vyz = _SHGrid.from_array(vyz, grid='DH', units=units)
self.vyx = self.vxy
self.vzx = self.vxz
self.vzy = self.vyz
self.grid = self.vxx.grid
self.sampling = self.vxx.sampling
self.nlat = self.vxx.nlat
self.nlon = self.vxx.nlon
self.n = self.vxx.n
self.extend = self.vxx.extend
self.gm = gm
self.a = a
self.f = f
self.lmax = lmax
self.lmax_calc = lmax_calc
self.i0 = None
self.i1 = None
self.i2 = None
self.i = None
self.eig1 = None
self.eig2 = None
self.eig3 = None
self.eigh1 = None
self.eigh2 = None
self.eighh = None
self.units = units
self.epoch = epoch
self._vxx_label = '$V_{xx}$, ' + self.units
self._vxy_label = '$V_{xy}$, ' + self.units
self._vxz_label = '$V_{xz}$, ' + self.units
self._vyx_label = '$V_{yx}$, ' + self.units
self._vyy_label = '$V_{yy}$, ' + self.units
self._vyz_label = '$V_{yz}$, ' + self.units
self._vzx_label = '$V_{zx}$, ' + self.units
self._vzy_label = '$V_{zy}$, ' + self.units
self._vzz_label = '$V_{zz}$, ' + self.units
self._i0_label = 'Tr $V_{ii}$, ' + self.units
self._i1_label = '$I_1$, ' + self.units + '$^2$'
self._i2_label = 'det $V_{ij}$, ' + self.units + '$^3$'
self._i_label = '$-(I_2/2)^{2} / (I_1/3)^{3}$'
        self._eig1_label = r'$\lambda_1$, ' + self.units
        self._eig2_label = r'$\lambda_2$, ' + self.units
        self._eig3_label = r'$\lambda_3$, ' + self.units
        self._eigh1_label = r'$\lambda_{h1}$, ' + self.units
        self._eigh2_label = r'$\lambda_{h2}$, ' + self.units
        self._eighh_label = r'$\lambda_{hh}$, ' + self.units
    def __repr__(self):
        return ('grid = {:s}\n'
                'nlat = {:d}\n'
                'nlon = {:d}\n'
                'n = {:d}\n'
                'sampling = {:d}\n'
                'extend = {}\n'
                'lmax = {:d}\n'
                'lmax_calc = {:d}\n'
                'gm (m3 / s2) = {:e}\n'
                'a (m) = {:e}\n'
                'f = {:e}\n'
                'units = {:s}\n'
                'epoch = {:s}'
                .format(self.grid, self.nlat, self.nlon, self.n,
                        self.sampling, self.extend, self.lmax, self.lmax_calc,
                        self.gm, self.a, self.f, repr(self.units),
                        repr(self.epoch)))
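# Illustrative end-to-end sketch for SHGravTensor (hedged: the input file and
# keyword values below are hypothetical, and instances are normally obtained
# from SHGravCoeffs.tensor() rather than constructed directly, as the class
# docstring notes):
#
#     import pyshtools as pysh
#     clm = pysh.SHGravCoeffs.from_file('grav_coeffs.sh', gm=3.986e14,
#                                       r0=6.378e6)
#     tensor = clm.tensor()            # returns an SHGravTensor
#     tensor.compute_eigh()
#     fig, ax = tensor.plot_eigh(show=False)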
class SHMagTensor(Tensor):
"""
Class for the magnetic field tensor and eigenvalues. The class is
initialized from a class instance of SHMagCoeffs using the method
tensor().
Attributes:
    vxx, vxy, vxz, : The 9 components of the magnetic field tensor.
vyx, vyy, vyz,
vzx, vzy, vzz
i0, i1, i2, i : The three invariants of the magnetic field tensor and a
derived quantity that is bounded between 0 and 1.
eig1, eig2, eig3 : The three eigenvalues of the magnetic field tensor,
which are computed by a call to compute_eig().
eigh1, eigh2, : The horizontal eigenvalues of the magnetic field
eighh tensor, which are computed by a call to compute_eigh().
a : Semimajor axis of the reference ellipsoid.
f : Flattening of the reference ellipsoid, f=(a-b)/a.
lmax : The maximum spherical harmonic degree resolvable by the
grids.
lmax_calc : The maximum spherical harmonic degree of the
magnetic potential used in creating the grids.
units : The units of the gridded data.
year : The year of the time-variable magnetic field data.
nlat, nlon : The number of latitude and longitude bands in the grids.
sampling : The longitudinal sampling for Driscoll and Healy grids.
Either 1 for equally sampled grids (nlat=nlon) or 2 for
equally spaced grids in degrees.
extend : True if the grid contains the redundant column for
360 E and the unnecessary row for 90 S.
Methods:
plot() : Plot all 9 components of the magnetic field tensor.
plot_vxx() : Plot the vxx component of the magnetic field tensor.
plot_vxy() : Plot the vxy component of the magnetic field tensor.
plot_vxz() : Plot the vxz component of the magnetic field tensor.
plot_vyx() : Plot the vyx component of the magnetic field tensor.
plot_vyy() : Plot the vyy component of the magnetic field tensor.
plot_vyz() : Plot the vyz component of the magnetic field tensor.
plot_vzx() : Plot the vzx component of the magnetic field tensor.
plot_vzy() : Plot the vzy component of the magnetic field tensor.
plot_vzz() : Plot the vzz component of the magnetic field tensor.
compute_invar() : Compute the invariants of the magnetic field tensor.
plot_i0() : Plot the first invariant I0 of the magnetic field tensor.
    plot_i1() : Plot the second invariant I1 of the magnetic field tensor.
plot_i2() : Plot the third invariant I2 of the magnetic field tensor.
plot_i() : Plot the derived quantity I = -(I2/2)**2 / (I1/3)**3.
compute_eig() : Compute the three eigenvalues of the magnetic field
tensor.
    plot_eigs() : Plot the three eigenvalues of the magnetic field tensor.
plot_eig1() : Plot the first eigenvalue of the magnetic field tensor.
plot_eig2() : Plot the second eigenvalue of the magnetic field tensor.
plot_eig3() : Plot the third eigenvalue of the magnetic field tensor.
compute_eigh() : Compute the horizontal eigenvalues of the magnetic field
tensor.
plot_eigh() : Plot the two horizontal eigenvalues and the combined
maximum absolute eigenvalue of the magnetic field tensor.
plot_eigh1() : Plot the first horizontal eigenvalue of the magnetic
field tensor.
plot_eigh2() : Plot the second horizontal eigenvalue of the magnetic
field tensor.
plot_eighh() : Plot the combined maximum absolute eigenvalue of the
magnetic field tensor.
to_xarray() : Return an xarray DataSet of all gridded data.
copy() : Return a copy of the class instance.
info() : Print a summary of the data stored in the SHMagTensor
instance.
"""
def __init__(self, vxx, vyy, vzz, vxy, vxz, vyz, a, f, lmax,
lmax_calc, units=None, year=None):
"""
Initialize the SHMagTensor class.
"""
self.vxx = _SHGrid.from_array(vxx, grid='DH', units=units)
self.vyy = _SHGrid.from_array(vyy, grid='DH', units=units)
self.vzz = _SHGrid.from_array(vzz, grid='DH', units=units)
self.vxy = _SHGrid.from_array(vxy, grid='DH', units=units)
self.vxz = _SHGrid.from_array(vxz, grid='DH', units=units)
self.vyz = _SHGrid.from_array(vyz, grid='DH', units=units)
self.vyx = self.vxy
self.vzx = self.vxz
self.vzy = self.vyz
self.grid = self.vxx.grid
self.sampling = self.vxx.sampling
self.nlat = self.vxx.nlat
self.nlon = self.vxx.nlon
self.n = self.vxx.n
self.extend = self.vxx.extend
self.a = a
self.f = f
self.lmax = lmax
self.lmax_calc = lmax_calc
self.i0 = None
self.i1 = None
self.i2 = None
self.i = None
self.eig1 = None
self.eig2 = None
self.eig3 = None
self.eigh1 = None
self.eigh2 = None
self.eighh = None
self.units = units
self.year = year
if self.units.lower() == 'nt/m':
self._units_formatted = 'nT m$^{-1}$'
self._i1_units = 'nT$^2$ m$^{-2}$'
self._i2_units = 'nT$^3$ m$^{-3}$'
else:
self._units_formatted = 'T m$^{-1}$'
self._i1_units = 'T$^2$ m$^{-2}$'
self._i2_units = 'T$^3$ m$^{-3}$'
self._vxx_label = '$V_{xx}$, ' + self._units_formatted
self._vxy_label = '$V_{xy}$, ' + self._units_formatted
self._vxz_label = '$V_{xz}$, ' + self._units_formatted
self._vyx_label = '$V_{yx}$, ' + self._units_formatted
self._vyy_label = '$V_{yy}$, ' + self._units_formatted
self._vyz_label = '$V_{yz}$, ' + self._units_formatted
self._vzx_label = '$V_{zx}$, ' + self._units_formatted
self._vzy_label = '$V_{zy}$, ' + self._units_formatted
self._vzz_label = '$V_{zz}$, ' + self._units_formatted
self._i0_label = 'Tr $V_{ii}$, ' + self._units_formatted
self._i1_label = '$I_1$, ' + self._i1_units
self._i2_label = 'det $V_{ij}$, ' + self._i2_units
self._i_label = '$-(I_2/2)^{2} / (I_1/3)^{3}$'
        self._eig1_label = r'$\lambda_1$, ' + self._units_formatted
        self._eig2_label = r'$\lambda_2$, ' + self._units_formatted
        self._eig3_label = r'$\lambda_3$, ' + self._units_formatted
        self._eigh1_label = r'$\lambda_{h1}$, ' + self._units_formatted
        self._eigh2_label = r'$\lambda_{h2}$, ' + self._units_formatted
        self._eighh_label = r'$\lambda_{hh}$, ' + self._units_formatted
    def __repr__(self):
        return ('grid = {:s}\n'
                'nlat = {:d}\n'
                'nlon = {:d}\n'
                'n = {:d}\n'
                'sampling = {:d}\n'
                'extend = {}\n'
                'lmax = {:d}\n'
                'lmax_calc = {:d}\n'
                'a (m) = {:e}\n'
                'f = {:e}\n'
                'units = {:s}\n'
                'year = {:s}'
                .format(self.grid, self.nlat, self.nlon, self.n,
                        self.sampling, self.extend, self.lmax, self.lmax_calc,
                        self.a, self.f, repr(self.units), repr(self.year)))
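# Analogous sketch for the magnetic case (hedged: the coefficient source is
# hypothetical; SHMagTensor instances are normally produced by
# SHMagCoeffs.tensor(), as stated in the class docstring above):
#
#     import pyshtools as pysh
#     clm = pysh.SHMagCoeffs.from_array(coeffs, r0=6.371e6)  # coeffs assumed
#     tensor = clm.tensor()            # returns an SHMagTensor
#     ds = tensor.to_xarray(title='magnetic gradient tensor')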
import numpy as np
import pandas as pd
import pytest
import tempfile
import calliope
from . import common
from .common import assert_almost_equal
class TestInitialization:
def test_model_initialization_default(self):
model = calliope.Model()
assert hasattr(model, 'data')
assert hasattr(model, 'config_run')
assert hasattr(model, 'config_model')
assert model.config_run.mode == 'plan'
def test_model_initialization_follow_import_statements(self):
model = calliope.Model()
assert 'techs' in model.config_model
def test_model_initialization_follow_nested_import_statements(self):
model = calliope.Model()
assert 'links' in model.config_model
def test_model_initialization_override_dict(self):
override = {'output.save': True}
with pytest.raises(AssertionError):
calliope.Model(override=override)
def test_model_initialization_override_attrdict(self):
override = calliope.utils.AttrDict({'output': {'save': True}})
model = calliope.Model(override=override)
assert model.config_run.output.save is True
def test_model_initialization_simple_model(self):
common.simple_model()
def test_gettimeres_1hourly(self):
model = common.simple_model()
assert model.get_timeres() == 1
def test_gettimeres_6hourly(self):
path = common._add_test_path('common/t_6h')
model = common.simple_model(path=path)
assert model.get_timeres() == 6
def test_gettimeres_verify_1hourly(self):
model = common.simple_model()
assert model.get_timeres(verify=True) == 1
def test_gettimeres_verify_erroneous(self):
path = common._add_test_path('common/t_erroneous')
model = common.simple_model(path=path)
with pytest.raises(AssertionError):
model.get_timeres(verify=True)
@pytest.fixture
def sine_wave(self):
return pd.DataFrame((np.sin(np.arange(0, 10, 0.1)) + 1.0) * 5/2 + 5)
def test_scale_to_peak_positive(self, sine_wave):
model = common.simple_model()
scaled = model.scale_to_peak(sine_wave, 100)
assert_almost_equal(float(scaled.max()), 100, tolerance=0.01)
assert_almost_equal(float(scaled.min()), 50, tolerance=0.01)
def test_scale_to_peak_negative(self, sine_wave):
model = common.simple_model()
df = sine_wave * -1
scaled = model.scale_to_peak(df, -100)
assert_almost_equal(float(scaled.max()), -50, tolerance=0.01)
assert_almost_equal(float(scaled.min()), -100, tolerance=0.01)
def test_scale_to_peak_scale_time_res_true(self, sine_wave):
path = common._add_test_path('common/t_6h')
model = common.simple_model(path=path)
scaled = model.scale_to_peak(sine_wave, 100)
assert_almost_equal(float(scaled.max()), 600, tolerance=0.1)
assert_almost_equal(float(scaled.min()), 300, tolerance=0.1)
def test_scale_to_peak_scale_time_res_false(self, sine_wave):
path = common._add_test_path('common/t_6h')
model = common.simple_model(path=path)
scaled = model.scale_to_peak(sine_wave, 100, scale_time_res=False)
assert_almost_equal(float(scaled.max()), 100, tolerance=0.1)
assert_almost_equal(float(scaled.min()), 50, tolerance=0.1)
def test_scale_to_peak_positive_and_negative(self, sine_wave):
model = common.simple_model()
df = sine_wave - 6
scaled = model.scale_to_peak(df, 10)
assert_almost_equal(float(scaled.max()), 10, tolerance=0.01)
assert_almost_equal(float(scaled.min()), -2.5, tolerance=0.01)
def test_initialize_parents_defaults(self):
override = """
override:
techs:
bad_tech:
parent: defaults
"""
override = calliope.utils.AttrDict.from_yaml_string(override)
with pytest.raises(calliope.exceptions.ModelError):
model = common.simple_model(override=override)
def test_initialize_sets_timesteps(self):
model = common.simple_model()
daterange = pd.Index(pd.date_range('2005-01-01 00:00', '2005-02-28 23:00', freq='1H'))
assert (model._sets['t'] == daterange).all()
assert model._sets['t'][0].minute == 0
assert model._sets['t'][0].hour == 0
assert model._sets['t'][0].day == 1
assert model._sets['t'][0].month == 1
assert model.data.attrs['time_res'] == 1
assert model.data['_time_res'].to_series().tolist() == [1] * 1416
assert model.data.attrs['startup_time_bounds'] == pd.Timestamp('2005-01-01 12:00')
def test_initialize_sets_timesteps_subset(self):
config_run = """
mode: plan
model: ['{techs}', '{locations}']
subset_t: ['2005-01-02', '2005-01-03']
"""
model = common.simple_model(config_run=config_run)
daterange = pd.Index(pd.date_range('2005-01-02 00:00', '2005-01-03 23:00', freq='1H'))
assert (model._sets['t'] == daterange).all()
assert model._sets['t'][0].minute == 0
assert model._sets['t'][0].hour == 0
assert model._sets['t'][0].day == 2
assert model._sets['t'][0].month == 1
assert model.data.attrs['time_res'] == 1
assert model.data['_time_res'].to_series().tolist() == [1] * 48
assert model.data.attrs['startup_time_bounds'] == pd.Timestamp('2005-01-02 12:00')
def test_initialize_sets_technologies(self):
model = common.simple_model()
y = ['ccgt', 'csp', 'demand_power', 'unmet_demand_power']
assert sorted(model._sets['y']) == y
def test_initialize_sets_technologies_loc_invalid_tech(self):
locations = """
override:
locations:
demand:
techs: ['']
"""
override = calliope.utils.AttrDict.from_yaml_string(locations)
with pytest.raises(calliope.exceptions.ModelError):
model = common.simple_model(override=override)
def test_initialize_sets_technologies_subset(self):
config_run = """
mode: plan
model: ['{techs}', '{locations}']
subset_y: ['ccgt', 'demand_power']
"""
model = common.simple_model(config_run=config_run)
assert sorted(model._sets['y']) == ['ccgt', 'demand_power']
def test_initialize_sets_technologies_too_large_subset(self):
config_run = """
mode: plan
model: ['{techs}', '{locations}']
subset_y: ['ccgt', 'demand_power', 'foo', 'bar']
"""
model = common.simple_model(config_run=config_run)
assert sorted(model._sets['y']) == ['ccgt', 'demand_power']
def test_initialize_sets_carriers(self):
model = common.simple_model()
assert sorted(model._sets['c']) == ['power']
# TODO more extensive tests for carriers
def test_initialize_sets_locations(self):
model = common.simple_model()
assert sorted(model._sets['x']) == ['1', '2', 'demand']
def test_initialize_sets_locations_subset(self):
config_run = """
mode: plan
model: ['{techs}', '{locations}']
subset_x: ['1', 'demand']
"""
model = common.simple_model(config_run=config_run)
assert sorted(model._sets['x']) == ['1', 'demand']
def test_initialize_sets_locations_too_large_subset(self):
config_run = """
mode: plan
model: ['{techs}', '{locations}']
subset_x: ['1', 'demand', 'foo', 'bar']
"""
model = common.simple_model(config_run=config_run)
assert sorted(model._sets['x']) == ['1', 'demand']
def test_initialize_locations_matrix(self):
model = common.simple_model()
cols = ['_level', '_override.ccgt.constraints.e_cap.max',
'_within', 'ccgt', 'csp', 'demand_power',
'unmet_demand_power']
assert sorted(model._locations.columns) == cols
assert (sorted(model._locations.index.tolist())
== ['1', '2', 'demand'])
@pytest.fixture
def model_transmission(self):
locations = """
locations:
demand:
techs: ['demand_power']
1,2:
techs: ['ccgt', 'csp']
links:
1,2:
hvac:
constraints:
e_cap.max: 100
"""
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(locations.encode('utf-8'))
print(f.read())
model = common.simple_model(config_locations=f.name)
return model
def test_initialize_sets_locations_with_transmission(self,
model_transmission):
model = model_transmission
y =['ccgt', 'csp', 'demand_power', 'hvac:1', 'hvac:2']
assert sorted(model._sets['y']) == y
def test_initialize_locations_matrix_with_transmission(self,
model_transmission):
model = model_transmission
cols = ['_level',
'_override.hvac:1.constraints.e_cap.max',
'_override.hvac:2.constraints.e_cap.max',
'_within', 'ccgt', 'csp', 'demand_power',
'hvac:1', 'hvac:2']
locations = model._locations
assert sorted(locations.columns) == cols
assert (sorted(locations.index.tolist())
== ['1', '2', 'demand'])
assert locations.at['1', '_override.hvac:2.constraints.e_cap.max'] == 100
assert np.isnan(locations.at['1', '_override.hvac:1.constraints.e_cap.max'])
assert locations.at['2', '_override.hvac:1.constraints.e_cap.max'] == 100
def test_read_data_supply_r_negative_check(self):
path = common._add_test_path('common/t_positive_demand')
override = ('override.techs.demand_power.'
'constraints.r: file=demand-sin_r.csv')
override = calliope.utils.AttrDict.from_yaml_string(override)
with pytest.raises(AssertionError):
model = common.simple_model(path=path, override=override)
class TestOptions:
def test_get_option(self):
model = common.simple_model()
assert model.get_option('ccgt.constraints.e_cap.max') == 50
def test_get_option_default(self):
model = common.simple_model()
assert model.get_option('ccgt.depreciation.plant_life') == 25
def test_get_option_default_unavailable(self):
model = common.simple_model()
with pytest.raises(calliope.exceptions.OptionNotSetError):
model.get_option('ccgt.depreciation.foo')
def test_get_option_specify_default_inexistent(self):
model = common.simple_model()
assert model.get_option('ccgt.depreciation.foo',
default='ccgt.depreciation.plant_life') == 25
def test_get_option_specify_default_exists_but_false(self):
model = common.simple_model()
assert model.get_option('ccgt.constraints.e_eff_ref',
default='ccgt.depreciation.plant_life') is False
def test_get_option_location(self):
model = common.simple_model()
assert model.get_option('ccgt.constraints.e_cap.max', 'demand') == 50
assert model.get_option('ccgt.constraints.e_cap.max', '1') == 100
def test_get_option_location_default(self):
model = common.simple_model()
assert model.get_option('ccgt.depreciation.plant_life', '1') == 25
def test_get_option_location_default_unavailable(self):
model = common.simple_model()
with pytest.raises(calliope.exceptions.OptionNotSetError):
model.get_option('ccgt.depreciation.foo', '1')
def test_set_option(self):
model = common.simple_model()
with pytest.raises(KeyError): # Ensure that option doesn't exist yet
model.config_model.techs.test.option
model.set_option('test.option', True) # Set option
assert model.config_model.techs.test.option is True # Exists now?
def test_set_get_option(self):
model = common.simple_model()
model.set_option('test.option', 'foo')
assert model.get_option('test.option') == 'foo'
def test_get_set_get_option(self):
model = common.simple_model()
assert model.get_option('ccgt.constraints.e_cap.max') == 50
model.set_option('ccgt.constraints.e_cap.max', 'foo')
assert model.get_option('ccgt.constraints.e_cap.max') == 'foo'
|
{"hexsha": "3b4462d1e784593c20888c2c3da1784cc6cffbb3", "size": 13086, "ext": "py", "lang": "Python", "max_stars_repo_path": "calliope/test/test_core.py", "max_stars_repo_name": "sjpfenninger/calliope", "max_stars_repo_head_hexsha": "a4e49c3b7d37f908bafc84543510eec0b4cf5d9f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-11T15:50:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-11T15:50:16.000Z", "max_issues_repo_path": "calliope/test/test_core.py", "max_issues_repo_name": "mhdella/calliope", "max_issues_repo_head_hexsha": "a4e49c3b7d37f908bafc84543510eec0b4cf5d9f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "calliope/test/test_core.py", "max_forks_repo_name": "mhdella/calliope", "max_forks_repo_head_hexsha": "a4e49c3b7d37f908bafc84543510eec0b4cf5d9f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-11T15:50:18.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-11T15:50:18.000Z", "avg_line_length": 41.6751592357, "max_line_length": 94, "alphanum_fraction": 0.6053797952, "include": true, "reason": "import numpy", "num_tokens": 2936}
|
#include "functions/transpose.hh"
#include <boost/test/unit_test.hpp>
#include "data/matrix.hh"
BOOST_AUTO_TEST_CASE(tranpose_test) {
using namespace manifolds;
auto m1 = GetRowMatrix<3, 2>(1, 2, 3, 4, 5, 6);
auto m2 = GetColMatrix<2, 3>(1, 2, 3, 4, 5, 6);
auto check = GetMatrix<2, 3>(1, 3, 5, 2, 4, 6);
BOOST_CHECK_EQUAL(check, m2);
BOOST_CHECK_EQUAL(transpose(m1), m2);
}
|
{"hexsha": "578ce7edc5f07a450cc6ee21c21605a1dac37fb6", "size": 388, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "functions/tests/test_transpose.cpp", "max_stars_repo_name": "GuylainGreer/manifolds", "max_stars_repo_head_hexsha": "96f996f67fc523c726f2edbc9705125c212bedae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "functions/tests/test_transpose.cpp", "max_issues_repo_name": "GuylainGreer/manifolds", "max_issues_repo_head_hexsha": "96f996f67fc523c726f2edbc9705125c212bedae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "functions/tests/test_transpose.cpp", "max_forks_repo_name": "GuylainGreer/manifolds", "max_forks_repo_head_hexsha": "96f996f67fc523c726f2edbc9705125c212bedae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8461538462, "max_line_length": 49, "alphanum_fraction": 0.675257732, "num_tokens": 152}
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from keras.layers import Input, Dense, concatenate
from keras.layers.recurrent import GRU
from keras.utils import plot_model
from keras.models import Model, load_model
from keras.callbacks import ModelCheckpoint
import keras
import pandas as pd
import numpy as np
import keras.backend as K
from keras.utils import to_categorical
from keras.losses import categorical_crossentropy
from multiprocessing import Pool, cpu_count
import pickle
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# In[2]:
dataset = "cb12/"
path = "../../data/"
interim_path = path + dataset + "interim/"
processed_path = path + dataset + "processed/"
model_path = "models/"
model_path_valid = "models/valid/"
# In[3]:
def TOP1(y_true, y_pred):
y1 = y_pred * y_true
y2 = K.sum(y1, axis=1)[:, np.newaxis]
y3 = y_true - y1
return (K.sum(K.sigmoid(y_pred - y2)) + y3 * y3) / tf.cast(tf.shape(y_true)[0], tf.float32)
loss = TOP1
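# --- Added sketch (not part of the original notebook) -------------------------
# Minimal sanity check for the TOP1 loss defined above; nothing in the training
# pipeline calls it. The tiny batch below is an assumption made only for the
# illustration. Because v2 behavior is disabled, the result is evaluated in a
# tf.Session.
def _top1_sanity_check():
    y_true = tf.constant([[1., 0., 0.], [0., 1., 0.]])        # one-hot targets
    y_pred = tf.constant([[0.9, 0.2, 0.1], [0.3, 0.8, 0.2]])  # predicted scores
    with tf.Session() as sess:
        return sess.run(TOP1(y_true, y_pred))
# -------------------------------------------------------------------------------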
def create_prnn_model(left_input_size, right_input_size, batch_size = 512, hidden_units = 100, o_activation='softmax', lr = 0.001):
emb_size = 50
size = emb_size
# left input - item vector
input_left = Input(batch_shape=(batch_size, 1, left_input_size), name='input_left')
gru_left, gru_left_states = GRU(hidden_units, stateful=True, return_state=True, name='gru_left')(input_left)
# right input - feature vector
input_right = Input(batch_shape=(batch_size, 1, right_input_size), name='input_right')
gru_right, gru_right_states = GRU(hidden_units, stateful=True, return_state=True, name='gru_right')(input_right)
# merging both layers and creating the model
merged = concatenate([gru_left, gru_right])
    # change softmax for another activation function?
output = Dense(left_input_size, activation=o_activation, name='output')(merged)
model = Model(inputs=[input_left, input_right], outputs=output, name='gru4rec')
encoder = Model(inputs=[input_left, input_right], outputs=merged)
# define model's optimizer
#optimizer = optim.Optimizer(optimizer=self.optimizer, lr=self.lr)
#opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
opt = keras.optimizers.Adagrad(lr=lr)
# define model's loss function --> implement here the top1 loss function
# loss_function = loss.LossFunction(loss_type=self.loss_function)
#model.compile(loss=loss_function, optimizer=opt, metrics=['accuracy'])
model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
filepath = model_path_valid + 'prnn_cb12_checkpoint.h5'
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=2, save_best_only=True, mode='min')
callbacks_list = []
model.summary()
#plot_model(model, show_shapes=True, to_file='rnn-structure.png')
return model, encoder
def get_states(model):
#return the actual states of the layers
return [K.get_value(s) for s,_ in model.state_updates]
def freeze_layer(model, layer_name, lr):
if layer_name == 'gru_left':
# gru left layer will not be trained this mini batch
model.get_layer(layer_name).trainable = False
# but gru right will
model.get_layer('gru_right').trainable = True
elif layer_name == 'gru_right':
# gru right layer will not be trained this mini batch
model.get_layer(layer_name).trainable = False
# but gru left will
model.get_layer('gru_left').trainable = True
else:
raise NotImplementedError
# opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
opt = keras.optimizers.Adagrad(lr=lr)
model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
return model
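# --- Added sketch (not part of the original notebook) -------------------------
# Illustration of how create_prnn_model and freeze_layer are meant to work
# together for alternating training of the parallel RNN: one mini batch updates
# the item GRU while the feature GRU is frozen, the next does the opposite.
# The dimensions and the train_on_batch calls are assumptions for the sketch only.
def _alternating_training_sketch(item_dim=1000, feature_dim=115, lr=0.01):
    model, encoder = create_prnn_model(item_dim, feature_dim, lr=lr)
    model = freeze_layer(model, 'gru_right', lr)  # feature branch frozen -> item branch trains
    # model.train_on_batch([item_batch, feature_batch], targets)
    model = freeze_layer(model, 'gru_left', lr)   # item branch frozen -> feature branch trains
    # model.train_on_batch([item_batch, feature_batch], targets)
    return model, encoder
# -------------------------------------------------------------------------------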
# In[4]:
train_path = '../../data/' + dataset + 'processed/valid_train_14d.csv'
train = pd.read_csv(train_path, sep='\t')[['session_id', 'item_id', 'created_at']]
interactions = pd.read_csv('../../data/' + dataset + 'interim/interactions.csv', header=0, sep='\t')
items = pd.read_csv('../../data/' + dataset + 'interim/items.csv', header=0, sep='\t')
view_fields = ["item_id", "state", "ReqTopic", "DescTopic", "TitTopic"]
common_items = items.merge(interactions, on=['item_id'])[view_fields].drop_duplicates()
print(common_items.head(3))
## to test - delete after
#train = train.head(6)
#print(train.session_id)
#common_items = common_items.merge(train, on=['item_id'])[view_fields].drop_duplicates()
## delete
item_count = len(train['item_id'].unique())
session_count = len(train['created_at'].unique())
print(len(common_items))
# common_items.head(10)
# In[5]:
# CareerBuilder12 items need to be converted to dummies
common = common_items
common["item_id"] = common["item_id"].astype('str')
common["DescTopic"] = common["DescTopic"].astype('str')
common["TitTopic"] = common["TitTopic"].astype('str')
common["ReqTopic"] = common["ReqTopic"].astype('str')
df2 = pd.DataFrame(index=common.index)
s1 = pd.get_dummies(common["state"].fillna("").str.split(",").apply(pd.Series).stack(), prefix="state").sum(level=0)
df2 = pd.concat([df2, s1], axis=1)
s1 = pd.get_dummies(common["ReqTopic"].fillna("").str.split(",").apply(pd.Series).stack(), prefix="ReqTopic").sum(level=0)
df2 = pd.concat([df2, s1], axis=1)
df2 = df2.drop(["state_", "ReqTopic_"], axis=1, errors="ignore")
s1 = pd.get_dummies(common["DescTopic"].fillna("").str.split(",").apply(pd.Series).stack(), prefix="DescTopic").sum(level=0)
df2 = pd.concat([df2, s1], axis=1)
s1 = pd.get_dummies(common["TitTopic"].fillna("").str.split(",").apply(pd.Series).stack(), prefix="TitTopic").sum(level=0)
df2 = pd.concat([df2, s1], axis=1)
df2 = df2.drop(["DescTopic_", "TitTopic_"], axis=1, errors="ignore")
common = common.drop(["state", "ReqTopic", "DescTopic", "TitTopic"], axis=1)
df2 = pd.concat([common, df2], axis=1)
print(df2.head(2))
one_hot = df2
print(one_hot.shape)
# number of content features per item
feature_size = one_hot.shape[1] - 1
item_encodings = {}
for index, row in one_hot.iterrows():
item_id = str(row["item_id"])
item_encodings[item_id] = row.values[1:]
print(len(item_encodings))
# In[6]:
print(feature_size)
print(item_count)
print(len(common.item_id.unique()))
for value in common.item_id.unique():
print(value)
print(item_encodings[value])
print(type(item_encodings[value]))
break
empty_feature_vec = np.zeros(feature_size, dtype=int)
# In[9]:
class SessionDataset:
"""Credit to yhs-968/pyGRU4REC."""
def __init__(self, data, sep='\t', session_key='session_id', item_key='item_id', time_key='created_at', n_samples=-1, itemmap=None, time_sort=False):
"""
Args:
path: path of the csv file
sep: separator for the csv
session_key, item_key, time_key: name of the fields corresponding to the sessions, items, time
n_samples: the number of samples to use. If -1, use the whole dataset.
itemmap: mapping between item IDs and item indices
time_sort: whether to sort the sessions by time or not
"""
self.df = data
self.session_key = session_key
self.item_key = item_key
self.time_key = time_key
self.time_sort = time_sort
self.add_item_indices(itemmap=itemmap)
self.df.sort_values([session_key, time_key], inplace=True)
# Sort the df by time, and then by session ID. That is, df is sorted by session ID and
# clicks within a session are next to each other, where the clicks within a session are time-ordered.
self.click_offsets = self.get_click_offsets()
#array of the positions where there is a change of session.
#len = len(session_idx_arr) + 1
self.session_idx_arr = self.order_session_idx()
#array of sessions [0 1 2 3 4 .... n-1]
def get_click_offsets(self):
"""
Return the offsets of the beginning clicks of each session IDs,
where the offset is calculated against the first click of the first session ID.
"""
offsets = np.zeros(self.df[self.session_key].nunique() + 1, dtype=np.int32)
# group & sort the df by session_key and get the offset values
offsets[1:] = self.df.groupby(self.session_key).size().cumsum()
return offsets
def order_session_idx(self):
""" Order the session indices """
if self.time_sort:
# starting time for each sessions, sorted by session IDs
sessions_start_time = self.df.groupby(self.session_key)[self.time_key].min().values
# order the session indices by session starting times
session_idx_arr = np.argsort(sessions_start_time)
else:
session_idx_arr = np.arange(self.df[self.session_key].nunique())
return session_idx_arr
def add_item_indices(self, itemmap=None):
"""
Add item index column named "item_idx" to the df
Args:
itemmap (pd.DataFrame): mapping between the item Ids and indices
"""
if itemmap is None:
item_ids = self.df[self.item_key].unique() # unique item ids
item2idx = pd.Series(data=np.arange(len(item_ids)),
index=item_ids)
itemmap = pd.DataFrame({self.item_key:item_ids,
'item_idx':item2idx[item_ids].values})
self.itemmap = itemmap
self.df = pd.merge(self.df, self.itemmap, on=self.item_key, how='inner')
@property
def items(self):
return self.itemmap.item_id.unique()
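# --- Added sketch (not part of the original notebook) -------------------------
# Tiny example of what SessionDataset precomputes. The toy frame is made up:
# with two sessions of lengths 3 and 2, click_offsets is [0, 3, 5] and
# session_idx_arr is [0, 1].
def _session_dataset_example():
    toy = pd.DataFrame({
        'session_id': [1, 1, 1, 2, 2],
        'item_id':    ['a', 'b', 'a', 'c', 'b'],
        'created_at': [0, 1, 2, 0, 1],
    })
    ds = SessionDataset(toy)
    return ds.click_offsets, ds.session_idx_arr
# -------------------------------------------------------------------------------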
# In[10]:
class SessionDataLoader:
"""Credit to yhs-968/pyGRU4REC."""
def __init__(self, dataset, batch_size):
"""
A class for creating session-parallel mini-batches.
Args:
dataset (SessionDataset): the session dataset to generate the batches from
batch_size (int): size of the batch
"""
self.dataset = dataset
self.batch_size = batch_size
self.done_sessions_counter = 0
def __iter__(self):
""" Returns the iterator for producing session-parallel training mini-batches.
Yields:
input (B,): Item indices that will be encoded as one-hot vectors later.
target (B,): a Variable that stores the target item indices
masks: Numpy array indicating the positions of the sessions to be terminated
"""
df = self.dataset.df
session_key='session_id'
item_key='item_id'
time_key='created_at'
self.n_items = df[item_key].nunique()
click_offsets = self.dataset.click_offsets
#print(click_offsets)
session_idx_arr = self.dataset.session_idx_arr
#print(session_idx_arr)
iters = np.arange(self.batch_size)
#iters = np.arange(1)
maxiter = iters.max()
start = click_offsets[session_idx_arr[iters]]
end = click_offsets[session_idx_arr[iters] + 1]
#print(start)
#print(end)
mask = [] # indicator for the sessions to be terminated
finished = False
while not finished:
            # minimum length of all the sessions
minlen = (end - start).min()
# Item indices (for embedding) for clicks where the first sessions start
idx_target = df.item_idx.values[start]
for i in range(minlen - 1):
# Build inputs & targets
idx_input = idx_target
idx_target = df.item_idx.values[start + i + 1]
inp = idx_input
target = idx_target
yield inp, target, mask
# click indices where a particular session meets second-to-last element
start = start + (minlen - 1)
            # see how many sessions should terminate
mask = np.arange(len(iters))[(end - start) <= 1]
self.done_sessions_counter = len(mask)
for idx in mask:
maxiter += 1
if maxiter >= len(click_offsets) - 1:
finished = True
break
# update the next starting/ending point
iters[idx] = maxiter
start[idx] = click_offsets[session_idx_arr[maxiter]]
end[idx] = click_offsets[session_idx_arr[maxiter] + 1]
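# --- Added sketch (not part of the original notebook) -------------------------
# Illustration of session-parallel batching: each column of a mini batch follows
# one session, and the yielded mask lists the batch positions whose session just
# ended (used later to reset the corresponding GRU states). With the toy data
# below (sessions of lengths 3 and 2, batch_size=2) exactly one batch is yielded.
def _session_loader_example():
    toy = pd.DataFrame({
        'session_id': [1, 1, 1, 2, 2],
        'item_id':    ['a', 'b', 'a', 'c', 'b'],
        'created_at': [0, 1, 2, 0, 1],
    })
    loader = SessionDataLoader(SessionDataset(toy), batch_size=2)
    return [(inp.copy(), target.copy(), list(mask)) for inp, target, mask in loader]
# -------------------------------------------------------------------------------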
# # Hyperparameter definitions
# In[12]:
batch_size = 512
acts = ['softmax', 'tanh']
l_sizes = [100, 1000]
lrs = [0.001, 0.01]
# # Predict for hyperparameters
# In[ ]:
import keras.losses
keras.losses.TOP1 = TOP1
pd.set_option('display.max_colwidth', -1)
train_dataset = SessionDataset(train)
loader = SessionDataLoader(train_dataset, batch_size=batch_size)
def predict_function(sid, test_session, pr, item_idx_map, idx_item_map, cut_off=20,
session_key='session_id', item_key='item_id', time_key='created_at'):
test_session.sort_values([time_key], inplace=True)
# get first and only session_id (as we grouped it before calling this method)
session_id = test_session[session_key].unique()[0]
log_columns = ["session_id", "input_items", "input_count", "position", "remaining_items", "remaining_count", "predictions"]
log_df = pd.DataFrame(columns = log_columns)
session_length = len(test_session)
    il = np.zeros((batch_size, 1, len(item_idx_map)))  # one-hot item input batch
    ir = np.zeros((batch_size, 1, 115))  # item content-feature input batch
for i in range(session_length -1):
# use current item as reference point (rest is for testing)
current_item_id = test_session[item_key].values[i]
item_vec = np.zeros(len(item_idx_map), dtype=int)
item_idx = item_idx_map[current_item_id]
item_vec[item_idx] = 1
# set vector in batch input
il[i, 0] = item_vec
#item_features = item_encodings[current_item_id]
# use empty feature vec if missing
item_features = empty_feature_vec
if current_item_id in item_encodings.keys():
            item_features = item_encodings[current_item_id]
#item_features = item_features.reshape(1,1, len(item_features))
ir[i, 0] = item_features
# do batch prediction
    pred = pr.predict([il, ir], batch_size=batch_size)  # use the model passed in as 'pr'
# for every subsession prediction
for i in range(session_length-1):
preds = pred[i]
topn_idx_preds = preds.argsort()[-cut_off:][::-1]
predictions = []
# for every recommended item index
for item_idx in topn_idx_preds:
pred_item = idx_item_map[item_idx]
predictions.append(pred_item)
current_input_set = test_session[item_key].values[:i+1]
remaining_test_set = test_session[item_key].values[i+1:]
position = "MID"
if i == 0:
position = "FIRST"
if len(remaining_test_set) == 1:
position = "LAST"
log_df = log_df.append({
"session_id": sid,
"input_items": ','.join(map(str, current_input_set)),
"input_count": len(current_input_set),
"position": position,
"remaining_items": ','.join(map(str, remaining_test_set)),
"remaining_count": len(remaining_test_set),
"predictions": ','.join(map(str, predictions))
}, ignore_index=True)
log_df['input_count'] = log_df['input_count'].astype(int)
log_df['remaining_count'] = log_df['remaining_count'].astype(int)
return log_df
test_path = '../../data/' + dataset + 'processed/valid_test_14d.csv'
test = pd.read_csv(test_path, sep='\t')[['session_id', 'item_id', 'created_at']]
test_dataset = SessionDataset(test)
test_generator = SessionDataLoader(test_dataset, batch_size=batch_size)
session_groups = test.groupby("session_id")
mapitem = loader.dataset.itemmap
item_idx_map = {}
idx_item_map = {}
for index, row in mapitem.iterrows():
item_id = row["item_id"]
item_idx = row["item_idx"]
item_idx_map[item_id] = item_idx
idx_item_map[item_idx] = item_id
predict_path = "../../data/cb12/interim/predict/hyperparam/"
for act in acts:
for ls in l_sizes:
for lr in lrs:
model_name = "cb12_prnn_a_" + act + "_ls_" + str(ls) + "_lr_" + str(lr) + ".model"
model = pickle.load(open(model_path_valid + model_name, 'rb'))
print("Loaded: " + model_name)
res_list = []
# predict
report_freq = len(session_groups) // 5
count = 0
for sid, session in session_groups:
pred_df = predict_function(sid, session, model, item_idx_map, idx_item_map)
res_list.append(pred_df)
# reset states
model.get_layer('gru_left').reset_states()
model.get_layer('gru_right').reset_states()
# print progress
count += 1
if count % report_freq == 0:
print("Predicted for " + str(count) + " sessions. " + str(len(session_groups) - count) + " sessions to go." )
# concat results
res = pd.concat(res_list)
res = res.reindex(columns = ["session_id", "input_items", "input_count", "position", "remaining_items", "remaining_count", "predictions"])
store_name = model_name.replace("cb12_", "").replace(".model", "")
res.to_csv(predict_path + "test_14d_" + store_name + ".csv", sep='\t')
print("Stored predictions: " + predict_path + "test_14d_" + store_name + ".csv")
|
{"hexsha": "cdb6ef3090bfc1b9dd21862cda58aa974b9762c7", "size": 17890, "ext": "py", "lang": "Python", "max_stars_repo_path": "ipython/3_Training_Predicting/prnn_cb12_pred_hyp.py", "max_stars_repo_name": "samuelru/session-knn-ae", "max_stars_repo_head_hexsha": "c6232667dbe57f82391d487875b52f651ca08a21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-12-08T12:58:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-29T23:52:03.000Z", "max_issues_repo_path": "ipython/3_Training_Predicting/prnn_cb12_pred_hyp.py", "max_issues_repo_name": "samuelru/session-knn-ae", "max_issues_repo_head_hexsha": "c6232667dbe57f82391d487875b52f651ca08a21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-20T14:52:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T08:43:11.000Z", "max_forks_repo_path": "ipython/3_Training_Predicting/prnn_cb12_pred_hyp.py", "max_forks_repo_name": "samuelru/session-knn-ae", "max_forks_repo_head_hexsha": "c6232667dbe57f82391d487875b52f651ca08a21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-12-08T13:09:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-04T04:53:51.000Z", "avg_line_length": 36.2880324544, "max_line_length": 153, "alphanum_fraction": 0.6367244271, "include": true, "reason": "import numpy", "num_tokens": 4325}
|
"""
This contains classes used for analyzing the sentiments of input texts
"""
import re
import pprint
import shelve
# import IOMDataService as DS
# from TextFiltration import Sentences, Words, Lemmatized, Bigrams, Trigrams
import numpy as np
from senti_classifier import senti_classifier
import nltk
from nltk.corpus import sentiwordnet as swn
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from nltk.tokenize import wordpunct_tokenize
class SentiSynsetTools(object):
"""
Tools for loading and working with SentiWordNet stuff
"""
def load_senti_synsets_for_word(self, word):
"""
Get a list of senti_synsets for the word
Args:
word: String to lookup
Returns:
List of senti_synsets
Example:
input: slow
result:
SentiSynset('decelerate.v.01'),
SentiSynset('slow.v.02'),
SentiSynset('slow.v.03'),
SentiSynset('slow.a.01'),
SentiSynset('slow.a.02'),
SentiSynset('slow.a.04'),
SentiSynset('slowly.r.01'),
SentiSynset('behind.r.03')]
"""
        return list(swn.senti_synsets(word))
def get_scores_from_senti_synset(self, string_name_of_synset, return_format=tuple):
"""
Args:
string_name_of_synset: The string name of the synset that want scores for
return_format: What kind of object to return. Allowed values are tuple, dict
Returns:
On default of tuple returns (positiveScore, negativeScore, objScore)
"""
breakdown = swn.senti_synset(string_name_of_synset)
if return_format is tuple:
return (breakdown.pos_score(), breakdown.neg_score(), breakdown.obj_score())
elif return_format is dict:
return {
'posScore': breakdown.pos_score(),
'negScore': breakdown.neg_score(),
'objScore': breakdown.obj_score()
}
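# --- Added usage sketch (not part of the original file) ------------------------
# Hedged example of SentiSynsetTools: look up the senti-synsets of a word and
# pull the (pos, neg, obj) scores of the first sense. Assumes the 'wordnet' and
# 'sentiwordnet' NLTK corpora have been downloaded; the word 'good' is arbitrary.
def _senti_tools_example(word='good'):
    tools = SentiSynsetTools()
    synsets = tools.load_senti_synsets_for_word(word)
    if not synsets:
        return None
    return tools.get_scores_from_senti_synset(synsets[0].synset.name())
# --------------------------------------------------------------------------------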
class DisambiguationTools(object):
"""
"""
def disambiguate_word_senses(self, sentence, word):
"""
Attempts to determine the proper sense of the target
word from the sentence in which it appears.
Args:
sentence: String representation of the sentence
            word: String representation of the word
Returns:
Returns a synset which is the best guess.
Example:
disambiguateWordSenses('A cat is a good pet', 'cat')
OUT: Synset('cat.v.01')
"""
wordsynsets = wn.synsets(word)
bestScore = 0.0
result = None
for synset in wordsynsets:
for w in nltk.word_tokenize(sentence):
score = 0.0
for wsynset in wn.synsets(w):
sim = wn.path_similarity(wsynset, synset)
if(sim == None):
continue
else:
score += sim
if (score > bestScore):
bestScore = score
result = synset
return result
class TextPrepare(object):
"""
All tools for preparing text for processing
"""
def __init__(self):
self.stop_words = set(stopwords.words('english'))
self.stop_words.update(['.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}']) # remove it if you need punctuation
def prepare_text(self, tweet_text):
"""
Returns a bag of words
Prospective
Remove emoticons
:param tweet_text:
:return: list
"""
return [i.lower() for i in wordpunct_tokenize(tweet_text) if i.lower() not in self.stop_words]
class ComputeSentiments(object):
"""
"""
def __init__(self):
self.text_preparer = TextPrepare()
self.disambiguator = DisambiguationTools()
self.sentitools = SentiSynsetTools()
def compute_sentiments(self, tweet_text):
"""
:param tweet_text:
:return:
"""
tokens = self.text_preparer.prepare_text(tweet_text)
for word in tokens:
best_synset = self.disambiguator.disambiguate_word_senses(word, tweet_text)
# Compute the scores
scores_tuple = self.sentitools.get_scores_from_senti_synset(best_synset)
class ItemSentimentAnalyzer(object):
"""
This analyzes and returns the sentiment scores for a particular item
"""
def __init__(self):
pass
# DS.IOMService.__init__(self)
def computeSentimentScores(self, record, tokenizer):
"""
record is a dict which must have record['quote_text']. It normally should have record['quote_id'] or record['vin_id']
tokenizer is a tokenizer with a tokenize method. The unit of analysis (e.g., word, ngram, sentence) is determined by the tokenizer passed in
"""
self.text = record['quote_text']
# To allow this to be used with arbitrary inputs
try:
self.quoteID = record['quote_id']
except:
try:
self.quoteID = record['vin_id']
except:
# Make random ID if none exists
self.quoteID = 'ID' + str(np.random.rand())
# Tokenize the text into the appropriate units
self.tokens = tokenizer.tokenize(self.text)
# Calc number of tokens in the record
self.numTokens = len(self.tokens)
# Calc sentiment scores
self.pos_score, self.neg_score = senti_classifier.polarity_scores(self.tokens)
# Averages are needed because otherwise the score will vary with number of sentences
# Average positive sentiment score of the record
self.avgPos = self.pos_score / self.numTokens
# Average negative sentiment of the record
self.avgNeg = (self.neg_score / self.numTokens) * -1
# Net average sentiment of the record
self.netSent = self.avgPos + self.avgNeg
# Objectivity score (from chris potts )
self.obj_score = 1.0 - self.netSent
# Put the results in a dictionary
self.scores = dict(quoteID=self.quoteID, avgPos=self.avgPos, avgNeg=self.avgNeg, netSent=self.netSent)
return self.scores
#def makeDict(self):
# """
# Makes a dictionary for the result
# Keys: quote_id, avgPos, avgNeg, netSent
# """
# self.result_dict = dict(quote_id=self.quote_id, avgPos=self.avgPos, avgNeg=self.avgNeg, netSent=self.netSent)
# return self.result_dict
def saveSentiments(self, filepath):
"""
Saves the results
Args:
filepath: the path to the shelve file where the data is / is to be stored
"""
#self.makeDict()
self.to_save = self.scores
self.save_sentiment_data_to_file(filepath)
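# --- Added usage sketch (not part of the original file) ------------------------
# Hedged example of driving ItemSentimentAnalyzer: the record dict carries the
# text under 'quote_text' and an id under 'quote_id'; the tokenizer decides the
# unit of analysis (sentences here). Loading the punkt tokenizer is an
# assumption and requires nltk.download('punkt').
def _item_sentiment_example():
    sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    analyzer = ItemSentimentAnalyzer()
    record = {'quote_id': 'demo-1',
              'quote_text': 'The staff were wonderful. The wait was terrible.'}
    return analyzer.computeSentimentScores(record, sentence_tokenizer)
# --------------------------------------------------------------------------------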
class GroupSentiments:
"""
This is used to compute the sentiment scores for a group of items
"""
def __init__(self, data, groupname):
"""
Args:
data: a list of dictionaries that have been prepared by ItemSentiments to be saved
groupname: the name that the result will be stored with/ or the name to retrieve
"""
self.name = groupname
#self.datafile = datafile
self.quoteIDs = []
self.avgPos = []
self.avgNeg = []
self.netSent = []
for d in data:
            self.quoteIDs.append(d['quoteID'])  # key written by ItemSentimentAnalyzer.computeSentimentScores
self.avgPos.append(d['avgPos'])
self.avgNeg.append(d['avgNeg'])
self.netSent.append(d['netSent'])
self.overallpos = np.average(self.avgPos)
self.overallneg = np.average(self.avgNeg)
self.overallsent = np.average(self.netSent)
def saveSentiments(self, filepath):
"""
Saves the results
@param filepath The path to the saved data or to where it should be saved
@type string
"""
self.sentiments = dict(name=self.name, overallpos=self.overallpos, overallneg=self.overallneg,
overallsent=self.overallsent)
db = shelve.open(filepath)
db[str(self.sentiments['name'])] = self.sentiments
db.close()
print(self.sentiments)
class MultiItemSentimentAnalyzer(ItemSentimentAnalyzer):
def __init__(self, data_to_analyze, tokenizer, filepath, label):
"""
@param data_to_analyze List of dictionaries with items that itemsentimentanalzer can operate on
@type list
"""
ItemSentimentAnalyzer.__init__(self)
self.to_save = []
for record in data_to_analyze:
self.computeSentimentScores(record, tokenizer)
self.to_save.append(self.scores)
self.save_sentiment_data_to_file(filepath, label)
|
{"hexsha": "1a6dabe5888941644a552c39ff6e5464a6927692", "size": 8919, "ext": "py", "lang": "Python", "max_stars_repo_path": "SentimentTools/SentimentAnalysis.py", "max_stars_repo_name": "AdamSwenson/TwitterProject", "max_stars_repo_head_hexsha": "8c5dc7a57eac611b555058736d609f2f204cb836", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SentimentTools/SentimentAnalysis.py", "max_issues_repo_name": "AdamSwenson/TwitterProject", "max_issues_repo_head_hexsha": "8c5dc7a57eac611b555058736d609f2f204cb836", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-03-24T17:34:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-13T20:14:34.000Z", "max_forks_repo_path": "SentimentTools/SentimentAnalysis.py", "max_forks_repo_name": "AdamSwenson/TwitterProject", "max_forks_repo_head_hexsha": "8c5dc7a57eac611b555058736d609f2f204cb836", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.96875, "max_line_length": 148, "alphanum_fraction": 0.6025339164, "include": true, "reason": "import numpy", "num_tokens": 1979}
|
#include <mpi.h>
#include <sys/time.h>
#include <iostream>
#include <functional>
#include <algorithm>
#include <vector>
#include <string>
#include <sstream>
#ifdef THREADED
#ifndef _OPENMP
#define _OPENMP
#endif
#include <omp.h>
#endif
// These macros should be defined before stdint.h is included
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#include <stdint.h>
#ifdef NOTR1
#include <boost/tr1/memory.hpp>
#else
#include <tr1/memory>
#endif
//#include "include/pat_api.h"
double cblas_alltoalltime;
double cblas_allgathertime;
#ifdef _OPENMP
int cblas_splits = omp_get_max_threads();
#else
int cblas_splits = 1;
#endif
#include "../SpTuples.h"
#include "../SpDCCols.h"
#include "../SpParMat.h"
#include "../FullyDistVec.h"
#include "../FullyDistSpVec.h"
#include "../ParFriends.h"
#include "../DistEdgeList.h"
#define ITERS 16
#define EDGEFACTOR 16
using namespace std;
// 64-bit floor(log2(x)) function
// note: least significant bit is the "zeroth" bit
// pre: v > 0
unsigned int highestbitset(uint64_t v)
{
// b in binary is {10,1100, 11110000, 1111111100000000 ...}
const uint64_t b[] = {0x2ULL, 0xCULL, 0xF0ULL, 0xFF00ULL, 0xFFFF0000ULL, 0xFFFFFFFF00000000ULL};
const unsigned int S[] = {1, 2, 4, 8, 16, 32};
int i;
unsigned int r = 0; // result of log2(v) will go here
for (i = 5; i >= 0; i--)
{
if (v & b[i]) // highestbitset is on the left half (i.e. v > S[i] for sure)
{
v >>= S[i];
r |= S[i];
}
}
return r;
}
template <class T>
bool from_string(T & t, const string& s, std::ios_base& (*f)(std::ios_base&))
{
istringstream iss(s);
return !(iss >> f >> t).fail();
}
template <typename PARMAT>
void Symmetricize(PARMAT & A)
{
// boolean addition is practically a "logical or"
// therefore this doesn't destruct any links
PARMAT AT = A;
AT.Transpose();
A += AT;
}
int main(int argc, char* argv[])
{
MPI::Init(argc, argv);
//MPI::COMM_WORLD.Set_errhandler ( MPI::ERRORS_THROW_EXCEPTIONS );
int nprocs = MPI::COMM_WORLD.Get_size();
int myrank = MPI::COMM_WORLD.Get_rank();
if(argc < 3)
{
if(myrank == 0)
{
cout << "Usage: ./Graph500 <Auto,Force,Input> <Available RAM in MB (per core) | Scale Forced | Input Name>" << endl;
cout << "Example: ./Graph500 Auto 1024" << endl;
}
MPI::Finalize();
return -1;
}
{
typedef SelectMaxSRing<bool, int64_t> SR;
typedef SpParMat < int64_t, bool, SpDCCols<int64_t,bool> > PSpMat_Bool;
typedef SpParMat < int64_t, int, SpDCCols<int64_t,int> > PSpMat_Int;
typedef SpParMat < int64_t, int64_t, SpDCCols<int64_t,int64_t> > PSpMat_Int64;
typedef SpParMat < int32_t, int32_t, SpDCCols<int32_t,int32_t> > PSpMat_Int32;
// Declare objects
PSpMat_Bool A;
FullyDistVec<int64_t, int64_t> degrees; // degrees of vertices (including multi-edges and self-loops)
FullyDistVec<int64_t, int64_t> nonisov; // id's of non-isolated (connected) vertices
unsigned scale;
OptBuf<int64_t, int64_t> optbuf;
bool scramble = false;
if(string(argv[1]) == string("Input")) // input option
{
ifstream input(argv[2]);
A.ReadDistribute(input, 0); // read it from file
SpParHelper::Print("Read input");
PSpMat_Int64 * G = new PSpMat_Int64(A);
G->Reduce(degrees, Row, plus<int64_t>(), static_cast<int64_t>(0)); // identity is 0
delete G;
Symmetricize(A); // A += A';
FullyDistVec<int64_t, int64_t> * ColSums = new FullyDistVec<int64_t, int64_t>(A.getcommgrid(), 0);
A.Reduce(*ColSums, Column, plus<int64_t>(), static_cast<int64_t>(0)); // plus<int64_t> matches the type of the output vector
nonisov = ColSums->FindInds(bind2nd(greater<int64_t>(), 0)); // only the indices of non-isolated vertices
delete ColSums;
A = A(nonisov, nonisov);
}
else if(string(argv[1]) == string("Binary"))
{
uint64_t n, m;
from_string(n,string(argv[3]),std::dec);
from_string(m,string(argv[4]),std::dec);
ostringstream outs;
outs << "Reading " << argv[2] << " with " << n << " vertices and " << m << " edges" << endl;
SpParHelper::Print(outs.str());
DistEdgeList<int64_t> * DEL = new DistEdgeList<int64_t>(argv[2], n, m);
SpParHelper::Print("Read binary input to distributed edge list\n");
PermEdges(*DEL);
SpParHelper::Print("Permuted Edges\n");
RenameVertices(*DEL);
//DEL->Dump32bit("graph_permuted");
SpParHelper::Print("Renamed Vertices\n");
// conversion from distributed edge list, keeps self-loops, sums duplicates
PSpMat_Int64 * G = new PSpMat_Int64(*DEL, false);
delete DEL; // free memory before symmetricizing
SpParHelper::Print("Created Int64 Sparse Matrix\n");
G->Reduce(degrees, Row, plus<int64_t>(), static_cast<int64_t>(0)); // Identity is 0
A = PSpMat_Bool(*G); // Convert to Boolean
delete G;
int64_t removed = A.RemoveLoops();
ostringstream loopinfo;
loopinfo << "Converted to Boolean and removed " << removed << " loops" << endl;
SpParHelper::Print(loopinfo.str());
A.PrintInfo();
FullyDistVec<int64_t, int64_t> * ColSums = new FullyDistVec<int64_t, int64_t>(A.getcommgrid(), 0);
FullyDistVec<int64_t, int64_t> * RowSums = new FullyDistVec<int64_t, int64_t>(A.getcommgrid(), 0);
A.Reduce(*ColSums, Column, plus<int64_t>(), static_cast<int64_t>(0));
A.Reduce(*RowSums, Row, plus<int64_t>(), static_cast<int64_t>(0));
ColSums->EWiseApply(*RowSums, plus<int64_t>());
delete RowSums;
nonisov = ColSums->FindInds(bind2nd(greater<int64_t>(), 0)); // only the indices of non-isolated vertices
delete ColSums;
SpParHelper::Print("Found (and permuted) non-isolated vertices\n");
nonisov.RandPerm(); // so that A(v,v) is load-balanced (both memory and time wise)
A.PrintInfo();
A(nonisov, nonisov, true); // in-place permute to save memory
SpParHelper::Print("Dropped isolated vertices from input\n");
A.PrintInfo();
Symmetricize(A); // A += A';
SpParHelper::Print("Symmetricized\n");
//A.Dump("graph_symmetric");
#ifdef THREADED
ostringstream tinfo;
tinfo << "Threading activated with " << cblas_splits << " threads" << endl;
SpParHelper::Print(tinfo.str());
A.ActivateThreading(cblas_splits);
#endif
}
else
{
if(string(argv[1]) == string("Auto"))
{
// calculate the problem size that can be solved
// number of nonzero columns are at most the matrix dimension (for small p)
// for large p, though, nzc = nnz since each subcolumn will have a single nonzero
// so assume (1+8+8+8)*nedges for the uint64 case and (1+4+4+4)*nedges for uint32
uint64_t raminbytes = static_cast<uint64_t>(atoi(argv[2])) * 1024 * 1024;
uint64_t peredge = 1+3*sizeof(int64_t);
uint64_t maxnedges = raminbytes / peredge;
uint64_t maxvertices = maxnedges / 32;
unsigned maxscale = highestbitset(maxvertices * nprocs);
string name;
if(maxscale > 36) // at least 37 so it fits comfortably along with vectors
{
name = "Medium";
scale = 36;
}
else if(maxscale > 32)
{
name = "Small";
scale = 32;
}
else if(maxscale > 29)
{
name = "Mini";
scale = 29;
}
else if(maxscale > 26)
{
name = "Toy";
scale = 26;
}
else
{
name = "Debug";
scale = 20; // fits even to single processor
}
ostringstream outs;
outs << "Max scale allowed : " << maxscale << endl;
outs << "Using the " << name << " problem" << endl;
SpParHelper::Print(outs.str());
}
else if(string(argv[1]) == string("Force"))
{
scale = static_cast<unsigned>(atoi(argv[2]));
ostringstream outs;
outs << "Forcing scale to : " << scale << endl;
SpParHelper::Print(outs.str());
if(argc > 3 && string(argv[3]) == string("FastGen"))
{
SpParHelper::Print("Using fast vertex permutations; skipping edge permutations (like v2.1)\n");
scramble = true;
}
}
else
{
SpParHelper::Print("Unknown option\n");
MPI::Finalize();
return -1;
}
// this is an undirected graph, so A*x does indeed BFS
double initiator[4] = {.57, .19, .19, .05};
double t01 = MPI_Wtime();
double t02;
DistEdgeList<int64_t> * DEL = new DistEdgeList<int64_t>();
if(!scramble)
{
DEL->GenGraph500Data(initiator, scale, EDGEFACTOR);
SpParHelper::Print("Generated edge lists\n");
t02 = MPI_Wtime();
ostringstream tinfo;
tinfo << "Generation took " << t02-t01 << " seconds" << endl;
SpParHelper::Print(tinfo.str());
PermEdges(*DEL);
SpParHelper::Print("Permuted Edges\n");
//DEL->Dump64bit("edges_permuted");
//SpParHelper::Print("Dumped\n");
RenameVertices(*DEL); // intermediate: generates RandPerm vector, using MemoryEfficientPSort
SpParHelper::Print("Renamed Vertices\n");
}
else // fast generation
{
DEL->GenGraph500Data(initiator, scale, EDGEFACTOR, true, true ); // generate packed edges
SpParHelper::Print("Generated renamed edge lists\n");
t02 = MPI_Wtime();
ostringstream tinfo;
tinfo << "Generation took " << t02-t01 << " seconds" << endl;
SpParHelper::Print(tinfo.str());
}
// Start Kernel #1
MPI::COMM_WORLD.Barrier();
double t1 = MPI_Wtime();
// conversion from distributed edge list, keeps self-loops, sums duplicates
PSpMat_Int32 * G = new PSpMat_Int32(*DEL, false);
delete DEL; // free memory before symmetricizing
SpParHelper::Print("Created Sparse Matrix (with int32 local indices and values)\n");
MPI::COMM_WORLD.Barrier();
double redts = MPI_Wtime();
G->Reduce(degrees, Row, plus<int64_t>(), static_cast<int64_t>(0)); // Identity is 0
MPI::COMM_WORLD.Barrier();
double redtf = MPI_Wtime();
ostringstream redtimeinfo;
redtimeinfo << "Calculated degrees in " << redtf-redts << " seconds" << endl;
SpParHelper::Print(redtimeinfo.str());
A = PSpMat_Bool(*G); // Convert to Boolean
delete G;
int64_t removed = A.RemoveLoops();
ostringstream loopinfo;
loopinfo << "Converted to Boolean and removed " << removed << " loops" << endl;
SpParHelper::Print(loopinfo.str());
A.PrintInfo();
FullyDistVec<int64_t, int64_t> * ColSums = new FullyDistVec<int64_t, int64_t>(A.getcommgrid(), 0);
FullyDistVec<int64_t, int64_t> * RowSums = new FullyDistVec<int64_t, int64_t>(A.getcommgrid(), 0);
A.Reduce(*ColSums, Column, plus<int64_t>(), static_cast<int64_t>(0));
A.Reduce(*RowSums, Row, plus<int64_t>(), static_cast<int64_t>(0));
SpParHelper::Print("Reductions done\n");
ColSums->EWiseApply(*RowSums, plus<int64_t>());
SpParHelper::Print("Intersection of colsums and rowsums found\n");
delete RowSums;
// TODO: seg fault in FindInds for scale 33
nonisov = ColSums->FindInds(bind2nd(greater<int64_t>(), 0)); // only the indices of non-isolated vertices
delete ColSums;
SpParHelper::Print("Found (and permuted) non-isolated vertices\n");
nonisov.RandPerm(); // so that A(v,v) is load-balanced (both memory and time wise)
A.PrintInfo();
A(nonisov, nonisov, true); // in-place permute to save memory
SpParHelper::Print("Dropped isolated vertices from input\n");
A.PrintInfo();
Symmetricize(A); // A += A';
SpParHelper::Print("Symmetricized\n");
#ifdef THREADED
ostringstream tinfo;
tinfo << "Threading activated with " << cblas_splits << " threads" << endl;
SpParHelper::Print(tinfo.str());
A.ActivateThreading(cblas_splits);
#endif
A.PrintInfo();
MPI::COMM_WORLD.Barrier();
double t2=MPI_Wtime();
ostringstream k1timeinfo;
k1timeinfo << (t2-t1) - (redtf-redts) << " seconds elapsed for Kernel #1" << endl;
SpParHelper::Print(k1timeinfo.str());
}
A.PrintInfo();
float balance = A.LoadImbalance();
ostringstream outs;
outs << "Load balance: " << balance << endl;
SpParHelper::Print(outs.str());
MPI::COMM_WORLD.Barrier();
double t1 = MPI_Wtime();
// TODO: Threaded code crashes in FullyDistVec()
// Now that every remaining vertex is non-isolated, randomly pick ITERS many of them as starting vertices
degrees = degrees(nonisov); // fix the degrees array too
degrees.PrintInfo("Degrees array");
// degrees.DebugPrint();
FullyDistVec<int64_t, int64_t> Cands(ITERS, 0, 0);
double nver = (double) degrees.TotalLength();
MTRand M; // generate random numbers with Mersenne Twister
vector<double> loccands(ITERS);
vector<int64_t> loccandints(ITERS);
if(myrank == 0)
{
for(int i=0; i<ITERS; ++i)
loccands[i] = M.rand();
copy(loccands.begin(), loccands.end(), ostream_iterator<double>(cout," ")); cout << endl;
transform(loccands.begin(), loccands.end(), loccands.begin(), bind2nd( multiplies<double>(), nver ));
for(int i=0; i<ITERS; ++i)
loccandints[i] = static_cast<int64_t>(loccands[i]);
copy(loccandints.begin(), loccandints.end(), ostream_iterator<double>(cout," ")); cout << endl;
}
MPI::COMM_WORLD.Barrier();
MPI::COMM_WORLD.Bcast(&(loccandints[0]), ITERS, MPIType<int64_t>(),0);
MPI::COMM_WORLD.Barrier();
for(int i=0; i<ITERS; ++i)
{
Cands.SetElement(i,loccandints[i]);
}
#ifdef THREADED
#define MAXTRIALS 1
#else
#define MAXTRIALS 2
#endif
for(int trials =0; trials < MAXTRIALS; trials++) // try different algorithms for BFS
{
cblas_allgathertime = 0;
cblas_alltoalltime = 0;
if(trials == 1) // second run for multithreaded turned off
{
A.OptimizeForGraph500(optbuf);
MPI_Pcontrol(1,"BFS_SPA_Buf");
}
else
MPI_Pcontrol(1,"BFS");
double MTEPS[ITERS]; double INVMTEPS[ITERS]; double TIMES[ITERS]; double EDGES[ITERS];
for(int i=0; i<ITERS; ++i)
{
// FullyDistVec (shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id);
FullyDistVec<int64_t, int64_t> parents ( A.getcommgrid(), A.getncol(), (int64_t) -1, (int64_t) -1); // identity is -1
// FullyDistSpVec ( shared_ptr<CommGrid> grid, IT glen);
FullyDistSpVec<int64_t, int64_t> fringe(A.getcommgrid(), A.getncol()); // numerical values are stored 0-based
MPI::COMM_WORLD.Barrier();
double t1 = MPI_Wtime();
fringe.SetElement(Cands[i], Cands[i]);
int iterations = 0;
while(fringe.getnnz() > 0)
{
fringe.setNumToInd();
//fringe.PrintInfo("fringe before SpMV");
fringe = SpMV<SR>(A, fringe,true, optbuf); // SpMV with sparse vector (with indexisvalue flag set), optimization enabled
// fringe.PrintInfo("fringe after SpMV");
#ifdef TIMING
MPI::COMM_WORLD.Barrier();
double t_a1 = MPI_Wtime();
#endif
fringe = EWiseMult(fringe, parents, true, (int64_t) -1); // clean-up vertices that already has parents
#ifdef TIMING
MPI::COMM_WORLD.Barrier();
double t_a2 = MPI_Wtime();
ostringstream ewisemtime;
ewisemtime << "EWiseMult took " << t_a2-t_a1 << " seconds" << endl;
SpParHelper::Print(ewisemtime.str());
#endif
// fringe.PrintInfo("fringe after cleanup");
parents += fringe;
// parents.PrintInfo("Parents after addition");
iterations++;
MPI::COMM_WORLD.Barrier();
}
MPI::COMM_WORLD.Barrier();
double t2 = MPI_Wtime();
FullyDistSpVec<int64_t, int64_t> parentsp = parents.Find(bind2nd(greater<int64_t>(), -1));
parentsp.Apply(set<int64_t>(1));
// we use degrees on the directed graph, so that we don't count the reverse edges in the teps score
int64_t nedges = EWiseMult(parentsp, degrees, false, (int64_t) 0).Reduce(plus<int64_t>(), (int64_t) 0);
ostringstream outnew;
outnew << i << "th starting vertex was " << Cands[i] << endl;
outnew << "Number iterations: " << iterations << endl;
outnew << "Number of vertices found: " << parentsp.Reduce(plus<int64_t>(), (int64_t) 0) << endl;
outnew << "Number of edges traversed: " << nedges << endl;
outnew << "BFS time: " << t2-t1 << " seconds" << endl;
outnew << "MTEPS: " << static_cast<double>(nedges) / (t2-t1) / 1000000.0 << endl;
outnew << "Total communication (average so far): " << (cblas_allgathertime + cblas_alltoalltime) / (i+1) << endl;
TIMES[i] = t2-t1;
EDGES[i] = nedges;
MTEPS[i] = static_cast<double>(nedges) / (t2-t1) / 1000000.0;
SpParHelper::Print(outnew.str());
}
SpParHelper::Print("Finished\n");
ostringstream os;
if(trials == 1)
MPI_Pcontrol(-1,"BFS_SPA_Buf");
else
MPI_Pcontrol(-1,"BFS");
os << "Per iteration communication times: " << endl;
os << "AllGatherv: " << cblas_allgathertime / ITERS << endl;
os << "AlltoAllv: " << cblas_alltoalltime / ITERS << endl;
sort(EDGES, EDGES+ITERS);
os << "--------------------------" << endl;
os << "Min nedges: " << EDGES[0] << endl;
os << "First Quartile nedges: " << (EDGES[(ITERS/4)-1] + EDGES[ITERS/4])/2 << endl;
os << "Median nedges: " << (EDGES[(ITERS/2)-1] + EDGES[ITERS/2])/2 << endl;
os << "Third Quartile nedges: " << (EDGES[(3*ITERS/4) -1 ] + EDGES[3*ITERS/4])/2 << endl;
os << "Max nedges: " << EDGES[ITERS-1] << endl;
double mean = accumulate( EDGES, EDGES+ITERS, 0.0 )/ ITERS;
vector<double> zero_mean(ITERS); // find distances to the mean
transform(EDGES, EDGES+ITERS, zero_mean.begin(), bind2nd( minus<double>(), mean ));
// self inner-product is sum of sum of squares
double deviation = inner_product( zero_mean.begin(),zero_mean.end(), zero_mean.begin(), 0.0 );
deviation = sqrt( deviation / (ITERS-1) );
os << "Mean nedges: " << mean << endl;
os << "STDDEV nedges: " << deviation << endl;
os << "--------------------------" << endl;
sort(TIMES,TIMES+ITERS);
os << "Min time: " << TIMES[0] << " seconds" << endl;
os << "First Quartile time: " << (TIMES[(ITERS/4)-1] + TIMES[ITERS/4])/2 << " seconds" << endl;
os << "Median time: " << (TIMES[(ITERS/2)-1] + TIMES[ITERS/2])/2 << " seconds" << endl;
os << "Third Quartile time: " << (TIMES[(3*ITERS/4)-1] + TIMES[3*ITERS/4])/2 << " seconds" << endl;
os << "Max time: " << TIMES[ITERS-1] << " seconds" << endl;
mean = accumulate( TIMES, TIMES+ITERS, 0.0 )/ ITERS;
transform(TIMES, TIMES+ITERS, zero_mean.begin(), bind2nd( minus<double>(), mean ));
deviation = inner_product( zero_mean.begin(),zero_mean.end(), zero_mean.begin(), 0.0 );
deviation = sqrt( deviation / (ITERS-1) );
os << "Mean time: " << mean << " seconds" << endl;
os << "STDDEV time: " << deviation << " seconds" << endl;
os << "--------------------------" << endl;
sort(MTEPS, MTEPS+ITERS);
os << "Min MTEPS: " << MTEPS[0] << endl;
os << "First Quartile MTEPS: " << (MTEPS[(ITERS/4)-1] + MTEPS[ITERS/4])/2 << endl;
os << "Median MTEPS: " << (MTEPS[(ITERS/2)-1] + MTEPS[ITERS/2])/2 << endl;
os << "Third Quartile MTEPS: " << (MTEPS[(3*ITERS/4)-1] + MTEPS[3*ITERS/4])/2 << endl;
os << "Max MTEPS: " << MTEPS[ITERS-1] << endl;
transform(MTEPS, MTEPS+ITERS, INVMTEPS, safemultinv<double>()); // returns inf for zero teps
double hteps = static_cast<double>(ITERS) / accumulate(INVMTEPS, INVMTEPS+ITERS, 0.0);
os << "Harmonic mean of MTEPS: " << hteps << endl;
transform(INVMTEPS, INVMTEPS+ITERS, zero_mean.begin(), bind2nd(minus<double>(), 1/hteps));
deviation = inner_product( zero_mean.begin(),zero_mean.end(), zero_mean.begin(), 0.0 );
deviation = sqrt( deviation / (ITERS-1) ) * (hteps*hteps); // harmonic_std_dev
os << "Harmonic standard deviation of MTEPS: " << deviation << endl;
SpParHelper::Print(os.str());
}
}
MPI::Finalize();
return 0;
}
|
{"hexsha": "e731815b16b8bb60398f3713628854aabadda096", "size": 19522, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "CombBLAS/Applications/Graph500.cpp", "max_stars_repo_name": "shoaibkamil/OLD-kdt-specializer", "max_stars_repo_head_hexsha": "85074ec1990df980d25096ea8c55dd81350e531e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-11-15T02:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-15T02:11:33.000Z", "max_issues_repo_path": "CombBLAS/Applications/Graph500.cpp", "max_issues_repo_name": "shoaibkamil/OLD-kdt-specializer", "max_issues_repo_head_hexsha": "85074ec1990df980d25096ea8c55dd81350e531e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CombBLAS/Applications/Graph500.cpp", "max_forks_repo_name": "shoaibkamil/OLD-kdt-specializer", "max_forks_repo_head_hexsha": "85074ec1990df980d25096ea8c55dd81350e531e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6240875912, "max_line_length": 128, "alphanum_fraction": 0.6434279275, "num_tokens": 6276}
|
from setuptools import Extension, setup
from Cython.Build import cythonize
import numpy
setup(
ext_modules=cythonize(
[Extension('match', ['match.pyx'], include_dirs=[numpy.get_include()])]
)
)
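# Usage note (added for illustration; the module/source names are simply the ones
# used in the Extension above): the extension is typically compiled in place before
# `match` can be imported, e.g.
#
#   python setup.py build_ext --inplace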
|
{"hexsha": "75e2cc86975d1e02e1f220515026cccc59ceee5f", "size": 212, "ext": "py", "lang": "Python", "max_stars_repo_path": "pose_util/setup.py", "max_stars_repo_name": "SelvamArul/MOTR", "max_stars_repo_head_hexsha": "2a0b70288feaca665d460096159100d5077e9312", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pose_util/setup.py", "max_issues_repo_name": "SelvamArul/MOTR", "max_issues_repo_head_hexsha": "2a0b70288feaca665d460096159100d5077e9312", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pose_util/setup.py", "max_forks_repo_name": "SelvamArul/MOTR", "max_forks_repo_head_hexsha": "2a0b70288feaca665d460096159100d5077e9312", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.2727272727, "max_line_length": 79, "alphanum_fraction": 0.7075471698, "include": true, "reason": "import numpy", "num_tokens": 51}
|
[STATEMENT]
lemma atU_union_cases[case_names left right, consumes 1]: "\<lbrakk>
atU U (c1+c2);
atU U c1 \<Longrightarrow> P;
atU U c2 \<Longrightarrow> P
\<rbrakk> \<Longrightarrow> P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>atU U (c1 + c2); atU U c1 \<Longrightarrow> P; atU U c2 \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P
[PROOF STEP]
by (unfold atU_def) (blast elim: mset_un_cases)
|
{"llama_tokens": 176, "file": "Program-Conflict-Analysis_Semantics", "length": 1}
|
from os import stat
import numpy as np
from matplotlib import pyplot as plt
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import ReferenceModification.LibFunctions as lib
MEMORY_SIZE = 100000
# hyper parameters
BATCH_SIZE = 100
GAMMA = 0.99
tau = 0.005
NOISE = 0.2
NOISE_CLIP = 0.5
EXPLORE_NOISE = 0.1
POLICY_FREQUENCY = 2
POLICY_NOISE = 0.2
class ReplayBufferTD3(object):
def __init__(self, max_size=1000000):
self.storage = []
self.max_size = max_size
self.ptr = 0
def add(self, data):
if len(self.storage) == self.max_size:
self.storage[int(self.ptr)] = data
self.ptr = (self.ptr + 1) % self.max_size
else:
self.storage.append(data)
def sample(self, batch_size):
ind = np.random.randint(0, len(self.storage), size=batch_size)
states, actions, next_states, rewards, dones = [], [], [], [], []
for i in ind:
s, a, s_, r, d = self.storage[i]
states.append(np.array(s, copy=False))
actions.append(np.array(a, copy=False))
next_states.append(np.array(s_, copy=False))
rewards.append(np.array(r, copy=False))
dones.append(np.array(d, copy=False))
return np.array(states), np.array(actions), np.array(next_states), np.array(rewards).reshape(-1, 1), np.array(dones).reshape(-1, 1)
def size(self):
return len(self.storage)
class SmartBufferTD3(object):
def __init__(self, max_size=1000000, state_dim=14):
self.max_size = max_size
self.state_dim = state_dim
self.ptr = 0
self.states = np.empty((max_size, state_dim))
self.actions = np.empty((max_size, 1))
self.next_states = np.empty((max_size, state_dim))
self.rewards = np.empty((max_size, 1))
self.dones = np.empty((max_size, 1))
def add(self, s, a, s_p, r, d):
self.states[self.ptr] = s
self.actions[self.ptr] = a
self.next_states[self.ptr] = s_p
self.rewards[self.ptr] = r
self.dones[self.ptr] = d
self.ptr += 1
        if self.ptr == 99999: self.ptr = 0  # wrap-around index is hard-coded rather than derived from max_size
def sample(self, batch_size):
ind = np.random.randint(0, self.ptr-1, size=batch_size)
states = np.empty((batch_size, self.state_dim))
actions = np.empty((batch_size, 1))
next_states = np.empty((batch_size, self.state_dim))
rewards = np.empty((batch_size, 1))
dones = np.empty((batch_size, 1))
for i, j in enumerate(ind):
states[i] = self.states[j]
actions[i] = self.actions[j]
next_states[i] = self.next_states[j]
rewards[i] = self.rewards[j]
dones[i] = self.dones[j]
return states, actions, next_states, rewards, dones
def size(self):
return self.ptr
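# Minimal usage sketch of the buffer above (added for illustration only; the state
# dimension and transition values are arbitrary placeholders, and this helper is
# never called by the training code).
def _replay_buffer_demo():
    buf = SmartBufferTD3(max_size=1000, state_dim=14)
    for _ in range(2 * BATCH_SIZE):
        s, s_p = np.random.randn(14), np.random.randn(14)   # current / next state
        a = np.random.uniform(-1, 1)                         # scalar action
        buf.add(s, a, s_p, r=np.random.randn(), d=0.0)
    states, actions, next_states, rewards, dones = buf.sample(BATCH_SIZE)
    return states.shape, actions.shape                       # (100, 14), (100, 1)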
nn_l1 = 400
nn_l2 = 300
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action, h_size):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, h_size)
self.l2 = nn.Linear(h_size, h_size)
self.l3 = nn.Linear(h_size, action_dim)
self.max_action = max_action
def forward(self, x):
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
x = self.max_action * torch.tanh(self.l3(x))
return x
class Critic(nn.Module):
def __init__(self, state_dim, action_dim, h_size):
super(Critic, self).__init__()
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, h_size)
self.l2 = nn.Linear(h_size, h_size)
self.l3 = nn.Linear(h_size, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, h_size)
self.l5 = nn.Linear(h_size, h_size)
self.l6 = nn.Linear(h_size, 1)
def forward(self, x, u):
xu = torch.cat([x, u], 1)
x1 = F.relu(self.l1(xu))
x1 = F.relu(self.l2(x1))
x1 = self.l3(x1)
x2 = F.relu(self.l4(xu))
x2 = F.relu(self.l5(x2))
x2 = self.l6(x2)
return x1, x2
def Q1(self, x, u):
xu = torch.cat([x, u], 1)
x1 = F.relu(self.l1(xu))
x1 = F.relu(self.l2(x1))
x1 = self.l3(x1)
return x1
class TD3(object):
def __init__(self, state_dim, action_dim, max_action, name):
self.name = name
self.state_dim = state_dim
self.max_action = max_action
self.act_dim = action_dim
self.actor = None
self.actor_target = None
self.actor_optimizer = None
self.critic = None
self.critic_target = None
self.critic_optimizer = None
# self.replay_buffer = ReplayBufferTD3()
self.replay_buffer = SmartBufferTD3(state_dim=state_dim)
def create_agent(self, h_size):
state_dim = self.state_dim
action_dim = self.act_dim
max_action = self.max_action
self.actor = Actor(state_dim, action_dim, max_action, h_size)
self.actor_target = Actor(state_dim, action_dim, max_action, h_size)
self.actor_target.load_state_dict(self.actor.state_dict())
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=1e-3)
self.critic = Critic(state_dim, action_dim, h_size)
self.critic_target = Critic(state_dim, action_dim, h_size)
self.critic_target.load_state_dict(self.critic.state_dict())
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=1e-3)
def select_action(self, state, noise=0.1):
return self.act(state, noise=noise)
def act(self, state, noise=0.1):
state = torch.FloatTensor(state.reshape(1, -1))
action = self.actor(state).data.numpy().flatten()
if noise != 0:
action = (action + np.random.normal(0, noise, size=self.act_dim))
return action.clip(-self.max_action, self.max_action)
def get_critic_value(self, state, action):
state = torch.FloatTensor(state)
action = torch.FloatTensor(action)
current_Q1, current_Q2 = self.critic(state[None, :], action[None, :])
ret = current_Q1.detach().item()
return ret
def train(self, iterations=2):
if self.replay_buffer.size() < BATCH_SIZE:
return 0
for it in range(iterations):
# Sample replay buffer
x, u, y, r, d = self.replay_buffer.sample(BATCH_SIZE)
state = torch.FloatTensor(x)
action = torch.FloatTensor(u)
next_state = torch.FloatTensor(y)
done = torch.FloatTensor(1 - d)
reward = torch.FloatTensor(r)
# Select action according to policy and add clipped noise
noise = torch.FloatTensor(u).data.normal_(0, POLICY_NOISE)
noise = noise.clamp(-NOISE_CLIP, NOISE_CLIP)
next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + (done * GAMMA * target_Q).detach()
# Get current Q estimates
current_Q1, current_Q2 = self.critic(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Delayed policy updates
if it % POLICY_FREQUENCY == 0:
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
total_loss = actor_loss + critic_loss
return total_loss
def save(self, directory="./saves"):
filename = self.name
torch.save(self.actor, '%s/%s_actor.pth' % (directory, filename))
torch.save(self.critic, '%s/%s_critic.pth' % (directory, filename))
torch.save(self.actor_target, '%s/%s_actor_target.pth' % (directory, filename))
torch.save(self.critic_target, '%s/%s_critic_target.pth' % (directory, filename))
def load(self, directory="./saves"):
filename = self.name
self.actor = torch.load('%s/%s_actor.pth' % (directory, filename))
self.critic = torch.load('%s/%s_critic.pth' % (directory, filename))
self.actor_target = torch.load('%s/%s_actor_target.pth' % (directory, filename))
self.critic_target = torch.load('%s/%s_critic_target.pth' % (directory, filename))
print("Agent Loaded")
def try_load(self, load=True, h_size=300, path=None):
if load:
try:
self.load(path)
except Exception as e:
                print(f"Exception: {e}")
                print(f"Unable to load model - creating a new agent instead")
                self.create_agent(h_size)
else:
print(f"Not loading - restarting training")
self.create_agent(h_size)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=1e-3)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=1e-3)
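# Illustrative end-to-end sketch (added for clarity; not part of the original module).
# It assumes a hypothetical environment object with reset() and step(action) methods
# returning numpy states, a scalar reward and a done flag - no such environment is
# defined in this file.
def _td3_training_sketch(env, state_dim=14, action_dim=1, max_action=1.0, episodes=10):
    agent = TD3(state_dim, action_dim, max_action, name="sketch")
    agent.create_agent(h_size=300)
    for _ in range(episodes):
        state, done = env.reset(), False
        while not done:
            action = agent.act(state, noise=EXPLORE_NOISE)
            next_state, reward, done = env.step(action)
            agent.replay_buffer.add(state, action, next_state, reward, done)
            agent.train(iterations=2)      # no-op until the buffer holds BATCH_SIZE samples
            state = next_state
    return agent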
|
{"hexsha": "d5fd0d2a28d8b882326baef072a80dbb786e44e1", "size": 9875, "ext": "py", "lang": "Python", "max_stars_repo_path": "ReferenceModification/NavUtils/TD3.py", "max_stars_repo_name": "BDEvan5/ReferenceModification", "max_stars_repo_head_hexsha": "8d9d13c8f563cc331809836d148b3dc83dd5d9ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ReferenceModification/NavUtils/TD3.py", "max_issues_repo_name": "BDEvan5/ReferenceModification", "max_issues_repo_head_hexsha": "8d9d13c8f563cc331809836d148b3dc83dd5d9ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-05-06T08:54:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-13T07:04:34.000Z", "max_forks_repo_path": "ReferenceModification/NavUtils/TD3.py", "max_forks_repo_name": "BDEvan5/ReferenceModification", "max_forks_repo_head_hexsha": "8d9d13c8f563cc331809836d148b3dc83dd5d9ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-08T10:57:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-08T10:57:18.000Z", "avg_line_length": 33.8184931507, "max_line_length": 139, "alphanum_fraction": 0.6010126582, "include": true, "reason": "import numpy", "num_tokens": 2442}
|
import sys
hoomd_path = str(sys.argv[4])
gsd_path = str(sys.argv[5])
# need to extract values from filename (pa, pb, xa) for naming
part_perc_a = int(sys.argv[3])
part_frac_a = float(part_perc_a) / 100.0
pe_a = int(sys.argv[1])
pe_b = int(sys.argv[2])
sys.path.append(hoomd_path)
import hoomd
from hoomd import md
from hoomd import deprecated
#initialize system randomly, can specify GPU execution here
part_num = 15000
part_a = part_num * part_frac_a # get the total number of A particles
part_a = int(part_a)
part_b = part_num - part_a # get the total number of B particles
part_b = int(part_b)
#########################################################################
########################## Begin Data Analysis ##########################
#########################################################################
sys.path.append(gsd_path)
import gsd
from gsd import hoomd
from gsd import pygsd
import numpy as np
myfile = "pa" + str(pe_a) + "_pb" + str(pe_b) + "_xa" + str(part_perc_a) + ".gsd"
f = hoomd.open(name=myfile, mode='rb')
dumps = f.__len__()
position_array = np.zeros((dumps), dtype=np.ndarray) # array of position arrays
type_array = np.zeros((dumps), dtype=np.ndarray) # particle types
box_data = np.zeros((1), dtype=np.ndarray) # box dimensions
with hoomd.open(name=myfile, mode='rb') as t: # open for reading
snap = t[0] # snap 0th snapshot
box_data = snap.configuration.box # get box dimensions
for i in range(0,dumps):
snap = t[i] # take snap of each dump
type_array[i] = snap.particles.typeid
position_array[i] = snap.particles.position # store all particle positions
pos_A = np.zeros((dumps), dtype=np.ndarray) # type A positions
pos_B = np.zeros((dumps), dtype=np.ndarray) # type B positions
tmpA = np.zeros((part_a, 3), dtype=np.float32) # temporary storage arrays
tmpB = np.zeros((part_b, 3), dtype=np.float32)
from freud import parallel, box, density, cluster
parallel.setNumThreads(1) # don't run multiple threads
my_density = density.LocalDensity(r_cut=2.5,
volume=0.79,
diameter=1.0) # initiate class, use area of circle
l_box = box_data[0] # get box dimensions (square here)
f_box = box.Box(Lx=l_box,
Ly=l_box,
is2D=True) # initialize freud box
my_clusters = cluster.Cluster(box=f_box,
rcut=1.0) # initialize class
cluster_props = cluster.ClusterProperties(box=f_box)
number_clusters = np.zeros((dumps), dtype=np.ndarray) # arrays to store things
ids = np.zeros((dumps), dtype=np.ndarray)
size_clusters = np.zeros((dumps), dtype=np.ndarray)
tot_size = np.zeros((dumps), dtype=np.ndarray) # number of particles in clusters
tot_num = np.zeros((dumps), dtype=np.ndarray) # total number of clusters
MCS = np.zeros((dumps), dtype=np.ndarray) # Mean cluster size
GF = np.zeros((dumps), dtype=np.ndarray) # Gas fraction
A_ids = np.zeros((part_a), dtype=np.ndarray) # type A ids
B_ids = np.zeros((part_b), dtype=np.ndarray) # type B ids
percent_A = np.zeros((dumps), dtype=np.ndarray) # composition A at each timestep
largest = np.zeros((dumps), dtype=np.ndarray) # read out largest cluster at each tstep
MSD = np.zeros((dumps - 1, part_num), dtype=np.ndarray) # array of individual particle MSDs
MSD_A = np.zeros((dumps - 1, part_a), dtype=np.ndarray) # array for a particles
MSD_B = np.zeros((dumps - 1, part_b), dtype=np.ndarray)                # array for b particles
# analyze all particles
for j in range(0, dumps):
l_pos = position_array[j]
my_clusters.computeClusters(l_pos)
number_clusters[j] = my_clusters.getNumClusters() # find number of clusters
ids = my_clusters.getClusterIdx() # get cluster ids
cluster_props.computeProperties(l_pos, ids)
size_clusters[j] = cluster_props.getClusterSizes() # get number of particles in each
how_many = my_clusters.getNumClusters()
A_id_count = 0
B_id_count = 0
for h in range(0, part_num):
if type_array[j][h] == 0:
A_ids[A_id_count] = ids[h] # store the cluster ids for A type
A_id_count += 1 # IMPROVE: sort while placing?
else:
B_ids[B_id_count] = ids[h] # store the cluster ids for B type
B_id_count += 1 # could put ids in order ...
clust_dat = np.zeros((how_many), dtype = np.ndarray)
clust_dat_A = np.zeros((how_many), dtype = np.ndarray)
clust_dat_B = np.zeros((how_many), dtype = np.ndarray)
numerator_A = 0
denominator_tot = 0
for m in range(0, how_many):
clust_dat_A[m] = (A_ids == m).sum() # sum all A type particles in a cluster
clust_dat_B[m] = (B_ids == m).sum()
clust_dat[m] = clust_dat_A[m] + clust_dat_B[m] # find total number of particles in cluster
if clust_dat[m] > 15:
numerator_A += clust_dat_A[m]
denominator_tot += clust_dat[m]
# get the total percent of A particles in all clusters
if denominator_tot != 0:
percent_A[j] = float(numerator_A) / float(denominator_tot)
l_clust = 0 # int size of largest cluster
for k in range(0, len(size_clusters[j])):
# the size minimum is a very important value to consider
if size_clusters[j][k] > 15 and size_clusters[j][k] < part_num:
tot_size[j] += size_clusters[j][k]
tot_num[j] += 1
if size_clusters[j][k] > l_clust: # if larger cluster is found
l_clust = size_clusters[j][k] # set l_clust to that size
largest[j] = l_clust # save largest cluster size for tstep
if tot_num[j] > 0:
MCS[j] = float(tot_size[j]/tot_num[j])/float(part_num)
GF[j] = float(part_num - tot_size[j]) / float(part_num)
else:
MCS[j] = 0
GF[j] = 1
# let's start by getting the MSD for all particles (don't care about type)
if j != dumps - 1:
msda_count = 0
msdb_count = 0
for w in range(0,part_num):
MSD[j][w] = np.sqrt(((position_array[j+1][w][0] - position_array[j][w][0])**2) +
((position_array[j+1][w][1] - position_array[j][w][1])**2) +
((position_array[j+1][w][2] - position_array[j][w][2])**2))
if type_array[j][w] == 0:
MSD_A[j][msda_count] = np.sqrt(((position_array[j+1][w][0] - position_array[j][w][0])**2) +
((position_array[j+1][w][1] - position_array[j][w][1])**2) +
((position_array[j+1][w][2] - position_array[j][w][2])**2))
msda_count += 1
else:
MSD_B[j][msdb_count] = np.sqrt(((position_array[j+1][w][0] - position_array[j][w][0])**2) +
((position_array[j+1][w][1] - position_array[j][w][1])**2) +
((position_array[j+1][w][2] - position_array[j][w][2])**2))
msdb_count += 1
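# Illustrative helper (added for clarity; it is not called by this script): computes
# the mean cluster size (MCS) and gas fraction (GF) from a list of cluster sizes,
# using the same size cutoff of 15 particles and the same normalisation by the total
# particle count as the loop above.
def _mcs_gf_example(cluster_sizes, n_particles, cutoff=15):
    kept = [s for s in cluster_sizes if cutoff < s < n_particles]
    if len(kept) == 0:
        return 0.0, 1.0                              # no clusters -> everything is gas
    clustered = float(sum(kept))                     # particles that belong to clusters
    mcs = (clustered / len(kept)) / n_particles
    gf = (n_particles - clustered) / float(n_particles)
    return mcs, gf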
def getDensityPlease(n): # call this function as needed
l_pos = position_array[n] # get ith position array
my_density.compute(f_box,
l_pos,
l_pos)
return my_density.getDensity()
avg_sys_density = np.zeros((1), dtype=np.ndarray)
take_last = dumps - 50
last = dumps - 1
msd_last = dumps - 2
for j in range(take_last, dumps):
avg_sys_density[0] += getDensityPlease(j)
avg_sys_density[0] /= (dumps - take_last)
#########################################################################
### perform the same analysis on species A and species B individually ###
#########################################################################
if part_perc_a != 0 and part_perc_a != 100:
tot_size_A = np.zeros((dumps), dtype=np.ndarray) # number of particles in clusters
tot_num_A = np.zeros((dumps), dtype=np.ndarray) # total number of clusters
MCS_A = np.zeros((dumps), dtype=np.ndarray) # Mean cluster size
GF_A = np.zeros((dumps), dtype=np.ndarray) # Gas fraction
tot_size_B = np.zeros((dumps), dtype=np.ndarray) # number of particles in clusters
tot_num_B = np.zeros((dumps), dtype=np.ndarray) # total number of clusters
MCS_B = np.zeros((dumps), dtype=np.ndarray) # Mean cluster size
GF_B = np.zeros((dumps), dtype=np.ndarray) # Gas fraction
for j in range(0, dumps):
countA = 0
countB = 0
for g in range(0, part_num):
if type_array[j][g] == 0:
tmpA[countA][0] = position_array[j][g][0]
tmpA[countA][1] = position_array[j][g][1]
tmpA[countA][2] = position_array[j][g][2]
countA += 1
else:
tmpB[countB][0] = position_array[j][g][0]
tmpB[countB][1] = position_array[j][g][1]
tmpB[countB][2] = position_array[j][g][2]
countB += 1
pos_A[j] = tmpA
pos_B[j] = tmpB
l_pos = pos_A[j]
my_clusters.computeClusters(l_pos)
number_clusters[j] = my_clusters.getNumClusters() # find number of clusters
ids = my_clusters.getClusterIdx() # get cluster ids
cluster_props.computeProperties(l_pos, ids)
size_clusters[j] = cluster_props.getClusterSizes() # get number of particles in each
for k in range(0, len(size_clusters[j])):
# the size minimum is a very important value to consider
if size_clusters[j][k] > 15 and size_clusters[j][k] < part_num:
tot_size_A[j] += size_clusters[j][k]
tot_num_A[j] += 1
if tot_num_A[j] > 0:
MCS_A[j] = float(tot_size_A[j]/tot_num_A[j])/float(part_a)
GF_A[j] = float(part_a - tot_size_A[j]) / float(part_a)
else:
MCS_A[j] = 0
GF_A[j] = 1
l_pos = pos_B[j]
my_clusters.computeClusters(l_pos)
number_clusters[j] = my_clusters.getNumClusters() # find number of clusters
ids = my_clusters.getClusterIdx() # get cluster ids
cluster_props.computeProperties(l_pos, ids)
size_clusters[j] = cluster_props.getClusterSizes() # get number of particles in each
for k in range(0, len(size_clusters[j])):
# the size minimum is a very important value to consider
if size_clusters[j][k] > 15 and size_clusters[j][k] < part_num:
tot_size_B[j] += size_clusters[j][k]
tot_num_B[j] += 1
if tot_num_B[j] > 0:
MCS_B[j] = float(tot_size_B[j]/tot_num_B[j])/float(part_b)
GF_B[j] = float(part_b - tot_size_B[j]) / float(part_b)
else:
MCS_B[j] = 0
GF_B[j] = 1
def getDensityA(n): # call this function as needed
countA = 0
for g in range(0, part_num):
if type_array[n][g] == 0:
tmpA[countA][0] = position_array[n][g][0]
tmpA[countA][1] = position_array[n][g][1]
tmpA[countA][2] = position_array[n][g][2]
countA += 1
pos_A[n] = tmpA
l_pos = pos_A[n] # get ith position array
my_density.compute(f_box,
l_pos,
l_pos)
return my_density.getDensity()
avg_dense_A = np.zeros((1), dtype=np.ndarray)
for j in range(take_last, dumps):
avg_dense_A[0] += getDensityA(j)
avg_dense_A[0] /= (dumps - take_last)
def getDensityB(n): # call this function as needed
countB = 0
for g in range(0, part_num):
if type_array[n][g] == 1:
tmpB[countB][0] = position_array[n][g][0]
tmpB[countB][1] = position_array[n][g][1]
tmpB[countB][2] = position_array[n][g][2]
countB += 1
pos_B[n] = tmpB
l_pos = pos_B[n] # get ith position array
my_density.compute(f_box,
l_pos,
l_pos)
return my_density.getDensity()
avg_dense_B = np.zeros((1), dtype=np.ndarray)
for j in range(take_last, dumps):
avg_dense_B[0] += getDensityB(j)
avg_dense_B[0] /= (dumps - take_last)
##############################################
##### Plot the individual and total data #####
##############################################
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
plt_name = "pa" + str(pe_a) + "_pb" + str(pe_b) + "_xa" + str(part_perc_a)
plt_name1 = "pa" + str(pe_a) + "_pb" + str(pe_b) + "_xa" + str(part_perc_a) + "A"
plt_name2 = "pa" + str(pe_a) + "_pb" + str(pe_b) + "_xa" + str(part_perc_a) + "B"
if part_perc_a != 0 and part_perc_a != 100:
sns.kdeplot(avg_sys_density[0], shade = True, color="g")
sns.kdeplot(avg_dense_A[0], shade = True, color="r")
sns.kdeplot(avg_dense_B[0], shade = True, color="b")
plt.savefig('avg_density_' + plt_name + '.png', dpi=1000)
plt.close()
sns.kdeplot(getDensityPlease(last), shade = True, color="g")
sns.kdeplot(getDensityA(last), shade = True, color="r")
sns.kdeplot(getDensityB(last), shade = True, color="b")
plt.savefig('final_density_' + plt_name + '.png', dpi=1000)
plt.close()
plt.plot(MCS, color="g")
plt.plot(MCS_A, color="r")
plt.plot(MCS_B, color="b")
#plt.ylim((0,1))
plt.savefig('MCS_'+ plt_name + '.png', dpi=1000)
plt.close()
plt.plot(GF, color="g")
plt.plot(GF_A, color="r")
plt.plot(GF_B, color="b")
plt.ylim((0,1))
plt.savefig('GF_'+plt_name+'.png', dpi=1000)
plt.close()
plt.plot(percent_A, color="r")
#plt.ylim((0,1))
plt.savefig('A_comp_'+plt_name+'.png', dpi=1000)
plt.close()
plt.plot(largest, color="g")
plt.savefig('Largest_clust_'+plt_name+'.png', dpi=1000)
plt.close()
sns.kdeplot(MSD[msd_last], shade = True, color="g")
sns.kdeplot(MSD_A[msd_last], shade = True, color="r")
sns.kdeplot(MSD_B[msd_last], shade = True, color="b")
plt.savefig('MSD_'+plt_name+'.png', dpi=1000)
plt.close()
else: # if monodisperse plot total values
sns.kdeplot(avg_sys_density[0], shade = True, color="g")
plt.savefig('avg_density_' + plt_name + '.png', dpi=1000)
plt.close()
sns.kdeplot(getDensityPlease(last), shade = True, color="g")
plt.savefig('final_density_' + plt_name + '.png', dpi=1000)
plt.close()
plt.plot(MCS, color="g")
plt.savefig('MCS_'+ plt_name + '.png', dpi=1000)
plt.close()
plt.plot(GF, color="g")
plt.ylim((0,1))
plt.savefig('GF_'+plt_name+'.png', dpi=1000)
plt.close()
plt.plot(largest, color="g")
plt.savefig('Largest_clust_'+plt_name+'.png', dpi=1000)
plt.close()
sns.kdeplot(MSD[msd_last], shade = True, color="g")
plt.savefig('MSD_'+plt_name+'.png', dpi=1000)
plt.close()
|
{"hexsha": "4906fc5e3de0aa1ab6963469f1f986091d27ebbb", "size": 15968, "ext": "py", "lang": "Python", "max_stars_repo_path": "deprecated/deprecated_post_proc_msd.py", "max_stars_repo_name": "kolbt/whingdingdilly", "max_stars_repo_head_hexsha": "4c17b594ebc583750fe7565d6414f08678ea7882", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-09-04T14:36:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T23:24:58.000Z", "max_issues_repo_path": "deprecated/deprecated_post_proc_msd.py", "max_issues_repo_name": "kolbt/whingdingdilly", "max_issues_repo_head_hexsha": "4c17b594ebc583750fe7565d6414f08678ea7882", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deprecated/deprecated_post_proc_msd.py", "max_forks_repo_name": "kolbt/whingdingdilly", "max_forks_repo_head_hexsha": "4c17b594ebc583750fe7565d6414f08678ea7882", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9435897436, "max_line_length": 107, "alphanum_fraction": 0.5451528056, "include": true, "reason": "import numpy", "num_tokens": 4117}
|
[STATEMENT]
lemma bindU_lifted_strict [simp]: "bindU\<cdot>\<bottom>\<cdot>k = (\<bottom>::udom\<cdot>lifted)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bindU\<cdot>\<bottom>\<cdot>k = \<bottom>
[PROOF STEP]
by fixrec_simp
|
{"llama_tokens": 94, "file": "Tycon_Lift_Monad", "length": 1}
|
__author__ = 'mangalbhaskar'
__version__ = '1.0'
"""
## Description:
# --------------------------------------------------------
# Annotation Parser Interface for the annotation workflow.
# It uses the annotations created by VGG VIA tool v2.03 (not tested), v2.05 (tested).
#
## References
* https://datascience.stackexchange.com/questions/60866/split-tuples-with-labeled-samples-in-training-validation-and-test-sets/60872
* https://cs230-stanford.github.io/train-dev-test-split.html
* https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
#
# --------------------------------------------------------
# Copyright (c) 2020 mangalbhaskar
# Licensed under [see LICENSE for details]
# Written by mangalbhaskar
# --------------------------------------------------------
## Example:
# --------------------------------------------------------
## TODO:
# --------------------------------------------------------
## Future work:
# --------------------------------------------------------
"""
import os
import sys
import logging
import numpy as np
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.append(this_dir)
APP_ROOT_DIR = os.getenv('AI_APP')
ROOT_DIR = os.getenv('AI_HOME')
BASE_PATH_CFG = os.getenv('AI_CFG')
if APP_ROOT_DIR not in sys.path:
sys.path.append(APP_ROOT_DIR)
# if BASE_PATH_CFG not in sys.path:
# sys.path.append(BASE_PATH_CFG)
# this = sys.modules[__name__]
from Annon import ANNON
import datasplit
log = logging.getLogger('__main__.'+__name__)
def get_annon_data(cfg, args, datacfg):
"""filter images based on the specific filter_by
TODO:
* annotation filtering based on stats on total images, total annotations, area (mask, bbox)
"""
log.info("-----------------------------")
ANNONCFG = cfg['DBCFG']['ANNONCFG']
annon = ANNON(dbcfg=ANNONCFG, datacfg=datacfg)
releaseinfo = annon.getReleaseId()
filter_by = []
filter_enable = cfg['AIDS_FILTER']['ENABLE']
if filter_enable:
filter_by = cfg['AIDS_FILTER'][ cfg['AIDS_FILTER']['BY'] ]
# filter_by = np.array(cfg['AIDS_FILTER'][ cfg['AIDS_FILTER']['BY'] ])
lbl_ids = annon.getCatIds(catIds=filter_by)
# lbl_ids = list(lbl_ids[np.where(np.in1d(lbl_ids, filter_by))])
log.info("lbl_ids after filter: {}".format(lbl_ids))
else:
lbl_ids = annon.getCatIds()
log.info("lbl_ids: {}".format(lbl_ids))
images_imgIds = annon.getImgIds(catIds=lbl_ids)
# annotations = annon.getAnnIds(imgIds=images_imgIds, catIds=lbl_ids, areaRng=[])
annotations = annon.getAnnIds(imgIds=images_imgIds, catIds=lbl_ids)
classinfo = annon.loadCats(ids=lbl_ids)
  ## make sure the image ids have a fixed order before shuffling
## https://cs230-stanford.github.io/train-dev-test-split.html
T = len(images_imgIds)
log.info("images_imgIds Size: => {}".format(T))
cfg_aids_randomizer = cfg['AIDS_RANDOMIZER']
if cfg_aids_randomizer['ENABLE']:
if cfg_aids_randomizer['USE_SEED']:
np.random.seed(T) ## provides consistent shuffle given that 'T' is same between two different execution of the script
    ## Shuffle the image ids
np.random.shuffle(images_imgIds)
img_lbl_arr = np.zeros([len(images_imgIds), len(lbl_ids)], int)
for j, lbl_id in enumerate(lbl_ids):
img_col = img_lbl_arr[:,j]
img_ids = annon.getImgIds(catIds=lbl_id)
log.info("lbl_id, len(img_ids): {}, {}".format(lbl_id, len(img_ids)))
if img_ids and len(img_ids) > 0:
img_col[np.where(np.in1d(images_imgIds, img_ids))] += 1
return annon, images_imgIds, annotations, classinfo, lbl_ids, img_lbl_arr
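# Self-contained illustration (added for clarity; the ids below are made up and this
# helper is not used by the pipeline): the loop above builds an image-by-label
# incidence matrix by marking, for each label column, which image ids carry that label.
def _incidence_matrix_example():
  image_ids = np.array([101, 102, 103, 104])
  label_to_images = {1: [101, 103], 2: [104]}      ## hypothetical label -> image ids
  mat = np.zeros([len(image_ids), len(label_to_images)], int)
  for j, lbl_id in enumerate(sorted(label_to_images)):
    col = mat[:, j]
    col[np.where(np.in1d(image_ids, label_to_images[lbl_id]))] += 1
  return mat   ## rows: images, columns: labels, 1 where the image has the label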
def prepare_datasets(cfg, args, datacfg):
  """Create AI Datasets and return the actual data to be further processed and persisted on the file-system or DB
TODO:
other stats like area, per label stats
"""
log.info("-----------------------------")
annon, images_imgIds, annotations, classinfo, lbl_ids, img_lbl_arr = get_annon_data(cfg, args, datacfg)
log.info("-----------------lbl_ids: {}".format(lbl_ids))
## split the images_imgIds using splitting algorithm
images_splits, splited_indices, splited_indices_per_label = datasplit.do_data_split(cfg, images_imgIds, lbl_ids, img_lbl_arr)
log.info("len(images_splits): {}".format(len(images_splits)))
for split in images_splits:
log.info("images_splits: len(split): {}".format(len(split)))
  ## Create AIDS - AI Datasets data structure
aids_splits_criteria = cfg['AIDS_SPLITS_CRITERIA'][cfg['AIDS_SPLITS_CRITERIA']['USE']]
# splits, prcntg = aids_splits_criteria[0], aids_splits_criteria[1]
splits = aids_splits_criteria[0] ## directory names
aids = {}
stats = {}
total_stats = {
'total_images':0
,'total_annotations':0
,'total_labels':0
}
for i, fnn in enumerate(images_splits):
total_images = 0
total_annotations = 0
total_labels = 0
subset = splits[i]
log.info("\nTotal Images in: {}, {}, {}".format(subset, len(fnn), type(fnn)))
imgIds = list(fnn)
annIds = annon.getAnnIds(imgIds=imgIds, catIds=lbl_ids)
catIds = annon.getCatIds(catIds=lbl_ids)
classinfo_split = annon.loadCats(ids=catIds)
log.info("catIds: {}".format(catIds))
log.info("classinfo_split: {}".format(classinfo_split))
if subset not in aids:
aids[subset] = {
'IMAGES':None
,'ANNOTATIONS': None
# ,'CLASSINFO_SPLIT':None
,'STATS':None
}
if subset not in stats:
stats[subset] = {
'labels':None
,"classinfo": None
,"total_labels": 0
,'total_annotations':0
,"total_images": 0
        # ,"total_unique_images": set()
        ,"total_unique_images": 0
,"annotation_per_img": []
,"label_per_img": []
,"maskarea": []
,"bboxarea": []
,"colors": None
}
total_labels += len(classinfo_split)
total_annotations += len(annIds)
total_images += len(imgIds)
## update total stats object
total_stats['total_labels'] += total_labels
total_stats['total_annotations'] += total_annotations
total_stats['total_images'] += total_images
## update stats object
stats[subset]['labels'] = catIds.copy()
stats[subset]['classinfo'] = classinfo_split.copy()
stats[subset]['total_labels'] += total_labels
stats[subset]['total_annotations'] += total_annotations
stats[subset]['total_images'] += total_images
## create ai dataset data
aids[subset]['IMAGES'] = annon.loadImgs(ids=imgIds)
aids[subset]['ANNOTATIONS'] = annon.loadAnns(ids=annIds)
# aids[subset]['CLASSINFO_SPLIT'] = classinfo_split
aids[subset]['STATS'] = [stats[subset]]
# log.debug("stats: {}".format(stats))
# log.debug("aids: {}".format(aids))
datacfg['stats'] = stats
datacfg['summary'] = total_stats
datacfg['classinfo'] = classinfo
datacfg['splits'] = splits
return aids, datacfg
|
{"hexsha": "e9f04c713c207213f23229eaa3cfb465dfab30ea", "size": 6876, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/annon/dataset/hmd_to_aids.py", "max_stars_repo_name": "Roy-Tuhin/maskrcnn_sophisticate-", "max_stars_repo_head_hexsha": "a5a2300abbe2633d66847cdbfa7ed2bc2f901ec3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "apps/annon/dataset/hmd_to_aids.py", "max_issues_repo_name": "Roy-Tuhin/maskrcnn_sophisticate-", "max_issues_repo_head_hexsha": "a5a2300abbe2633d66847cdbfa7ed2bc2f901ec3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2021-02-02T22:32:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:20:40.000Z", "max_forks_repo_path": "apps/annon/dataset/hmd_to_aids.py", "max_forks_repo_name": "Boyetuhin/maskrcnn_sophisticate-", "max_forks_repo_head_hexsha": "a5a2300abbe2633d66847cdbfa7ed2bc2f901ec3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-03T22:48:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T22:48:36.000Z", "avg_line_length": 32.5876777251, "max_line_length": 132, "alphanum_fraction": 0.6461605585, "include": true, "reason": "import numpy", "num_tokens": 1754}
|
import numpy as np
import copy
import torch
import pickle
from tqdm import tqdm
import cv2
def detach_single(state):
return state.detach().cuda()
def visualize_text_attention_weights(model_obj, test_loader, device, reverse_word_map, num_layers=2, batch_size=24, rnn_weights=(512, 1024), max_num_words=48):
    # generate and save per-word text attention weights (pickled into .txt files) for one batch; the multimodal image input is also saved for reference
model = copy.deepcopy(model_obj)
model.cuda()
# set model to evaluate model
model.eval()
states1 = torch.zeros(num_layers, batch_size, rnn_weights[0])
states2 = torch.zeros(num_layers, batch_size, rnn_weights[1])
with torch.no_grad():
        # for efficiency the dataloader should contain only one batch
for i, (x1_batch, x2_batch, temp1, temp2, ws) in tqdm(enumerate(test_loader)):
x1_batch = torch.tensor(x1_batch, dtype=torch.long).cuda()
x2_batch = torch.tensor(x2_batch, dtype=torch.float32).cuda()
states1 = detach_single(states1)
states2 = detach_single(states2)
attention_weights, states1, states2 = model.get_attention_weights(x1_batch, x2_batch, states1, states2)
text = x1_batch.detach().cpu().numpy()
img = x2_batch.detach().cpu().numpy()
print(x1_batch.shape)
print(attention_weights.size())
attention_weights = attention_weights.detach().cpu().numpy()
attention_weights = (attention_weights - np.amin(attention_weights))/(np.amax(attention_weights)-np.amin(attention_weights))
for b in range(batch_size):
indication = []
weights = []
for w in range(max_num_words-1):
if reverse_word_map.get(text[b,w+1]):
indication.append(reverse_word_map.get(text[b,w+1]))
weights.append(np.mean(attention_weights[b,:,w+1]))
weights = np.array(weights)
weights = (weights - np.amin(weights))/(np.amax(weights)-np.amin(weights))
with open("weights_{}.txt".format(b), "wb") as fp: # Pickling
pickle.dump(weights, fp)
with open("indication_{}.txt".format(b), "wb") as fp: # Pickling
pickle.dump(indication, fp)
img2 = img[b,:,:,:]*255
img3 = np.zeros((224,224,3))
                img3[:, :, 0] = img2[0, :, :]
                img3[:, :, 1] = img2[1, :, :]
                img3[:, :, 2] = img2[2, :, :]
cv2.imwrite("im_{}.png".format(b), img3)
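# Companion sketch (added for illustration; not part of the original file): reload the
# pickled weights/tokens written above for one sample index and render them as a bar
# chart. The sample index and output filename are arbitrary choices.
def plot_saved_attention(sample_idx=0):
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    with open("weights_{}.txt".format(sample_idx), "rb") as fp:
        weights = pickle.load(fp)
    with open("indication_{}.txt".format(sample_idx), "rb") as fp:
        tokens = pickle.load(fp)
    fig, ax = plt.subplots(figsize=(max(6, len(tokens) // 2), 3))
    ax.bar(range(len(tokens)), weights)
    ax.set_xticks(range(len(tokens)))
    ax.set_xticklabels(tokens, rotation=90, fontsize=6)
    ax.set_ylabel("normalised attention weight")
    fig.tight_layout()
    fig.savefig("attention_{}.png".format(sample_idx), dpi=200)
    plt.close(fig)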
|
{"hexsha": "370f87c8809ef799c27766b6df5b43fc756c2253", "size": 2625, "ext": "py", "lang": "Python", "max_stars_repo_path": "Utils/attention_visualization.py", "max_stars_repo_name": "tjvsonsbeek/Multi-modal-automated-diagnosis-with-chestXray-and-EHR", "max_stars_repo_head_hexsha": "2ffa98b88708ca19475e09b31aac7b6569c377ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-04T06:47:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-04T06:47:38.000Z", "max_issues_repo_path": "Utils/attention_visualization.py", "max_issues_repo_name": "tjvsonsbeek/Multi-modal-automated-diagnosis-with-chestXray-and-EHR", "max_issues_repo_head_hexsha": "2ffa98b88708ca19475e09b31aac7b6569c377ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-24T19:23:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-24T19:23:20.000Z", "max_forks_repo_path": "Utils/attention_visualization.py", "max_forks_repo_name": "tjvsonsbeek/Multi-modal-automated-diagnosis-with-chestXray-and-EHR", "max_forks_repo_head_hexsha": "2ffa98b88708ca19475e09b31aac7b6569c377ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.2586206897, "max_line_length": 166, "alphanum_fraction": 0.5946666667, "include": true, "reason": "import numpy", "num_tokens": 617}
|
from tqdm.auto import tqdm
import numpy as np
from pyhopper.callbacks import Callback
from pyhopper.utils import (
ParamInfo,
CandidateType,
steps_to_pretty_str,
time_to_pretty_str,
parse_timeout,
)
import time
class ScheduledRun:
def __init__(
self,
max_steps=None,
timeout=None,
seeding_steps=None,
seeding_timeout=None,
seeding_ratio=None,
start_temperature=1.0,
end_temperature=0.0,
):
if max_steps is not None and timeout is not None:
raise ValueError(
"Cannot specify both 'max_steps' and 'timeout' at the same time, one of the two must be None"
)
self._step_limit = max_steps
self._timeout = None
if timeout is not None:
self._timeout = parse_timeout(timeout)
# print(f"Parsed {timeout} to {self._timeout} seconds")
self._start_time = time.time()
self._step = 0
self._temp_start_units = 0
self._sigterm_received = 0
self._start_temperature = start_temperature
self._end_temperature = end_temperature
if seeding_steps is not None and seeding_timeout is not None:
raise ValueError(
"Can only specify one of 'seeding_steps' and 'seeding_timeout' at the same time, one of the two must be None"
)
self._seeding_timeout = None
self._seeding_max_steps = None
if seeding_timeout is None and seeding_steps is None:
# seeding_ratio is only valid if no other argument was set
if self._step_limit is not None:
# Max steps mode with seeding ratio provided
self._seeding_max_steps = int(seeding_ratio * max_steps)
elif self._timeout is not None:
# Timeout mode with seeding ratio provided
self._seeding_timeout = seeding_ratio * self._timeout
else:
if seeding_timeout is not None:
self._seeding_timeout = parse_timeout(seeding_timeout)
self._seeding_max_steps = seeding_steps
self._seeding_ratio = seeding_ratio # only needed for endless mode
self._temp_start = None
self._force_quit_callback = None
self._original_sigint_handler = None
self.reset_temperature()
def signal_gradually_quit(self):
self._sigterm_received += 1
def increment_step(self):
self._step += 1
@property
def unit(self):
if self._timeout is not None:
return "sec"
else:
return "steps"
@property
def step(self):
return self._step
@property
def current_runtime(self):
return time.time() - self._start_time
@property
def is_timeout_mode(self):
return self._timeout is not None
@property
def is_mixed_endless(self):
"""
True if in endless seeding+sampling mode
"""
return (
self._timeout is None
and self._step_limit is None
and self._seeding_timeout is None
and self._seeding_max_steps is None
)
@property
def endless_seeding_ratio(self):
return self._seeding_ratio if self._seeding_ratio is not None else 0.2
@property
def is_endless(self):
"""
True if in endless (sampling) mode
"""
return self._timeout is None and self._step_limit is None
@property
def total_units(self):
if self._step_limit is not None:
# step-scheduled mode
return self._step_limit
elif self._timeout is not None:
# time-scheduled mode
return self._timeout
return 1
@property
def current_units(self):
if self._step_limit is not None:
# step-scheduled mode
return self._step
elif self._timeout is not None:
# time-scheduled mode
return np.minimum(time.time() - self._start_time, self._timeout)
return 0
@property
def is_disabled(self):
if self._step_limit is not None:
# step-scheduled mode
return self._step_limit <= 0
elif self._timeout is not None:
# time-scheduled mode
return self._timeout <= 0
return False
def is_timeout(self, estimated_runtime=0):
if self._sigterm_received > 0:
return True
if self._step_limit is not None:
# step-scheduled mode
return self._step >= self._step_limit
elif self._timeout is not None:
# time-scheduled mode
return time.time() - self._start_time + estimated_runtime >= self._timeout
else:
return False
def is_seeding_timeout(self, estimated_runtime=0):
if self._sigterm_received > 0:
return True
if self._seeding_max_steps is not None:
# step-scheduled mode
return self._step >= self._seeding_max_steps
elif self._seeding_timeout is not None:
# time-scheduled mode
return (
time.time() - self._start_time + estimated_runtime
>= self._seeding_timeout
)
else:
return False
def reset_temperature(self):
self._temp_start_units = self.current_units
def to_elapsed_str(self):
return (
f"{self._step} steps ({time_to_pretty_str(time.time()-self._start_time)})"
)
def to_total_str(self):
if self._step_limit is not None:
return f"{self._step_limit} steps"
elif self._timeout is not None:
return f"{time_to_pretty_str(self._timeout)}"
else:
return "Endlessly (stop with CTRL+C)"
@property
def temperature(self):
if self.is_endless:
# In endless mode we randomly sample the progress
progress = np.random.default_rng().random()
else:
progress = (self.current_units - self._temp_start_units) / max(
self.total_units - self._temp_start_units, 1e-6 # don't divide by 0
)
progress = np.clip(progress, 0, 1)
return (
self._start_temperature
+ (self._end_temperature - self._start_temperature) * progress
)
class ProgBar(Callback):
def __init__(self, schedule, run_history, disable):
self._schedule = schedule
self._run_history = run_history
self.disabled = disable
if self._schedule.is_endless:
            bar_format = (
                "Endless (stop with CTRL+C) {bar}| [{elapsed}<{remaining}{postfix}]"
            )
elif self._schedule.is_timeout_mode:
bar_format = "{l_bar}{bar}| [{elapsed}<{remaining}{postfix}]"
else:
# step mode
bar_format = "{l_bar}{bar}| [{elapsed}<{remaining}{postfix}]"
self._tqdm = tqdm(
total=self._schedule.total_units,
disable=disable,
unit="",
bar_format=bar_format,
)
self._last_refreshed = time.time()
def on_search_start(self, search):
if not self.disabled:
self._tqdm.write(f"Search is scheduled for {self._schedule.to_total_str()}")
def on_evaluate_end(self, new_best: dict, f: float, info: ParamInfo):
self.update()
def on_evaluate_canceled(self, candidate: dict, info: ParamInfo):
self.update()
def update(self, close=False):
self._tqdm.n = (
self._schedule.total_units if close else self._schedule.current_units
)
self._tqdm.set_postfix_str(self._str_time_per_eval(), refresh=False)
if self._run_history.best_f is not None:
self._tqdm.set_description_str(
f"Best f: {self._run_history.best_f:0.3g} (out of {self._run_history.total_amount} params)",
refresh=False,
)
# TODO: Maybe there is some more elegant way implemented in tqdm
if close or time.time() - self._last_refreshed > 0.2:
self._last_refreshed = time.time()
self._tqdm.refresh()
# Endless mode:
# Endless (stop with CTRL+C) best: 0.42 (out of 3213) [2.3min/param]
# Step
# 48% xXXXXXXXxxxxxxxxxxxxxxxxxxxxx | best: 0.42 (out of 3213) (00:38<1:00) [2.3min/param]
# Time mode
# 48% xXXXXXXXxxxxxxxxxxxxxxxxxxxxx | best: 0.42 (out of 3213) (00:38<1:00) [2.3min/param]
def _str_time_per_eval(self):
total_params_evaluated = (
self._run_history.total_amount + self._run_history.total_canceled
)
if total_params_evaluated == 0:
return "..."
seconds_per_param = self._schedule.current_runtime / total_params_evaluated
if seconds_per_param > 60 * 60:
return f"{60*60/seconds_per_param:0.1f} param/h"
elif seconds_per_param > 60:
return f"{60/seconds_per_param:0.1f} param/min"
else:
return f"{1/seconds_per_param:0.1f} param/s"
def on_search_end(self):
self.update(True)
self._tqdm.close()
self._pretty_print_results()
def _pretty_print_results(self):
text_value_quadtuple = [
(
"Initial solution ",
self._run_history.best_per_type[CandidateType.INIT],
self._run_history.amount_per_type[CandidateType.INIT],
self._run_history.canceled_per_type[CandidateType.INIT],
self._run_history.runtime_per_type[CandidateType.INIT],
)
]
if self._run_history.amount_per_type[CandidateType.MANUALLY_ADDED] > 0:
text_value_quadtuple.append(
(
"Manually added ",
self._run_history.best_per_type[CandidateType.MANUALLY_ADDED],
self._run_history.amount_per_type[CandidateType.MANUALLY_ADDED],
self._run_history.canceled_per_type[CandidateType.MANUALLY_ADDED],
self._run_history.runtime_per_type[CandidateType.MANUALLY_ADDED],
)
)
if self._run_history.amount_per_type[CandidateType.RANDOM_SEEDING] > 0:
text_value_quadtuple.append(
(
"Random seeding",
self._run_history.best_per_type[CandidateType.RANDOM_SEEDING],
self._run_history.amount_per_type[CandidateType.RANDOM_SEEDING],
self._run_history.canceled_per_type[CandidateType.RANDOM_SEEDING],
self._run_history.runtime_per_type[CandidateType.RANDOM_SEEDING],
)
)
if self._run_history.amount_per_type[CandidateType.LOCAL_SAMPLING] > 0:
text_value_quadtuple.append(
(
"Local sampling",
self._run_history.best_per_type[CandidateType.LOCAL_SAMPLING],
self._run_history.amount_per_type[CandidateType.LOCAL_SAMPLING],
self._run_history.canceled_per_type[CandidateType.LOCAL_SAMPLING],
self._run_history.runtime_per_type[CandidateType.LOCAL_SAMPLING],
)
)
text_value_quadtuple.append(
(
"Total",
self._run_history.best_f,
self._run_history.total_amount,
self._run_history.total_canceled,
self._schedule.current_runtime,
)
)
text_list = []
for text, f, steps, canceled, elapsed in text_value_quadtuple:
value = "x" if f is None else f"{f:0.3g}"
text_list.append(
[
text,
value,
steps_to_pretty_str(steps),
steps_to_pretty_str(canceled),
time_to_pretty_str(elapsed),
]
)
text_list.insert(0, ["Mode", "Best f", "Steps", "Canceled", "Time"])
text_list.insert(1, ["----------------", "----", "----", "----", "----"])
text_list.insert(-1, ["----------------", "----", "----", "----", "----"])
if self._run_history.total_canceled == 0:
# No candidate was canceled so let's not show this column
for t in text_list:
t.pop(3)
num_items = len(text_list[0])
maxes = [
np.max([len(text_list[j][i]) for j in range(len(text_list))])
for i in range(num_items)
]
line_len = np.sum(maxes) + 3 * (num_items - 1)
line = ""
for i in range(line_len // 2 - 4):
line += "="
line += " Summary "
for i in range(line_len - len(line)):
line += "="
print(line)
for j in range(len(text_list)):
line = ""
for i in range(num_items):
if i > 0:
line += " : "
line += text_list[j][i].ljust(maxes[i])
print(line)
line = ""
for i in range(line_len):
line += "="
print(line)
class RunHistory(Callback):
"""
Keeps track of internal statistics for each call of ```run```, i.e., what is printed at the end of run
"""
def __init__(self, direction):
self._direction = direction
self.total_runtime = 0
self.total_amount = 0
self.total_canceled = 0
self.estimated_candidate_runtime = 0
self.best_f = None
self.best_per_type = {
CandidateType.INIT: None,
CandidateType.MANUALLY_ADDED: None,
CandidateType.RANDOM_SEEDING: None,
CandidateType.LOCAL_SAMPLING: None,
}
self.amount_per_type = {
CandidateType.INIT: 0,
CandidateType.MANUALLY_ADDED: 0,
CandidateType.RANDOM_SEEDING: 0,
CandidateType.LOCAL_SAMPLING: 0,
}
self.runtime_per_type = {
CandidateType.INIT: 0,
CandidateType.MANUALLY_ADDED: 0,
CandidateType.RANDOM_SEEDING: 0,
CandidateType.LOCAL_SAMPLING: 0,
}
self.canceled_per_type = {
CandidateType.INIT: 0,
CandidateType.MANUALLY_ADDED: 0,
CandidateType.RANDOM_SEEDING: 0,
CandidateType.LOCAL_SAMPLING: 0,
}
def is_better(self, old, new):
return (
old is None
or (self._direction == "max" and new > old)
or (self._direction == "min" and new < old)
)
def on_evaluate_canceled(self, candidate: dict, info: ParamInfo):
self.canceled_per_type[info.type] += 1
self.total_canceled += 1
def on_search_start(self, search):
self.best_f = search.best_f
self.best_per_type[CandidateType.INIT] = self.best_f
def on_evaluate_end(self, candidate: dict, f: float, info: ParamInfo):
runtime = info.finished_at - info.sampled_at
if self.is_better(self.best_f, f):
self.best_f = f
self.total_amount += 1
self.total_runtime += runtime
if self.is_better(self.best_per_type[info.type], f):
self.best_per_type[info.type] = f
self.amount_per_type[info.type] += 1
self.runtime_per_type[info.type] += runtime
ema = 0.5
self.estimated_candidate_runtime = (
ema * self.estimated_candidate_runtime + (1 - ema) * runtime
)
class RunContext:
def __init__(
self,
direction,
canceler,
ignore_nans,
schedule,
callbacks,
task_executor,
quiet,
):
if direction not in [
"maximize",
"maximise",
"max",
"minimize",
"minimise",
"min",
]:
            raise ValueError(
                f"Unknown direction '{direction}', must be either 'maximize' or 'minimize'"
)
direction = direction.lower()[0:3] # only save first 3 chars
self.direction = direction
self.canceler = canceler
if self.canceler is not None:
self.canceler.direction = self.direction
self.run_history = RunHistory(self.direction)
self.ignore_nans = ignore_nans
self.schedule = schedule
self.callbacks = self.hook_callbacks(callbacks)
if not quiet:
pbar = ProgBar(schedule, self.run_history, disable=quiet)
self.callbacks.append(pbar)
# Must be the first callback
self.callbacks.insert(0, self.run_history)
self.task_executor = task_executor
@staticmethod
def hook_callbacks(callbacks):
if callbacks is None:
return []
if not isinstance(callbacks, list):
# Convert single callback object to a list of size 1
callbacks = [callbacks]
return callbacks
|
{"hexsha": "e22595379d10a4fb27cc8ae82fb1a1fff6d3fb01", "size": 17151, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyhopper/run_context.py", "max_stars_repo_name": "pyhopper/pyhopper", "max_stars_repo_head_hexsha": "3a5a449ba36c03ba365d33f900c3ecbb2d107e6b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-06-21T11:25:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T15:16:06.000Z", "max_issues_repo_path": "pyhopper/run_context.py", "max_issues_repo_name": "PyHopper/PyHopper", "max_issues_repo_head_hexsha": "3a5a449ba36c03ba365d33f900c3ecbb2d107e6b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyhopper/run_context.py", "max_forks_repo_name": "PyHopper/PyHopper", "max_forks_repo_head_hexsha": "3a5a449ba36c03ba365d33f900c3ecbb2d107e6b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-07T20:56:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-07T20:56:02.000Z", "avg_line_length": 34.9307535642, "max_line_length": 125, "alphanum_fraction": 0.5796746545, "include": true, "reason": "import numpy", "num_tokens": 3905}
|
import numpy as np
import string
import time
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import os,shutil
arrr = [1,2,3,4]
print(arrr[-1:0:-1])
arr = np.array([1,2,3])
arr = np.append(arr,[4,5,6])
print(arr.reshape((-1,1)))
def m2():
labels = ['ellipse','rectangle','line']
if os.path.exists('data/fixed_figures'):
shutil.rmtree('data/fixed_figures')
os.makedirs('data/fixed_figures')
for label in labels:
os.mkdir('data/fixed_figures/'+label)
def meth():
plt.axis([0, 10, 0, 1])
for i in range(100):
y = np.random.random()
plt.scatter(i, y)
plt.pause(0.1)
#plt.show()
#print(ord('z')-96)/26.0
arr = np.array([3,6,7,2,5])
print(''.join(['5']))
array = np.array([2,4,6,8])
array = np.append(array,0)
array = np.pad(array,(0,12-len(array)),'wrap')
array.shape = (12,1)
print(array)
|
{"hexsha": "b78a495cd767ab6f8a902ff6f31b2920bcda2c79", "size": 936, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/study/testbox.py", "max_stars_repo_name": "sushanted/NNDL-forked", "max_stars_repo_head_hexsha": "3d8675f3e9258d17226b5a6da29854d75ee3315e", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/study/testbox.py", "max_issues_repo_name": "sushanted/NNDL-forked", "max_issues_repo_head_hexsha": "3d8675f3e9258d17226b5a6da29854d75ee3315e", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/study/testbox.py", "max_forks_repo_name": "sushanted/NNDL-forked", "max_forks_repo_head_hexsha": "3d8675f3e9258d17226b5a6da29854d75ee3315e", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.3442622951, "max_line_length": 50, "alphanum_fraction": 0.5833333333, "include": true, "reason": "import numpy", "num_tokens": 293}
|
"""
Scitail: A textual entailment dataset from science question answering
https://arxiv.org/pdf/1910.14599.pdf
The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question and the correct answer choice are converted into an assertive statement to form the hypothesis.
Homepage: "https://allenai.org/data/scitail"
"""
import numpy as np
from lm_eval.base import rf, BioTask
from lm_eval.metrics import mean
_CITATION = """
@inproceedings{khot2018scitail,
title={Scitail: A textual entailment dataset from science question answering},
author={Khot, Tushar and Sabharwal, Ashish and Clark, Peter},
booktitle={Thirty-Second AAAI Conference on Artificial Intelligence},
year={2018}
}
"""
class SciTailBase(BioTask):
VERSION = 0
DATASET_PATH = "lm_eval/datasets/biomedical/bigbio/biodatasets/scitail"
DATASET_NAME = None
SPLIT = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def training_docs(self):
if self.has_training_docs():
return self.dataset["train"]
def validation_docs(self):
if self.has_validation_docs():
return self.dataset["validation"]
def test_docs(self):
if self.has_test_docs():
return self.dataset["test"]
class SciTailTE(SciTailBase):
DATASET_NAME = "scitail_bigbio_te"
|
{"hexsha": "0359f1d7c32d82a229619f287ed26ba8383536eb", "size": 1482, "ext": "py", "lang": "Python", "max_stars_repo_path": "lm_eval/tasks/scitail.py", "max_stars_repo_name": "bigscience-workshop/lm-evaluation-harness", "max_stars_repo_head_hexsha": "c639c81974d6d0efea2e471f6292cf3c6ae67e4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lm_eval/tasks/scitail.py", "max_issues_repo_name": "bigscience-workshop/lm-evaluation-harness", "max_issues_repo_head_hexsha": "c639c81974d6d0efea2e471f6292cf3c6ae67e4c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lm_eval/tasks/scitail.py", "max_forks_repo_name": "bigscience-workshop/lm-evaluation-harness", "max_forks_repo_head_hexsha": "c639c81974d6d0efea2e471f6292cf3c6ae67e4c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9622641509, "max_line_length": 216, "alphanum_fraction": 0.7145748988, "include": true, "reason": "import numpy", "num_tokens": 346}
|
!=======================================================================
!
! Check for convergence in the relative abundances of all species
! of each particle. Set the relevant convergence flags. Calculate
! the percentage of particles that have converged.
!
!-----------------------------------------------------------------------
SUBROUTINE CHECK_CHEMISTRY_CONVERGENCE(NPART,NSPEC,PARTICLE,PERCENTAGE_CONVERGED)
USE HEALPIX_TYPES
USE PARTICLE_MODULE
USE MAIN_MODULE, ONLY : ABUNDANCE_LIMIT,CHEMISTRY_CONVERGENCE_CRITERION
IMPLICIT NONE
INTEGER(KIND=I4B), INTENT(IN) :: NPART,NSPEC
TYPE(PARTICLE_TYPE), INTENT(INOUT) :: PARTICLE(:)
REAL(KIND=DP), INTENT(OUT) :: PERCENTAGE_CONVERGED
INTEGER(KIND=I4B) :: I,P
REAL(KIND=DP) :: RELATIVE_CHANGE
PARTICLE%CHEMISTRY_CONVERGED=.TRUE.
DO P=1,NPART ! Loop over particles
DO I=1,NSPEC ! Loop over species
! Skip this species if its abundance is below the cut-off
IF(PARTICLE(P)%ABUNDANCE(I).LT.ABUNDANCE_LIMIT) CYCLE
! Skip this species if its abundance has not changed
IF(PARTICLE(P)%ABUNDANCE(I).EQ.PARTICLE(P)%PREVIOUS_ABUNDANCE(I)) CYCLE
! Calculate the relative change in abundance between this iteration and the previous
RELATIVE_CHANGE=ABS(PARTICLE(P)%ABUNDANCE(I)-PARTICLE(P)%PREVIOUS_ABUNDANCE(I)) &
& *2/(PARTICLE(P)%ABUNDANCE(I)+PARTICLE(P)%PREVIOUS_ABUNDANCE(I))
! If the relative change is greater than the criterion for convergence, set the flag to false
IF(RELATIVE_CHANGE.GT.CHEMISTRY_CONVERGENCE_CRITERION) THEN
PARTICLE(P)%CHEMISTRY_CONVERGED=.FALSE.
EXIT
END IF
END DO ! End of loop over species
END DO ! End of loop over particles
! Calculate the percentage of particles whose abundances have converged
PERCENTAGE_CONVERGED=COUNT(PARTICLE%CHEMISTRY_CONVERGED)/REAL(NPART,KIND=DP)*100
RETURN
END SUBROUTINE CHECK_CHEMISTRY_CONVERGENCE
!=======================================================================
|
{"hexsha": "edde42fe54f002536e1c2a958d9c11214465f04b", "size": 2072, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Source/check_chemistry_convergence.f90", "max_stars_repo_name": "uclchem/uclpdr", "max_stars_repo_head_hexsha": "a1c5ece6f21852af040ddf0af463cff26757d208", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Source/check_chemistry_convergence.f90", "max_issues_repo_name": "uclchem/uclpdr", "max_issues_repo_head_hexsha": "a1c5ece6f21852af040ddf0af463cff26757d208", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Source/check_chemistry_convergence.f90", "max_forks_repo_name": "uclchem/uclpdr", "max_forks_repo_head_hexsha": "a1c5ece6f21852af040ddf0af463cff26757d208", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8461538462, "max_line_length": 100, "alphanum_fraction": 0.6428571429, "num_tokens": 529}
|
# -*- coding: utf-8 -*-
"""
consciousness figure - save data
"""
from __future__ import division
from brian2 import *
##from brian2.only import *
prefs.codegen.target = 'auto'
import matplotlib.pyplot as plt
import scipy.io
import numpy as np
import numpy.random
import random as pyrand
from brian2 import defaultclock
def testcons(seedval, curr):
netwParams_hier = np.load('./netwParams_hiervals.npy')
distMat = np.load('./distMatval.npy')
flnMat = np.load('./flnMatval.npy')
def gen_params(extra_params=dict()):
para= {'Vr' : -70.*mV, 'Vreset' : -60.*mV, 'Vt' : -50.*mV,
'taum' : 20. * ms, 'tref' : 2.*ms,
'taumI': 10. * ms,
'k' : 400,
'p' : .1, 'pintarea': .1,
'N_area' : 29, 'isFB' : True,
'sigmaval' : 3.*mV,
}
        # PARAMETERS INTRODUCED/MODIFIED FROM EXTERNAL CODE
for key in extra_params:
para[key] = extra_params[key] # overwrite the old value
return para
para = gen_params() #adjust below since orig values were calculated with k 100
binsize = 10*ms
para['dlocal'],para['speed'] = 2.,3.5
duration = 550*ms
para['alpha'] = 4.
para['muIEsp'],para['omegaIIsp'],para['omegaEEsp'],para['omegaIEsp'] =.19/4*mV, .075*mV,.01*mV, .075*mV
para['omegaEIsp'], para['muEEsp'] = .05*mV, .05*mV #good. async
para['muI'],para['muE'] = 14.7*mV, 14.2*mV
#used
#rnd =1 : case1 rnd=2: case2 ... rnd=10:case3 ... rnd=5:case4 ..rnd=18:case5
#currlist = [0.0] + np.arange(3.5,6.1,1./6).tolist()
rnd_seed = seedval
pyrand.seed(324823+rnd_seed)
seed(324823 + rnd_seed)
numpy.random.seed(324823+rnd_seed)
currval,currdur = curr, 1500
#flnMat = np.load('./flnMatshuf2.npy')
#flnMat = np.tril(flnMat)
netsteps = round(duration/defaultclock.dt)
arealen = para['N_area']
a1 = np.zeros([3000,1]) #try changing to see if signal propagates to higher area later
a2 = currval*np.ones([currdur,1])
a3 = np.zeros([ int(netsteps - 3000 - currdur) , 1])
aareaone = np.vstack((a1,a2,a3)) #this changed.
#"""
#give input to v1
timelen = len(aareaone)
excotherareas = para['k']*4*(arealen-1)
aareaonenet = np.tile(aareaone,(1,para['k']*4))
arest = np.zeros([timelen, excotherareas])
netarr = np.hstack((aareaonenet,arest))
#"""
inputtoE1 = TimedArray(netarr*mV, dt=defaultclock.dt)
Inpcur = inputtoE1
paraVr, paraVt, paraVreset, paramuE, paramuI, parataum, parataumI, parasigmaval = para['Vr'], para['Vt'], para['Vreset'], para['muE'], para['muI'], para['taum'], para['taumI'], para['sigmaval']
paraalpha, paraomegaEEsp, paraomegaEIsp, paraomegaIEsp, paraomegaIIsp = para['alpha'], para['omegaEEsp'], para['omegaEIsp'], para['omegaIEsp'], para['omegaIIsp']
plocal, plongr = para['p'], para['pintarea']
paramuEEsp, paramuIEsp = para['muEEsp'], para['muIEsp']
dlocal = para['dlocal']
eqs = Equations('''
dV/dt=(-(V-paraVr) + inputtoE1(t,i) + paramuE )*(1./parataum) + (parasigmaval*(1./parataum)**0.5)*xi : volt (unless refractory)
''' )
eqsI = Equations('''
dV/dt=(-(V-paraVr) + paramuI )*(1./parataumI) + (parasigmaval*(1./parataumI)**0.5)*xi : volt (unless refractory)
''')
E = NeuronGroup(N=para['k']*4*arealen, method='euler', model=eqs, threshold='V > paraVt', reset='V=paraVreset', refractory=para['tref'])
I = NeuronGroup(N=para['k']*arealen, method='euler',model=eqsI, threshold='V > paraVt', reset='V=paraVreset', refractory=para['tref'])
Exc, Inh = [], []
Exc = [ E[y*(para['k']*4):(y+1)*(para['k']*4)] for y in range(arealen)]
Inh = [ I[z*(para['k']):(z+1)*(para['k'])] for z in range(arealen)]
delayMat = distMat/para['speed']
Exc_C_loc, Inh_C_loc, EtoI_C_loc, ItoE_C_loc = [None]*arealen, [None]*arealen, [None]*arealen, [None]*arealen
Exc_C_lr_fromi, EtoI_C_lr_fromi =[], []
h = 0
while h < arealen:
#print h #local.
Exc_C_loc[h] = Synapses(Exc[h], Exc[h], 'w:volt', delay = dlocal*ms, on_pre='V+=w')
Inh_C_loc[h] = Synapses(Inh[h], Inh[h], 'w:volt', delay = dlocal*ms, on_pre='V+= w ')
EtoI_C_loc[h] = Synapses(Exc[h], Inh[h], 'w:volt', delay = dlocal*ms, on_pre='V+= w ')
ItoE_C_loc[h] = Synapses(Inh[h], Exc[h], 'w:volt', delay = dlocal*ms, on_pre='V+= w ')
Exc_C_loc[h].connect(p = plocal) #this step is taking longest time. rate determining.
Inh_C_loc[h].connect(p = plocal)
EtoI_C_loc[h].connect(p = plocal)
ItoE_C_loc[h].connect(p = plocal)
Exc_C_loc[h].w = (1+paraalpha*netwParams_hier[h])*paraomegaEEsp
Inh_C_loc[h].w = -paraomegaIIsp
EtoI_C_loc[h].w = (1+paraalpha*netwParams_hier[h])*paraomegaIEsp
ItoE_C_loc[h].w = -paraomegaEIsp
m = 0 #long range to m.
while m < arealen:
if m!= h:
exc_lr_itoj, etoi_lr_itoj = None, None
# print m
exc_lr_itoj = Synapses(Exc[h], Exc[m], 'w:volt', on_pre='V+= w ')
etoi_lr_itoj = Synapses(Exc[h], Inh[m], 'w:volt', on_pre='V+= w ')
exc_lr_itoj.connect(p = plongr) #long time.
etoi_lr_itoj.connect(p = plongr)
exc_lr_itoj.w = (1 + paraalpha * netwParams_hier[m]) * paramuEEsp * flnMat[m,h]
etoi_lr_itoj.w = (1 + paraalpha * netwParams_hier[m]) * paramuIEsp * flnMat[m,h]
meanlr, varlr = delayMat[m,h], .1*delayMat[m,h]
exc_lr_itoj.delay = np.random.normal(meanlr,varlr,len(exc_lr_itoj.w))*ms
etoi_lr_itoj.delay = np.random.normal(meanlr,varlr,len(etoi_lr_itoj.w))*ms
Exc_C_lr_fromi.append(exc_lr_itoj)
EtoI_C_lr_fromi.append(etoi_lr_itoj)
m = m + 1
h = h + 1
monitors = SpikeMonitor(E)
# Setup the network, and run it
E.V = para['Vr'] + rand(len(E)) * (para['Vt'] - para['Vr'])
I.V = para['Vr'] + rand(len(I)) * (para['Vt'] - para['Vr'])
print "before net created"
net = Network(E,I,Exc_C_loc,EtoI_C_loc,ItoE_C_loc,Inh_C_loc,Exc_C_lr_fromi,EtoI_C_lr_fromi,monitors)
print "net created"
net.run(duration, report='text')
print"hey"
maxrate = np.empty([arealen,1])
meanrate = np.empty([arealen,1])
netspike = len(monitors.i)
allspike = np.empty([netspike,2])
allspike[:,0]=monitors.t/ms
allspike[:,1]=monitors.i
allspikesorted = allspike[allspike[:,1].argsort(),]
netbinno = int( 1+(duration/ms)-(binsize/ms))
poprate = np.empty([arealen,netbinno ])
u = 0 #for areas.
count = 0#for each spike.
stepsize = 1*ms
monareaktimeall = []
while u<arealen:
monareaktime = []
while((count < netspike) and (allspikesorted[count,1]<1600*(u+1)) ):
monareaktime.append(allspikesorted[count,0])#append spike times. for each area.
count = count + 1
vals= []
vals = numpy.histogram(monareaktime, bins=int(duration/stepsize))
        valszero = vals[0] # vals[0] is a vector of per-1-ms spike counts; binsize/stepsize gives the summing window (astep)
        astep = int(binsize/(1*ms)) # integer window length so it can be used in the slice below
valsnew = np.zeros(netbinno)
acount = 0
while acount < netbinno:
valsnew[acount] = sum(valszero[acount:acount+astep])
acount=acount+1
valsrate = valsnew*((1000*ms/binsize) /(1600) ) #new divide by no of neurons per E pop.
poprate[u,:] = valsrate
maxrate[u,0] = max(valsrate[int(len(valsrate)/3):])
monareaktimeall.append(monareaktime)
u = u+1
#np.save('./poprate_curr_'+str(round(curr,2))+'_seed_'+str(seedval),poprate)
#print poprate
#print "done"
|
{"hexsha": "e258f83af50db2eb914baa5a89e8e1f01f18f301", "size": 7696, "ext": "py", "lang": "Python", "max_stars_repo_path": "consciousness.py", "max_stars_repo_name": "xjwanglab/JoglekarEtAl2018_Neuron", "max_stars_repo_head_hexsha": "42c21da0df79611f62b8aa549b2bacd921c79d61", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "consciousness.py", "max_issues_repo_name": "xjwanglab/JoglekarEtAl2018_Neuron", "max_issues_repo_head_hexsha": "42c21da0df79611f62b8aa549b2bacd921c79d61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "consciousness.py", "max_forks_repo_name": "xjwanglab/JoglekarEtAl2018_Neuron", "max_forks_repo_head_hexsha": "42c21da0df79611f62b8aa549b2bacd921c79d61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-06T13:10:24.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-06T13:10:24.000Z", "avg_line_length": 35.6296296296, "max_line_length": 195, "alphanum_fraction": 0.6018711019, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2678}
|
NAME Ackermann
MODE REDUCTION
SORTS NAT
SIGNATURE
0 : -> NAT
s : NAT -> NAT
+ : NAT NAT -> NAT
* : NAT NAT -> NAT
fac : NAT -> NAT
ack : NAT NAT -> NAT
ORDERING KBO
ack = 1, fac = 1, * = 1, + = 1, s = 1, 0 = 1
ack > fac > * > + > s > 0
VARIABLES
x,y : NAT
EQUATIONS
+(x,0) = x
+(x,s(y)) = s(+(x,y))
*(x,0) = 0
*(x,s(y)) = +(*(x,y),x)
fac(0) = s(0)
fac(s(x)) = *(s(x),fac(x))
ack(0,y) = s(y)
ack(s(x),0) = ack(x,s(0))
ack(s(x),s(y)) = ack(x,ack(s(x),y))
CONCLUSION fac(s(s(s(s(s(0)))))) = ack(s(s(s(0))),0)
*(ack(s(s(s(0))),0),0) = *(0,ack(s(s(s(0))),0))
|
{"hexsha": "a0b646785b5d801b83d50a81a88ed3e88dbb0adf", "size": 589, "ext": "rd", "lang": "R", "max_stars_repo_path": "src/test/resources/specs/Ackermann.rd", "max_stars_repo_name": "falsewasnottrue/forstmeister", "max_stars_repo_head_hexsha": "a6402a479d6218b71b12369a97dab8f61e3f9717", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/test/resources/specs/Ackermann.rd", "max_issues_repo_name": "falsewasnottrue/forstmeister", "max_issues_repo_head_hexsha": "a6402a479d6218b71b12369a97dab8f61e3f9717", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test/resources/specs/Ackermann.rd", "max_forks_repo_name": "falsewasnottrue/forstmeister", "max_forks_repo_head_hexsha": "a6402a479d6218b71b12369a97dab8f61e3f9717", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.8285714286, "max_line_length": 52, "alphanum_fraction": 0.4499151104, "num_tokens": 286}
|
import numpy as np
import cv2
kNearest = cv2.ml.KNearest_create()
# The size of a license plate in Poland is 520 x 114 mm.
# choose a smaller ratio (90 instead of 114) to accept bigger contours
# Height to width ratio of license plates in Poland.
PLATE_HEIGHT_TO_WIDTH_RATIO = 90 / 520
# Width to height ratio bounds of a character
CHAR_RATIO_MIN = 0.25
CHAR_RATIO_MAX = 0.85
# Number of characters on polish license plate
LICENSE_PLATE_LENGTH = 7
RESIZED_CHAR_IMAGE_WIDTH = 20
RESIZED_CHAR_IMAGE_HEIGHT = 30
SHOW_STEPS = False
def train_KNN(classifications, flattened_images):
"""
Function that trains kNearest object based on given characters classifications and flattened images of characters
:param classifications: classification of characters
:param flattened_images: flattened images with characters
:return: True when finished
"""
# training classifications
npa_classifications = classifications.astype(np.float32)
# training images
npa_flattened_images = flattened_images.astype(np.float32)
# reshape numpy array to 1d, necessary to pass to call to train
npa_classifications = npa_classifications.reshape((npa_classifications.size, 1))
# set default K to 1
kNearest.setDefaultK(1)
# train KNN object
kNearest.train(npa_flattened_images, cv2.ml.ROW_SAMPLE, npa_classifications)
return True
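
# (Added note) train_KNN expects `classifications` to hold one class label (ASCII code)
# per training sample (it is reshaped to (N, 1) internally) and `flattened_images` to have
# shape (N, 600), i.e. RESIZED_CHAR_IMAGE_WIDTH * RESIZED_CHAR_IMAGE_HEIGHT, matching the
# ROI reshape done at prediction time.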
def get_potential_chars_ROI(chars_potential_plate):
"""
Function that finds potential license plate with closest to 7 characters on it
:param chars_potential_plate: list of list of potential plate ROIs
:return: index of list containing ROIs with closest to 7 characters
"""
    offset = 0  # helps when a potential plate has more or fewer potential chars than LICENSE_PLATE_LENGTH
while True:
for ROI_idx, potential_chars_ROI in enumerate(chars_potential_plate):
if len(potential_chars_ROI) > 0:
if len(potential_chars_ROI) == (LICENSE_PLATE_LENGTH + offset):
return ROI_idx
if len(potential_chars_ROI) == (LICENSE_PLATE_LENGTH - offset):
return ROI_idx
offset += 1
def recognize_chars_in_plate(potential_chars_ROI, img_gray):
"""
    Function that recognizes characters on a given image based on ROIs of potential characters
:param potential_chars_ROI: ROIs of potential characters
:param img_gray: gray scale image containing potential characters
:return:
license_plate - string containing recognized characters on license plate
potential_chars_ROI - list of potential chars ROIs
"""
# threshold image
ret, img_threshed = cv2.threshold(img_gray, 200, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
# license plate to be returned. We will add each recognized character
license_plate = ""
# sort potential chars ROIs from left to right
potential_chars_ROI = sorted(potential_chars_ROI, key=lambda ROI: ROI[0])
dist_list = []
for current_char in potential_chars_ROI:
# get ROI of each potential character
img_ROI = img_threshed[current_char[1]:current_char[1] + current_char[3],
current_char[0]:current_char[0] + current_char[2]]
# resize ROI to defined in KNN training size
img_ROI_resized = cv2.resize(img_ROI, (RESIZED_CHAR_IMAGE_WIDTH, RESIZED_CHAR_IMAGE_HEIGHT))
# reshape ROI to match KNN data
npa_ROI_resized = img_ROI_resized.reshape((1, RESIZED_CHAR_IMAGE_WIDTH * RESIZED_CHAR_IMAGE_HEIGHT))
# convert default image type (int) to float
npa_ROI_resized = np.float32(npa_ROI_resized)
# find nearest neighbour
retval, npa_results, neigh_resp, dists = kNearest.findNearest(npa_ROI_resized, k=1)
# save distance returned by KNN to determine which character is recognized incorrectly, when there's more chars
        # than LICENSE_PLATE_LENGTH
dist = dists[0][0]
dist_list.append(dist)
# retrieve character
currentChar = str(chr(int(npa_results[0][0])))
# add character to license plate string
license_plate = license_plate + currentChar
if SHOW_STEPS:
print(f"KNN distances: {dist_list}")
# when there's more chars than it should be, determine which character is recognized incorrectly
while len(license_plate) > LICENSE_PLATE_LENGTH:
incorrect_char_idx = np.argmax(dist_list)
license_plate = license_plate[0:incorrect_char_idx:] + license_plate[incorrect_char_idx + 1::]
del (dist_list[incorrect_char_idx])
del (potential_chars_ROI[incorrect_char_idx])
if SHOW_STEPS:
print(f"Recognized chars in license plate {license_plate}")
return license_plate, potential_chars_ROI
def license_plate_rules(license_plate, three_chars):
"""
    Check if the returned license plate matches the rules for license plates in Poland.
    If a character is in an incorrect place (for example Z in the second part of the plate), change it to the
    corresponding correct one (Z -> 2)
https://pl.wikipedia.org/wiki/Tablice_rejestracyjne_w_Polsce#Opis_systemu_tablic_rejestracyjnych_w_Polsce
:param license_plate: string containing license plate
:param three_chars: TRUE if license plate has 3 chars in first part
:return: license_plate: string containing fixed license plate
"""
# forbidden letters in first part of license plate and theirs corresponding matching
forbidden_chars_1 = {'0': 'O', '1': 'I', '2': 'Z', '3': 'B', '4': 'A', '5': 'S',
'6': 'G', '7': 'Z', '8': 'B', '9': 'P', 'X': 'K'}
# forbidden letters in second part of license plate and theirs corresponding matching
forbidden_chars_2 = {'B': '8', 'D': '0', 'I': '1', 'O': '0', 'Z': '2'}
first_part_len = 2
if three_chars:
first_part_len = 3
# if given length of license plate is smaller than LICENSE_PLATE_LENGTH
# then don't change two first numbers to letters
if len(license_plate) == LICENSE_PLATE_LENGTH:
# if any of first two characters is number change it to corresponding letters
for i in range(first_part_len):
if license_plate[i] in forbidden_chars_1:
new_char = forbidden_chars_1[license_plate[i]]
s = list(license_plate)
s[i] = new_char
license_plate = "".join(s)
# check second part of license plate
for i in range(first_part_len, len(license_plate)):
if license_plate[i] in forbidden_chars_2:
new_char = forbidden_chars_2[license_plate[i]]
s = list(license_plate)
s[i] = new_char
license_plate = "".join(s)
if SHOW_STEPS:
print(f"License plate after rules checking: {license_plate}")
return license_plate
def fill_empty_chars(license_plate, chars_ROI):
"""
    Function that fills empty characters with '?' in the found license plate
:param license_plate: license plate to fill
:param chars_ROI: [x, y, w, h] for each character
:return:
license plate - string with filled license plate
chars_ROI - ROIs of ? on image
"""
# find the widest character
widest_char = max(map(lambda x: x[2], chars_ROI))
while len(license_plate) != LICENSE_PLATE_LENGTH:
# distance between detected chars
distance_between_chars = []
# calculate distance between each character
for i, ROI in enumerate(chars_ROI):
if i == 0:
distance = ROI[0]
distance_between_chars.append(distance)
else:
distance = chars_ROI[i][0] - (chars_ROI[i - 1][0] + chars_ROI[i - 1][2])
distance_between_chars.append(distance)
# find biggest distance between characters and fill this place with character and generated ROI
char_idx = np.argmax(distance_between_chars)
# add character in char_idx place
s = list(license_plate)
s.insert(char_idx, '?') # insert ? in empty space
license_plate = "".join(s)
# add generated ROI in char_idx place
new_ROI = list(np.copy(chars_ROI[char_idx]))
new_ROI[0] -= (widest_char + 1)
chars_ROI.insert(char_idx, new_ROI)
if SHOW_STEPS:
print(f"Recognized license plate with filled empty spaces {license_plate}")
return license_plate, chars_ROI
def preprocess(image, parameters=(False, False)):
"""
Function that prepare image to further processing.
Converting image to gray scale, resizing image, blurring image, finding edges on image
:param image: image you want to preprocess
:param parameters:
index 0 -> if True chooses second parameters for image filtering
index 1 -> if True chooses second parameters for detecting edges
:return:
gray_blur -> grayscale blurred image
gray_edge -> grayscale image with edges
width -> width of image after resizing
"""
# convert image to gray scale
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# resize image for faster processing
gray_img = cv2.resize(gray_img, (768, 576))
# image_resied = cv2.resize(image, (768, 576))
# get shape of resized image
height = gray_img.shape[0]
width = gray_img.shape[1]
# blur image
if not parameters[0]:
gray_blur = cv2.bilateralFilter(gray_img, 11, 55, 55)
else: # change parameters of filter if we couldn't find any license plate before
gray_blur = cv2.bilateralFilter(gray_img, 11, 17, 17)
# find edges in image
if not parameters[1]:
gray_edges = cv2.Canny(gray_blur, 85, 255)
else: # change parameters of edge detection if we couldn't find any license plate before
gray_edges = cv2.Canny(gray_blur, 30, 200)
return gray_blur, gray_edges, width
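
# (Added note) Typical standalone use of the helper above, assuming the image was read
# successfully: gray_blur, gray_edges, width = preprocess(cv2.imread("car.jpg"))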
def find_potential_plates_vertices(gray_edges, width):
"""
Function that finds vertices of potential license plate on edge image
:param gray_edges: edge image
:param width: width of image
:return: list of potential plates vertices
"""
# find contours on image with edges
contours, hierarchy = cv2.findContours(gray_edges.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# find potential contours that matches license plate dimensions
potential_plates_vertices = []
for contour in contours:
[x, y, w, h] = cv2.boundingRect(contour)
        # exclude contours that are narrower than 1/3 of the image width or whose height doesn't match the license plate ratio
if w < (width / 3) or h < (w * PLATE_HEIGHT_TO_WIDTH_RATIO) or w == width:
continue
# lines below and get_birds_eye_view are adapted from
# https://www.pyimagesearch.com/2014/04/21/building-pokedex-python-finding-game-boy-screen-step-4-6/
# https://www.pyimagesearch.com/2014/05/05/building-pokedex-python-opencv-perspective-warping-step-5-6/
# reshape contour of potential plate
pts = contour.reshape(contour.shape[0], 2)
# vertices of plate rectangle
vertices = np.zeros((4, 2), dtype="float32")
        # top left point has the smallest sum and bottom right has the largest sum
s = pts.sum(axis=1)
vertices[0] = pts[np.argmin(s)]
vertices[2] = pts[np.argmax(s)]
# top right has minimum difference and bottom left has maximum difference
diff = np.diff(pts, axis=1)
vertices[1] = pts[np.argmin(diff)]
vertices[3] = pts[np.argmax(diff)]
potential_plates_vertices.append(vertices)
return potential_plates_vertices
def get_birds_eye_view(potential_plates_vertices, gray_edges, gray_blur, skip_ratio_check=False):
"""
changes perspective in all potential license plates to birds eye view
:param potential_plates_vertices: list of vertices of potential license plate
:param gray_edges: edge image used in warp perspective
:param gray_blur: blurred image used in warp perspective
:param skip_ratio_check: skip checking ratio of potential license plate to match all warp all potential contours
:return: warped_plates_edges: list containing birds eye view edge images with license plate
warped_plates_gray: list containing birds eye view blur images with license plate
"""
# change perspective in all potential license plates, to "birds eye" view
warped_plates_edges = []
warped_plates_gray = []
for idx, vertices in enumerate(potential_plates_vertices):
# get all corners in easier way to code
(tl, tr, br, bl) = vertices
# compute width and height of image created by corners
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
# take the maximum of the width and height values to reach final dimensions
maxWidth = max(int(widthA), int(widthB))
maxHeight = max(int(heightA), int(heightB))
# if we couldn't get birds eye view in the first attempt, because image didn't match license plate ratio
# then skip this step
if not skip_ratio_check:
# stop considering images that don't match license plate width to height ratio
if maxHeight < maxWidth * PLATE_HEIGHT_TO_WIDTH_RATIO:
continue
# construct destination points which will be used to map the screen to a top-down, "birds eye" view
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype="float32")
# calculate the perspective transform matrix and warp the perspective to grab the screen
M = cv2.getPerspectiveTransform(vertices, dst)
warp_edges = cv2.warpPerspective(gray_edges, M, (maxWidth, maxHeight))
warp_gray = cv2.warpPerspective(gray_blur, M, (maxWidth, maxHeight))
# stop considering image that contains only zeros
if not np.any(warp_edges):
continue
# add warped image to list
warped_plates_edges.append(warp_edges)
warped_plates_gray.append(warp_gray)
return warped_plates_edges, warped_plates_gray
def find_potential_chars_on_plates(warped_plates_edges):
"""
Function that finds ROIs of potential chars on image containing license plate
:param warped_plates_edges: list containing birds eye view edge images with license plate
:return: list of ROIS of potential chars on license plate
"""
chars_potential_plate = []
for idx, plate in enumerate(warped_plates_edges):
plate_area = plate.size
char_contours, char_hierarchy = cv2.findContours(plate.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cntr_img = cv2.drawContours(plate.copy(), char_contours, -1, 100, thickness=2)
potential_chars_ROI = []
for i, cntr in enumerate(char_contours):
[x, y, w, h] = cv2.boundingRect(cntr)
bounding_area = w * h
# check contour size to match potential character size
if (bounding_area < (0.025 * plate_area) or bounding_area > (0.4 * plate_area)) or \
(CHAR_RATIO_MIN * h > w or w > CHAR_RATIO_MAX * h):
continue # no character found
# check if there's no repeating contour (contour in contour)
if char_hierarchy[0, i, 3] != -1:
# and if parent contour isn't plate contour
if cv2.contourArea(char_contours[char_hierarchy[0, i, 3]]) < 0.4 * plate_area:
continue
# add ROI of potential char
potential_chars_ROI.append([x, y, w, h])
cv2.rectangle(plate, (x, y), (x + w, y + h), 100)
chars_potential_plate.append(potential_chars_ROI)
if SHOW_STEPS:
cv2.imshow(str(idx) + "plate with char boundings", plate)
cv2.imshow(str(idx) + "plate with contours", cntr_img)
return chars_potential_plate
def three_chars_in_first_part(chars_ROI):
"""
    Function that checks whether the license plate has 3 or 2 characters in its first part
:param chars_ROI: list of [x, y, w, h] for each character
:return: TRUE if license plate has 3 chars in first part
"""
distance_between_chars = []
for i, ROI in enumerate(chars_ROI):
if i < LICENSE_PLATE_LENGTH - 1:
# calculate distance between neighbours
distance = chars_ROI[i + 1][0] - (chars_ROI[i][0] + chars_ROI[i][2])
distance_between_chars.append(distance)
if SHOW_STEPS:
print(distance_between_chars)
# if biggest distance is between 3rd and 4th character then license plate has 3 characters in first part
if np.argmax(distance_between_chars) == 2:
if SHOW_STEPS:
print("3 CHARS")
return True
else:
if SHOW_STEPS:
print("2 CHARS")
return False
def recognize_license_plate(image: np.ndarray) -> str:
"""
    Function that recognizes the license plate on a given image
:param image: image containing license plate
:return: string of characters found on license plate
"""
# print(f'image.shape: {image.shape}')
if SHOW_STEPS:
print("\n \n \n \n \n \n")
# preprocess image to get useful data
gray_blur, gray_edges, width = preprocess(image)
# find vertices of potential plate
potential_plates_vertices = find_potential_plates_vertices(gray_edges, width)
# get bird eye view of potential plate based on found vertices
warped_plates_edges, warped_plates_gray = get_birds_eye_view(potential_plates_vertices, gray_edges, gray_blur)
# find potential characters on potential plates
chars_potential_plate = find_potential_chars_on_plates(warped_plates_edges)
# if no potential chars in plate found get birds eye view once more but with other parameters
if not any(chars_potential_plate):
if SHOW_STEPS:
print(f"No chars found in first try")
# get bird eye view once again but this time, skip ratio checking
warped_plates_edges, warped_plates_gray = get_birds_eye_view(potential_plates_vertices, gray_edges,
gray_blur, True)
# find potential characters on potential plates
chars_potential_plate = find_potential_chars_on_plates(warped_plates_edges)
# if no potential chars found after skipping ratio checking
# preprocess image once more with different parameters in preprocessing
if not any(chars_potential_plate):
if SHOW_STEPS:
print(f"No chars found after skipping ratio checking")
print("Trying with different preprocessing parameters...")
# list of parameter tuples for preprocessing
# index 0 -> if True chooses second parameters for image filtering
# index 1 -> if True chooses second parameters for detecting edges
preprocess_parameters = [(True, False), (False, True), (True, True)]
for params in preprocess_parameters:
gray_blur, gray_edges, width = preprocess(image, params)
# find vertices of potential plate
potential_plates_vertices = find_potential_plates_vertices(gray_edges, width)
# get bird eye view of potential plate based on found vertices
warped_plates_edges, warped_plates_gray = get_birds_eye_view(potential_plates_vertices, gray_edges,
gray_blur, True)
# find potential characters on potential plates
chars_potential_plate = find_potential_chars_on_plates(warped_plates_edges)
# if no potential chars found in this try, try with different preprocess parameters
if not any(chars_potential_plate):
continue
else:
break
# if no potential chars found in image with all combinations then return empty license plate
if not any(chars_potential_plate):
if SHOW_STEPS:
print("NO LICENSE PLATE FOUND ON IMAGE")
return '???????' # return ?
if SHOW_STEPS:
for idx, potential_chars_ROI in enumerate(chars_potential_plate):
print(f"Potential plate index: {idx} -> potential chars {len(potential_chars_ROI)}")
# Choose potential license plate with 7 potential characters. If there's no 7 potential characters in any of
# potential license plate then choose license plate with closest to 7 number of characters.
# Then get ROI of potential characters and gray image of this plate.
potential_chars_ROI_idx = get_potential_chars_ROI(chars_potential_plate)
potential_chars_ROI = chars_potential_plate[potential_chars_ROI_idx]
potential_chars_gray_img = warped_plates_gray[potential_chars_ROI_idx]
# recognize characters in license plate
license_plate, potential_chars_ROI = recognize_chars_in_plate(potential_chars_ROI, potential_chars_gray_img)
# if there's less chars on license plate that it should be, fill empty spaces based on positions of chars
if len(potential_chars_ROI) < LICENSE_PLATE_LENGTH:
license_plate, potential_chars_ROI = fill_empty_chars(license_plate, potential_chars_ROI)
# check if license plate has 3 characters in first part or 2 characters
three_chars = three_chars_in_first_part(potential_chars_ROI)
# check if returned license plate match polish rules. If not change character based on character similarity
license_plate = license_plate_rules(license_plate, three_chars)
if SHOW_STEPS:
cv2.waitKey()
cv2.destroyAllWindows()
return license_plate
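
if __name__ == "__main__":
    # End-to-end sketch (added): the file names below are assumptions, not part of the
    # original project. The KNN must be trained before recognize_license_plate is called,
    # otherwise findNearest has no samples to compare against.
    classifications = np.loadtxt("classifications.txt", np.float32)
    flattened_images = np.loadtxt("flattened_images.txt", np.float32)
    train_KNN(classifications, flattened_images)
    image = cv2.imread("example_car.jpg")
    if image is not None:
        print(recognize_license_plate(image))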
|
{"hexsha": "7885a182f7e5342a220573bac1cc9240bebf57b4", "size": 22029, "ext": "py", "lang": "Python", "max_stars_repo_path": "license_plate_processing/license_plate_recognizer.py", "max_stars_repo_name": "arekmula/license_plate_recognition", "max_stars_repo_head_hexsha": "62e5374fc56a0709d6d951629449aed347e101d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-11T07:16:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-11T07:16:06.000Z", "max_issues_repo_path": "license_plate_processing/license_plate_recognizer.py", "max_issues_repo_name": "arekmula/license_plate_recognition", "max_issues_repo_head_hexsha": "62e5374fc56a0709d6d951629449aed347e101d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "license_plate_processing/license_plate_recognizer.py", "max_forks_repo_name": "arekmula/license_plate_recognition", "max_forks_repo_head_hexsha": "62e5374fc56a0709d6d951629449aed347e101d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-06T20:12:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-06T20:12:15.000Z", "avg_line_length": 44.234939759, "max_line_length": 122, "alphanum_fraction": 0.6762449498, "include": true, "reason": "import numpy", "num_tokens": 5158}
|
import glob
import os
from PIL import Image
import numpy as np
import h5py
import IPython
path = '/home/ivanwilliam/Documents/Full_images/5.0/'
all_dirs = os.listdir(path)
dir_it=0
for dir_it in range(len(all_dirs)):
file_path = '/home/ivanwilliam/Documents/Full_images/5.0/'+str(all_dirs[dir_it])
# import IPython; IPython.embed()
for root, dirs, files in os.walk(file_path):
print('\n\tFound directory: %s' % root)
# for subdir in dirs:
# print('SUBFOLDER OF ' + str(root) + ': ' + str(subdir))
# namedir = str(subdir)
fileName = sorted(files, key=str)
N_file = len(fileName)
i = 1
k = 0
if i in range(N_file):
# print('\t%s' % fileName)
# if filename.endswith('%0*d.jpg', (4))
hdf32_list=[]
for fileName in sorted (files, key=str):
if N_file*5-k*32>=32:
print('Opening %d out of %d image at directory: %s/%s' % (i, N_file, root, fileName))
picture_ds = Image.open('%s/%s' % (root, fileName))
pics_array= np.array(picture_ds)
if i==1:
pics32_list=[]
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
# import IPython; IPython.embed()
else:
# print('Opening %d out of %d image at directory: %s/%s' % (i, N_file, root, fileName))
# picture_ds = Image.open('%s/%s' % (root, fileName))
# pics_array= np.array(picture_ds)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
# i=i+1
# import IPython; IPython.embed()
# print('Array shape of %s is %s' % (fileName, array.shape)) # (512, 512, 3)
if i*5-32*(k+1)>0:
# print('Opening %d out of %d image at directory: %s/%s' % (i, N_file, root, fileName))
# picture_ds = Image.open('%s/%s' % (root, fileName))
# pics_array= np.array(picture_ds)
pics32_array=np.stack((pics32_list[0], pics32_list[1], pics32_list[2], pics32_list[3], pics32_list[4],
pics32_list[5], pics32_list[6], pics32_list[7], pics32_list[8], pics32_list[9],
pics32_list[10], pics32_list[11], pics32_list[12], pics32_list[13], pics32_list[14],
pics32_list[15], pics32_list[16], pics32_list[17], pics32_list[18], pics32_list[19],
pics32_list[20], pics32_list[21], pics32_list[22], pics32_list[23], pics32_list[24],
pics32_list[25], pics32_list[26], pics32_list[27], pics32_list[28], pics32_list[29],
pics32_list[30], pics32_list[31]), axis=0)
print('\n Compiling 32 images into HDF list \n')
hdf32_list.append(pics32_array)
k = k+1
if N_file*5-k*32>=32:
pics32_list = pics32_list[32:]
# import IPython;IPython.embed()
# i=i+1
# import IPython; IPython.embed()
i=i+1
else:
if len(pics32_list)>32:
print('Opening %d out of %d image at directory: %s/%s' % (i, N_file, root, fileName))
picture_ds = Image.open('%s/%s' % (root, fileName))
pics_array= np.array(picture_ds)
s = len(pics32_list)
r = 32*(k+1)-N_file*5
x = s%32
e = s-(r+x)
print('\n\tThere are less than 32 file remaining, using last 32 images as LAST BATCH of HDF5 from %d till %d' % (i, N_file))
pics32_list = pics32_list[e:s]
print('\t...............Start with '+str(len(pics32_list)) +' data(s) from previous batch...............')
# import IPython; IPython.embed()
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
# import IPython; IPython.embed()
# print(len(pics32_list))
i=i+1
else:
print('Opening %d out of %d image at directory: %s/%s' % (i, N_file, root, fileName))
picture_ds = Image.open('%s/%s' % (root, fileName))
pics_array= np.array(picture_ds)
if i==N_file:
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
# print(len(pics32_list))
pics32_array=np.stack((pics32_list[0], pics32_list[1], pics32_list[2], pics32_list[3], pics32_list[4],
pics32_list[5], pics32_list[6], pics32_list[7], pics32_list[8], pics32_list[9],
pics32_list[10], pics32_list[11], pics32_list[12], pics32_list[13], pics32_list[14],
pics32_list[15], pics32_list[16], pics32_list[17], pics32_list[18], pics32_list[19],
pics32_list[20], pics32_list[21], pics32_list[22], pics32_list[23], pics32_list[24],
pics32_list[25], pics32_list[26], pics32_list[27], pics32_list[28], pics32_list[29],
pics32_list[30], pics32_list[31]), axis=0)
print('\n Compiling LAST 32 images into 1 HDF list\n')
hdf32_list.append(pics32_array)
# import IPython; IPython.embed()
# i=i+1
k=k+1
# import IPython; IPython.embed()
else:
# print('Opening %d out of %d image at directory: %s/%s' % (i, N_file, root, fileName))
# picture_ds = Image.open('%s/%s' % (root, fileName))
# pics_array= np.array(picture_ds)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
pics32_list.append(pics_array)
# print(len(pics32_list))
i=i+1
# import IPython; IPython.embed()
# hdf_file = h5py.File(hdf5_path, 'w')
h = 0
# hdf_file.open()
for h in range (k):
hdf5_name=str(all_dirs[dir_it])+'_%0*d' % (3, h+1)
hdf5_path = '/media/ivanwilliam/BINUS_DATA/HDF5_Fullfile/'+str(hdf5_name)+'.h5'
hdf_file = h5py.File(hdf5_path, 'w')
matrix123 = hdf32_list[h]
hdf_file.create_dataset(name='dataset', data=matrix123)
hdf_check=h5py.File(hdf5_path, 'r')
base_items = list (hdf_check.items())
print ("HDF5_file at "+str(hdf5_path)+" which contain: "+str(base_items)+" successfully created")
# print ("Base directory items: " + '\n' + str(base_items) + '\n with total of ' +str(len(base_items))+ ' dataset(s)')
h=h+1
# if h < k:
# h=h+1
# if h == k:
# hdf_file.close()
# import IPython; IPython.embed()
k = 0
h = 0
i = 1
hdf32_list=[]
pics32_list=[]
pics32_array=[]
dir_it=dir_it + 1
|
{"hexsha": "aff2aa7e024a900b3aa17147a3767311837d4643", "size": 6591, "ext": "py", "lang": "Python", "max_stars_repo_path": "Original_size/HDF5_converter_5.0.py", "max_stars_repo_name": "ivanwilliammd/32images_hdf5converter", "max_stars_repo_head_hexsha": "2956c163b790d1fc1c3248e46d17894dde52eeb9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-17T13:10:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T13:10:14.000Z", "max_issues_repo_path": "Original_size/HDF5_converter_5.0.py", "max_issues_repo_name": "ivanwilliammd/32images_hdf5converter", "max_issues_repo_head_hexsha": "2956c163b790d1fc1c3248e46d17894dde52eeb9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Original_size/HDF5_converter_5.0.py", "max_forks_repo_name": "ivanwilliammd/32images_hdf5converter", "max_forks_repo_head_hexsha": "2956c163b790d1fc1c3248e46d17894dde52eeb9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0280898876, "max_line_length": 130, "alphanum_fraction": 0.6217569413, "include": true, "reason": "import numpy", "num_tokens": 2122}
|
# -*- coding: utf-8 -*-
"""Copy of rnn.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1hw5VX0w03qnA-pD4YmOck-HAmzP9_fO8
# Recurrent Neural Network
## Part 1 - Data Preprocessing
### Importing the libraries
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
"""### Importing the training set"""
dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values #creates numpy array
#only numpy arrays can be used as inputs to keras neural networks
"""### Feature Scaling"""
#check the difference between standardization and normalization
#in RNN, whenever there is sigmoid function in the output layer of the RNN,
#normalization is recommended
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1)) #creating object of the MinMaxScaler class
# with feature range
training_set_scaled = sc.fit_transform(training_set)
"""### Creating a data structure with 60 timesteps and 1 output"""
#the choice of time step is important
# a wrong timestep can lead to overfitting
# the 60 time steps correspond to the past 60 inputs at any particular time step.
# hence X_train has 60 previous stock prices and y_train has the next day's stock price, which is what
# we want from the network, hence it is the output to be estimated.
X_train = []
y_train = []
for i in range(60, 1258): #hence the range starts from 60 and goes to end of the list
X_train.append(training_set_scaled[i-60:i, 0])
y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
"""### Reshaping"""
#reshaping so that the array dimensions are compatible with the input layer of the RNN
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1)) # the 1 in the end is the number of indicators i.e. dependent vars
# new dimensions are also added to include more dependent variables.
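# (added note) after this reshape X_train.shape is (1198, 60, 1): samples, timesteps, indicators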
"""## Part 2 - Building and Training the RNN
### Importing the Keras libraries and packages
"""
# explore keras documentation on the internet
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
"""### Initialising the RNN"""
regressor = Sequential() # we build the RNN as a Sequential model; check the documentation for the terms
"""### Adding the first LSTM layer and some Dropout regularisation"""
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
# return_sequences = True passes the full output sequence to the next stacked LSTM layer; in the last LSTM layer it is False
# units == neurons
# input shape == last two dimensions of X_train
regressor.add(Dropout(0.2))
# dropout regularization, i.e. randomly dropping the given fraction of neurons during training
"""### Adding a second LSTM layer and some Dropout regularisation"""
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
"""### Adding a third LSTM layer and some Dropout regularisation"""
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
"""### Adding a fourth LSTM layer and some Dropout regularisation"""
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
"""### Adding the output layer"""
regressor.add(Dense(units = 1))
"""### Compiling the RNN"""
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
"""### Fitting the RNN to the Training set"""
regressor.fit(X_train, y_train, epochs = 100, batch_size = 32)
"""## Part 3 - Making the predictions and visualising the results
### Getting the real stock price of 2017
"""
dataset_test = pd.read_csv('Google_Stock_Price_Test.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values
"""### Getting the predicted stock price of 2017"""
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
for i in range(60, 80):
X_test.append(inputs[i-60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
"""### Visualising the results"""
plt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
# the aim of the NN is to map the trend and not the exact value
# trend as in the shape of the graph
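
# Optional check (added sketch): quantify the fit with RMSE over the 20 test days.
# Uses only the arrays defined above; scikit-learn is an extra dependency here.
import math
from sklearn.metrics import mean_squared_error
rmse = math.sqrt(mean_squared_error(real_stock_price, predicted_stock_price))
print('Test RMSE:', rmse)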
|
{"hexsha": "04882599488e47e956405e21234df8e7bc1e87d4", "size": 4747, "ext": "py", "lang": "Python", "max_stars_repo_path": "Auto_Encoders_Materials/RNN/copy_of_rnn.py", "max_stars_repo_name": "mithiljoshi/Classification_Denoising_GWS", "max_stars_repo_head_hexsha": "6840cd58041dcf12e4fa88fce935977ddae205d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Auto_Encoders_Materials/RNN/copy_of_rnn.py", "max_issues_repo_name": "mithiljoshi/Classification_Denoising_GWS", "max_issues_repo_head_hexsha": "6840cd58041dcf12e4fa88fce935977ddae205d6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Auto_Encoders_Materials/RNN/copy_of_rnn.py", "max_forks_repo_name": "mithiljoshi/Classification_Denoising_GWS", "max_forks_repo_head_hexsha": "6840cd58041dcf12e4fa88fce935977ddae205d6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6666666667, "max_line_length": 137, "alphanum_fraction": 0.7432062355, "include": true, "reason": "import numpy", "num_tokens": 1200}
|
import json
import numpy as np
import tensorflow.keras as keras
data_path = "/Users/talen/Desktop/Audio_features.json"
#Loading the desired data from the json file
def data_loading(data_path, session_num):
#Read data from the json file
with open(data_path, "r") as file:
data = json.load(file)
#Specify the data (X) used for predicting and the data (Y) as the target
X = np.array(data[session_num]["Log-Mel-spectrogram"])
Y = np.array(data[session_num]["labels"])
return X,Y
#%% Create the training and testing sets
#The session 1~4 are used as training, whereas the session 5 is used as testing
sessions_training = ["1","2","3","4"]
session_testing = ["5"]
#Get the training data including the predictors and targets
X_train_1, Y_train_1 = data_loading(data_path,sessions_training[0])
X_train_2, Y_train_2 = data_loading(data_path,sessions_training[1])
X_train_3, Y_train_3 = data_loading(data_path,sessions_training[2])
X_train_4, Y_train_4 = data_loading(data_path,sessions_training[3])
X_train = np.concatenate((X_train_1, X_train_2, X_train_3, X_train_4))
Y_train = np.concatenate((Y_train_1, Y_train_2, Y_train_3, Y_train_4))
#Get the testing data including the predictors and targets
X_test, Y_test = data_loading(data_path, session_testing[0])
#Add an extra dimension — depth to the training and testing data, since the CNN requires 3D data for training
# X_train = X_train[..., np.newaxis] #4d-array -> [n_data_samples, 47 (time_bins), 40 (log-mel), 1 (depth)]
# X_test = X_test[..., np.newaxis]
#%% Create CNN for extracting features from log-mel-spectrograms
#Build the CNN
def build_CNN(input_shape):
#Initiate the model
model = keras.Sequential()
#1st conv layer
model.add(keras.layers.Conv2D(filters=30, kernel_size=(5,5), activation='relu', input_shape=input_shape))
model.add(keras.layers.MaxPool2D(pool_size=(3,3),strides=(2,2), padding='same'))
#Use batchnormalisation process to normalise the activations in the current layer and to speed up the training
model.add(keras.layers.BatchNormalization())
#2nd conv layer
model.add(keras.layers.Conv2D(filters=30, kernel_size=(3,3), activation='relu'))
model.add(keras.layers.MaxPool2D(pool_size=(3,3),strides=(2,2), padding='same'))
#Use batchnormalisation process to normalise the activations in the current layer and to speed up the training
model.add(keras.layers.BatchNormalization())
#FC layer and dense layer
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(units=32, activation="sigmoid"))
model.add(keras.layers.Dropout(0.3))
#output layer — softmax
model.add(keras.layers.Dense(units=4, activation="softmax"))
return model
#%%
#Each input sample is a 3-D tensor: time bins, log-mel-spectrogram coefficients and a depth (channel)
#dimension of 1; the last three dimensions of X_train form the CNN input size
input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3])
cnn = build_CNN(input_shape)
#Compile the model
optimiser = keras.optimizers.Adam(learning_rate=0.0001)
cnn.compile(optimizer=optimiser, loss="sparse_categorical_crossentropy", metrics=['accuracy'])
#Train the model
cnn.fit(x=X_train, y=Y_train, batch_size=32, epochs=30, verbose=1)
#Evaluate the model
error, accuracy = cnn.evaluate(X_test, Y_test, verbose=1)
print("The accuracy of the moel for SER is: ", accuracy)
#%%
#Make predictions on a new sample
def predict(model, X, y):
X=X[np.newaxis, ...]
#prediction = [[0.1, 0.2, 0.3, ...]]
prediction = model.predict(X)
#Extract the index with max value
predicted_index = np.argmax(prediction, axis=1) #e.g.,[3]
print("Expected index: {}, Predictied inex: {}".format(y, predicted_index))
X = X_test[100]
y = Y_test[100]
predict(cnn, X, y)
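
#%% Added sketch: per-class accuracy over the whole test session (uses cnn, X_test, Y_test from above)
Y_pred = np.argmax(cnn.predict(X_test), axis=1)
for c in range(4):
    mask = (Y_test == c)
    if mask.any():
        print("Class {} accuracy: {:.3f}".format(c, np.mean(Y_pred[mask] == c)))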
#%%
import librosa.display
import matplotlib.pyplot as plt
with open(data_path, "r") as file1:
data1 = json.load(file1)
#1.Read the data from session
s5_labels = data1["5"]["labels"]
s5_specs = data1["5"]["Log-Mel-spectrogram"]
#%% Temporarily used for generating spectrograms
#Read the spectrogram coefficients of session 5
for i, v in enumerate(s5_labels):
#Read the spectrogram coefficients of ang
if v == 2:
spectrogram = s5_specs[i]
librosa.display.specshow(np.array(spectrogram), x_axis="time", y_axis="log")
plt.savefig("/Users/talen/Desktop/Temp/"+str(i))
print("The number {} spectrogram is generated!".format(str(i)))
print("Done!!!")
|
{"hexsha": "5a447180cbdfb48be040398805dd8595af093e48", "size": 4564, "ext": "py", "lang": "Python", "max_stars_repo_path": "Classify_CNN.py", "max_stars_repo_name": "chentalen2021/CNN-model", "max_stars_repo_head_hexsha": "44de28e590eeea1287fd1a7b8c2df7f740727bde", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Classify_CNN.py", "max_issues_repo_name": "chentalen2021/CNN-model", "max_issues_repo_head_hexsha": "44de28e590eeea1287fd1a7b8c2df7f740727bde", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Classify_CNN.py", "max_forks_repo_name": "chentalen2021/CNN-model", "max_forks_repo_head_hexsha": "44de28e590eeea1287fd1a7b8c2df7f740727bde", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3798449612, "max_line_length": 118, "alphanum_fraction": 0.717791411, "include": true, "reason": "import numpy", "num_tokens": 1214}
|
import torch
import numpy as np
class ScheduledOptim:
""" A simple wrapper class for learning rate scheduling """
def __init__(self, model, train_config, current_step):
self._optimizer = torch.optim.Adam(
model.parameters(),
betas=train_config["optimizer"]["betas"],
eps=train_config["optimizer"]["eps"],
weight_decay=train_config["optimizer"]["weight_decay"],
)
self.current_step = current_step
self.init_lr = train_config["optimizer"]["init_lr"]
self.decay_rate = train_config["optimizer"]["decay_rate"]
self.decay_start = train_config["optimizer"]["decay_start"]
self.decay_end = train_config["optimizer"]["decay_end"]
def step_and_update_lr(self):
lr = self._update_learning_rate()
self._optimizer.step()
return lr
def zero_grad(self):
# print(self.init_lr)
self._optimizer.zero_grad()
def load_state_dict(self, path):
self._optimizer.load_state_dict(path)
def lr_lambda(self):
progress = (self.current_step - self.decay_start) / (self.decay_end - self.decay_start)
return self.decay_rate ** np.clip(progress, 0.0, 1.0)
def _update_learning_rate(self):
self.current_step += 1
lr = self.init_lr * self.lr_lambda()
for param_group in self._optimizer.param_groups:
param_group["lr"] = lr
return lr
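
if __name__ == "__main__":
    # Usage sketch (added): the config values below are illustrative assumptions,
    # not the project's real hyperparameters.
    train_config = {"optimizer": {"betas": (0.9, 0.98), "eps": 1e-9, "weight_decay": 0.0,
                                  "init_lr": 1e-3, "decay_rate": 0.01,
                                  "decay_start": 1000, "decay_end": 10000}}
    model = torch.nn.Linear(4, 1)
    optim = ScheduledOptim(model, train_config, current_step=0)
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    optim.zero_grad()
    loss.backward()
    print("lr after one step:", optim.step_and_update_lr())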
|
{"hexsha": "aa285c4838e3f5d876395e437075fe889c24ae40", "size": 1493, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/optimizer.py", "max_stars_repo_name": "shaun95/WaveGrad2", "max_stars_repo_head_hexsha": "167d5d6e98072f34a30296ff767c70a5696a4051", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 45, "max_stars_repo_stars_event_min_datetime": "2021-06-23T12:14:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T03:10:35.000Z", "max_issues_repo_path": "model/optimizer.py", "max_issues_repo_name": "shaun95/WaveGrad2", "max_issues_repo_head_hexsha": "167d5d6e98072f34a30296ff767c70a5696a4051", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-06-23T15:12:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-18T04:25:18.000Z", "max_forks_repo_path": "model/optimizer.py", "max_forks_repo_name": "shaun95/WaveGrad2", "max_forks_repo_head_hexsha": "167d5d6e98072f34a30296ff767c70a5696a4051", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2021-06-23T08:51:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T08:45:47.000Z", "avg_line_length": 33.1777777778, "max_line_length": 96, "alphanum_fraction": 0.6229068989, "include": true, "reason": "import numpy", "num_tokens": 317}
|
"""
The structure provides convenient computation of
a Groebner basis of given ideal at a point.
See evaluate function for more details
"""
### Probably we want to dispatch on coefficients type
mutable struct GroebnerEvaluator
ideal::IdealContext
end
"""
Convenience ctor 1
?? convenience
"""
function GroebnerEvaluator(ideal)
GroebnerEvaluator(ideal)
end
#=
"""
Convenience ctor 2
"""
function GroebnerEvaluator(
ideal,
eval_ring,
coeff_ring,
ground;
saturated=true)
GroebnerEvaluator(ideal,
eval_ring,
coeff_ring,
ground,
saturated
)
end
=#
"""
Evaluates all coefficients of the underlying ideal of G at the given point p,
then computes the Groebner basis of the resulting ideal
Returns the computed Groebner basis
The Groebner basis generators are elements
of G.ideal.evalring over its ground field
The underlying ideal polynomial coefficients live in the coefficient ring and
must admit substitution of the given point p
Polynomials that depend on the saturating variable "t" are
erased from the resulting Groebner basis (when the ideal is saturated)
"""
function AbstractAlgebra.evaluate(G::GroebnerEvaluator, p)
context = G.ideal
I = context.I
ground = context.ground
evalring = context.evalring
singular_ground = base_ring(evalring)
# TODO: change
lift = typeof(p[1]) <: Nemo.gfp_elem ? x -> x.data : x -> x
Is = [
map_coefficients(c -> singular_ground(lift(evaluate(c, p))), f)
for f in I
]
Is = [
change_base_ring(base_ring(evalring), f, parent=evalring)
for f in Is
]
ideal = Singular.Ideal(evalring, Is)
# @info "I am gpoing to compute GB of $ideal"
gb = GroebnerBasis.f4(ideal, reducegb=0, monorder=:lex)
# this should never happen ideally (but it does!!)
# @info gb
if length(gens(gb)) == 1
@info gb
@warn "F4 failed. Switching to Singular.std"
gb = Singular.std(ideal, complete_reduction=true)
end
gb = collect(gens(gb))
# ideal = Singular.Ideal(eval_ring, gb)
# gb = collect(gens(Singular.std(ideal, complete_reduction=true)))
# @info "After reduction $gb"
if context.saturated
t = last(gens(evalring))
gb = filter(f -> degree(f, t) == 0, gb)
end
gb = sort(gb, by=collect ∘ AbstractAlgebra.exponent_vectors)
normalize(f) = !isconstant(f) ? f * inv(leading_coefficient(f)) : f
gb = map(normalize, gb)
gb
end
"""
Short-cut for evaluate
"""
function (G::GroebnerEvaluator)(p)
evaluate(G, p)
end
function AbstractAlgebra.nvars(G::GroebnerEvaluator)
return length(gens(G.ideal.basepolyring))
end
"""
Returns a random point suitable for evaluation
"""
function generate_point(G::GroebnerEvaluator; M=Inf)
if M == Inf
[ rand(G.ideal.ground) for _ in 1:nvars(G) ]
else
[ G.ideal.ground(rand(0:M)) for _ in 1:nvars(G) ]
end
end
function AbstractAlgebra.base_ring(G::GroebnerEvaluator)
G.ideal.ground
end
|
{"hexsha": "d7b58b02bcf91a1f75a9509a28df9ea012eb16e0", "size": 3307, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/groebner.jl", "max_stars_repo_name": "sumiya11/RationalFunctionFields", "max_stars_repo_head_hexsha": "648db6a3ca01fd087b9eeba4e72930f73210e765", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-23T23:19:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T03:25:12.000Z", "max_issues_repo_path": "src/groebner.jl", "max_issues_repo_name": "sumiya11/RationalFunctionFields", "max_issues_repo_head_hexsha": "648db6a3ca01fd087b9eeba4e72930f73210e765", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-09-03T19:40:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-16T14:18:26.000Z", "max_forks_repo_path": "src/groebner.jl", "max_forks_repo_name": "sumiya11/RationalFunctionFields", "max_forks_repo_head_hexsha": "648db6a3ca01fd087b9eeba4e72930f73210e765", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2887323944, "max_line_length": 75, "alphanum_fraction": 0.6087087995, "num_tokens": 817}
|
'''
Part segmentation dataset (ShapeNet part annotations). Supports XYZ and normal channels. Up to 10000 points.
'''
import os
import os.path
import json
import numpy as np
import sys
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.abspath(BASE_DIR))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
def pc_normalize(pc):
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
pc = pc / m
return pc
class PartNormalDataset(Dataset):
def __init__(self, root, npoints=2048, classification=False, split='train', normalize=True, return_cls_label=False):
self.npoints = npoints
self.root = root
self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
self.cat = {}
self.classification = classification
self.normalize = normalize
self.return_cls_label = return_cls_label
with open(self.catfile, 'r') as f:
for line in f:
ls = line.strip().split()
self.cat[ls[0]] = ls[1]
self.cat = {k: v for k, v in self.cat.items()}
# print(self.cat)
self.meta = {}
with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:
train_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:
val_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:
test_ids = set([str(d.split('/')[2]) for d in json.load(f)])
for item in self.cat:
# print('category', item)
self.meta[item] = []
dir_point = os.path.join(self.root, self.cat[item])
fns = sorted(os.listdir(dir_point))
# print(fns[0][0:-4])
if split == 'trainval':
fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
elif split == 'train':
fns = [fn for fn in fns if fn[0:-4] in train_ids]
elif split == 'val':
fns = [fn for fn in fns if fn[0:-4] in val_ids]
elif split == 'test':
fns = [fn for fn in fns if fn[0:-4] in test_ids]
else:
print('Unknown split: %s. Exiting..' % (split))
exit(-1)
# print(os.path.basename(fns))
for fn in fns:
token = (os.path.splitext(os.path.basename(fn))[0])
self.meta[item].append(os.path.join(dir_point, token + '.txt'))
self.datapath = []
for item in self.cat:
for fn in self.meta[item]:
self.datapath.append((item, fn))
self.classes = dict(zip(self.cat, range(len(self.cat))))
# Mapping from category ('Chair') to a list of int [10,11,12,13] as segmentation labels
self.seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46],
'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27],
'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40],
'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
for cat in sorted(self.seg_classes.keys()):
print(cat, self.seg_classes[cat])
def __getitem__(self, index):
fn = self.datapath[index]
cat = self.datapath[index][0]
cls = self.classes[cat]
cls = np.array([cls]).astype(np.int32)
data = np.loadtxt(fn[1]).astype(np.float32)
point_set = data[:, 0:3]
if self.normalize:
point_set = pc_normalize(point_set)
normal = data[:, 3:6]
seg = data[:, -1].astype(np.int32)
choice = np.random.choice(len(seg), self.npoints, replace=True)
# resample
point_set = point_set[choice, :]
seg = seg[choice]
normal = normal[choice, :]
if self.classification:
return point_set, normal, cls
else:
if self.return_cls_label:
return point_set, normal, seg, cls
else:
return point_set, normal, seg
def __len__(self):
return len(self.datapath)
if __name__ == '__main__':
# import time
# time_start = time.time()
modelnet_dataset = PartNormalDataset(root=os.path.join(ROOT_DIR, 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal'), split='test')
loader = DataLoader(modelnet_dataset, batch_size=10, shuffle=True, num_workers=1)
for _, batch in enumerate(loader):
ps_batch = batch[0]
normal = batch[1]
seg = batch[2]
print(ps_batch)
print(normal)
print(seg)
#
#
#
#
# print(d.has_next_batch())
# ps_batch, cls_batch = d.next_batch(True)
# print(ps_batch.shape)
# print(cls_batch.shape)
|
{"hexsha": "13eb2d3d20114a14e1cc5f0cd6f388b1db76bb8c", "size": 5326, "ext": "py", "lang": "Python", "max_stars_repo_path": "DataLoader/part_dataset_seg.py", "max_stars_repo_name": "meihuaz/PCUNet", "max_stars_repo_head_hexsha": "c3aafd456800a1dd4e83e8d60e2606830d3e3ffc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DataLoader/part_dataset_seg.py", "max_issues_repo_name": "meihuaz/PCUNet", "max_issues_repo_head_hexsha": "c3aafd456800a1dd4e83e8d60e2606830d3e3ffc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DataLoader/part_dataset_seg.py", "max_forks_repo_name": "meihuaz/PCUNet", "max_forks_repo_head_hexsha": "c3aafd456800a1dd4e83e8d60e2606830d3e3ffc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2447552448, "max_line_length": 146, "alphanum_fraction": 0.5580172738, "include": true, "reason": "import numpy", "num_tokens": 1421}
|
\filetitle{freq}{Frequency of a tseries object}{tseries/freq}
\paragraph{Syntax}\label{syntax}
\begin{verbatim}
f = freq(x)
\end{verbatim}
\paragraph{Input arguments}\label{input-arguments}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\texttt{x} {[} tseries {]} - Tseries object.
\end{itemize}
\paragraph{Output arguments}\label{output-arguments}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\texttt{f} {[} 0 \textbar{} 1 \textbar{} 2 \textbar{} 4 \textbar{} 6
\textbar{} 12 {]} - Frequency of observations in the input tseries
object (\texttt{f} is the number of periods within a year).
\end{itemize}
\paragraph{Description}\label{description}
The \texttt{freq} function is equivalent to calling
\begin{verbatim}
get(x,'freq')
\end{verbatim}
\paragraph{Example}\label{example}
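
A minimal illustration (the \texttt{tseries} constructor call below, built
from quarterly \texttt{qq} dates, is meant only as an example):

\begin{verbatim}
x = tseries(qq(2015,1):qq(2016,4), 1:8);
f = freq(x)   % f = 4 for quarterly data
\end{verbatim}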
|
{"hexsha": "d5a331f862b39a2c6f7cb9c949d3096108268ca3", "size": 825, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "-help/tseries/freq.tex", "max_stars_repo_name": "OGResearch/IRIS-Toolbox-For-Octave", "max_stars_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-06T13:38:38.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-06T13:38:38.000Z", "max_issues_repo_path": "-help/tseries/freq.tex", "max_issues_repo_name": "OGResearch/IRIS-Toolbox-For-Octave", "max_issues_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-03-28T08:13:20.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-02T10:40:25.000Z", "max_forks_repo_path": "-help/tseries/freq.tex", "max_forks_repo_name": "OGResearch/IRIS-Toolbox-For-Octave", "max_forks_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-17T07:06:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T07:06:39.000Z", "avg_line_length": 20.625, "max_line_length": 70, "alphanum_fraction": 0.7260606061, "num_tokens": 266}
|
import numpy as np
import socket
import asyncio
from matplotlib import pyplot as plt
from time import sleep
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, LoggingEventHandler
import logging
HOST = "NUS15128-11-albhuan.local" # Needs to be constantly updated except 127.0.0.1 which is auto local host
PATH = 'image_stream.jpg'
PORT = 8324
address = (HOST,PORT)
## Approach: cast the OpenCV image to a byte array,
## add a marker byte sequence at the beginning/end that the receiver can look for,
## and use OpenCV to rebuild the image from the byte array on the other side.
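
# A minimal sketch of the approach above (hypothetical helpers, not used by this
# server; cv2 is not imported elsewhere in this script, so the import is kept
# inside the functions):
def encode_frame(frame):
    """Encode a BGR numpy image to JPEG bytes; returns b'' if encoding fails."""
    import cv2
    ok, buf = cv2.imencode('.jpg', frame)
    return buf.tobytes() if ok else b''


def decode_frame(data):
    """Decode JPEG bytes back into a BGR numpy image (None if the data is invalid)."""
    import cv2
    arr = np.frombuffer(data, dtype=np.uint8)
    return cv2.imdecode(arr, cv2.IMREAD_COLOR)
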
class EventHandler(FileSystemEventHandler):
def __init__(self, obs):
self.observer = obs
self.image = b''
def on_modified(self, event):
if not event.src_path.endswith(PATH):
return
with open(event.src_path, 'rb') as img:
data_byte = img.read()
if len(data_byte) < 2e5: # TODO: filtering for incomplete writes is jank
return
self.image = data_byte
def send_image(self):
"""Raises BrokenPipeError if client has disconnected, and ValueError if there is no image to send."""
if len(self.image) == 0: raise ValueError("no image to send")
data_byte = self.image
print("sending image of", len(data_byte), type(data_byte))
data_byte = data_byte + b'ThisIsAnEndToken'
conn.sendall(data_byte)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(address)
s.listen()
while True:
print("waiting for client...")
conn, addr = s.accept()
#### With connection
with conn:
print(f"Connected by {addr}")
observer = Observer()
handler = EventHandler(observer)
observer.schedule(handler, '.')
observer.start()
try:
while True:
                    try:
                        handler.send_image()
                    except ValueError:
                        print("no image to send!")
                    # always sleep between iterations (also avoids busy-waiting before the first image arrives)
                    sleep(0.05)
except BrokenPipeError:
print("client disconnected")
observer.stop()
observer.join()
except ConnectionResetError:
print("client disconnected")
observer.stop()
observer.join()
|
{"hexsha": "916b277d40eb004eb1c77b2ca1edf0f6dc867ccd", "size": 2414, "ext": "py", "lang": "Python", "max_stars_repo_path": "server.py", "max_stars_repo_name": "RoboticsTeam4904/2022-camera-server", "max_stars_repo_head_hexsha": "8ba2d4b5b51f2e92b468212e73dba5af419e0a49", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "server.py", "max_issues_repo_name": "RoboticsTeam4904/2022-camera-server", "max_issues_repo_head_hexsha": "8ba2d4b5b51f2e92b468212e73dba5af419e0a49", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "server.py", "max_forks_repo_name": "RoboticsTeam4904/2022-camera-server", "max_forks_repo_head_hexsha": "8ba2d4b5b51f2e92b468212e73dba5af419e0a49", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-21T22:07:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T22:19:53.000Z", "avg_line_length": 28.4, "max_line_length": 109, "alphanum_fraction": 0.5948632974, "include": true, "reason": "import numpy", "num_tokens": 489}
|
# # Mauna Loa time series example
#
# In this notebook, we apply Gaussian process regression to the Mauna Loa CO₂
# dataset. This showcases a rich combination of kernels, and how to handle and
# optimize all their parameters.
# ## Setup
#
# We make use of the following packages:
using CSV, DataFrames # data loading
using AbstractGPs # exact GP regression
using ParameterHandling # for nested and constrained parameters
using Optim # optimization
using Zygote # auto-diff gradient computation
using Plots # visualisation
# Let's load and visualize the dataset.
# !!! tip
# The `let` block [creates a new
# scope](https://docs.julialang.org/en/v1/manual/variables-and-scoping/#scope-of-variables),
# so any utility variables we define in here won't leak outside. This is
# particularly helpful to keep notebooks tidy! The return value of the
# block is given by its last expression.
(xtrain, ytrain), (xtest, ytest) = let
data = CSV.read(joinpath(@__DIR__, "CO2_data.csv"), Tables.matrix; header=0)
year = data[:, 1]
co2 = data[:, 2]
## We split the data into training and testing set:
idx_train = year .< 2004
idx_test = .!idx_train
(year[idx_train], co2[idx_train]), (year[idx_test], co2[idx_test]) # block's return value
end
## The utility variables such as `idx_train` and `idx_test` are not available outside the `let` scope
function plotdata()
plot(; xlabel="year", ylabel="CO₂ [ppm]", legend=:bottomright)
scatter!(xtrain, ytrain; label="training data", ms=2, markerstrokewidth=0)
return scatter!(xtest, ytest; label="test data", ms=2, markerstrokewidth=0)
end
plotdata()
# ## Prior
#
# We will model this dataset using a sum of several kernels which describe
#
# - smooth trend: squared exponential kernel with long lengthscale;
# - seasonal component: periodic covariance function with period of one year,
# multiplied with a squared exponential kernel to allow decay away from exact
# periodicity;
# - medium-term irregularities: rational quadratic kernel;
# - noise terms: squared exponential kernel with short lengthscale
# and uncorrelated observation noise.
#
# For more details, see [Rasmussen & Williams (2005), chapter 5](http://www.gaussianprocess.org/gpml/chapters/RW5.pdf).
# We will use
# [ParameterHandling.jl](https://invenia.github.io/ParameterHandling.jl/) for
# handling the (hyper)parameters of our model. It provides functions such as
# `positive` with which we can put constraints on the hyperparameters, and
# allows us to represent all required parameters as a nested NamedTuple:
#! format: off
## initial values to match http://stor-i.github.io/GaussianProcesses.jl/latest/mauna_loa/
θ_init = (;
se_long = (;
σ = positive(exp(4.0)),
ℓ = positive(exp(4.0)),
),
seasonal = (;
## product kernels only need a single overall signal variance
per = (;
ℓ = positive(exp(0.0)), # relative to period!
p = fixed(1.0), # 1 year, do not optimize over
),
se = (;
σ = positive(exp(1.0)),
ℓ = positive(exp(4.0)),
),
),
rq = (;
σ = positive(exp(0.0)),
ℓ = positive(exp(0.0)),
α = positive(exp(-1.0)),
),
se_short = (;
σ = positive(exp(-2.0)),
ℓ = positive(exp(-2.0)),
),
noise_scale = positive(exp(-2.0)),
)
#! format: on
#md nothing #hide
# We define a couple of helper functions to simplify the kernel construction:
SE(θ) = θ.σ^2 * with_lengthscale(SqExponentialKernel(), θ.ℓ)
## PeriodicKernel is broken, see https://github.com/JuliaGaussianProcesses/KernelFunctions.jl/issues/389
##Per(θ) = with_lengthscale(PeriodicKernel(; r=[θ.ℓ/2]), θ.p) # NOTE- discrepancy with GaussianProcesses.jl
Per(θ) = with_lengthscale(SqExponentialKernel(), θ.ℓ) ∘ PeriodicTransform(1 / θ.p)
RQ(θ) = θ.σ^2 * with_lengthscale(RationalQuadraticKernel(; α=θ.α), θ.ℓ)
#md nothing #hide
# This allows us to write a function that, given the nested tuple of parameter values, constructs the GP prior:
function build_gp_prior(θ)
k_smooth_trend = SE(θ.se_long)
k_seasonality = Per(θ.seasonal.per) * SE(θ.seasonal.se)
k_medium_term_irregularities = RQ(θ.rq)
k_noise_terms = SE(θ.se_short) + θ.noise_scale^2 * WhiteKernel()
kernel = k_smooth_trend + k_seasonality + k_medium_term_irregularities + k_noise_terms
return GP(kernel) # [`ZeroMean`](@ref) mean function by default
end
#md nothing #hide
# ## Posterior
#
# To construct the posterior, we need to first build a [`FiniteGP`](@ref),
# which represents the infinite-dimensional GP at a finite number of input
# features:
function build_finite_gp(θ)
f = build_gp_prior(θ)
return f(xtrain)
end
#md nothing #hide
# !!! info "`WhiteKernel` vs `FiniteGP` observation noise"
# In this notebook, we already included observation noise through the
# `WhiteKernel` as part of the GP prior covariance in `build_gp_prior`. We
# therefore call `f(xtrain)` which implies zero (additional) observation
# noise.
#
# Alternatively, we could have omitted the `θ.noise_scale^2 *
# WhiteKernel()` term and instead passed the noise variance as a second
# argument to the GP call in `build_finite_gp`, `f(xtrain,
# θ.noise_scale^2)`.
#
# These two approaches have slightly different semantics: In the first one,
# the `WhiteKernel` contributes non-zero variance to the `[i, j]` element
# of the covariance matrix of the `FiniteGP` if `xtrain[i] == xtrain[j]`
# (based on the values of the features). In the second one, the observation
# noise variance passed to `FiniteGP` only contributes to the diagonal
# elements of the covariance matrix, i.e. for `i == j`.
#
# Moreover, the variance (uncertainty) of the posterior predictions
# includes the variance from the `WhiteKernel`, but does not include the
# variance of the observation noise passed to the `FiniteGP`. To include
# the observation noise in posterior predictions from the second approach,
# call `fpost_opt(xtest, noise_scale^2)`.
#
# !!! tip
# For most use-cases and if in any doubt, we recommend that you pass in
# observation noise to the `FiniteGP`, and omit the explicit `WhiteKernel`.
# This is slightly faster (no need to check `xtrain[i] == xtrain[j]` for
# all pairs `i`, `j`), and `WhiteKernel` will not give stable gradients if
# you wish to compute the gradient of the log marginal likelihood
# `logpdf(f(x), y)` w.r.t. `x`.
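# A minimal sketch of that recommended variant (kept as a comment so it is not
# executed here; `build_gp_prior_alt` stands for a hypothetical copy of
# `build_gp_prior` with the `θ.noise_scale^2 * WhiteKernel()` term removed):
##
## function build_finite_gp_alt(θ)
##     f = build_gp_prior_alt(θ)
##     return f(xtrain, θ.noise_scale^2)
## end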
# We obtain the posterior, conditioned on the (finite) observations, by calling
# [`posterior`](@ref):
function build_posterior_gp(θ)
fx = build_finite_gp(θ)
return posterior(fx, ytrain)
end
#md nothing #hide
# Now we can put it all together to obtain a [`PosteriorGP`](@ref).
# The call to `ParameterHandling.value` is required to replace the constraints
# (such as `positive` in our case) with concrete numbers:
fpost_init = build_posterior_gp(ParameterHandling.value(θ_init))
# Let's visualize what the GP fitted to the data looks like, for the initial choice of kernel hyperparameters.
#
# We use the following function to plot a GP `f` on a specific range, using the
# AbstractGPs [plotting
# recipes](https://juliagaussianprocesses.github.io/AbstractGPs.jl/dev/concrete_features/#Plotting).
# By setting `ribbon_scale=2` we visualize the uncertainty band with ``\pm 2``
# (instead of the default ``\pm 1``) standard deviations.
plot_gp!(f; label) = plot!(f(1920:0.2:2030); ribbon_scale=2, linewidth=1, label)
#md nothing #hide
plotdata()
plot_gp!(fpost_init; label="posterior f(⋅)")
# A reasonable fit to the data, but poor extrapolation away from the observations!
# ## Hyperparameter Optimization
#
# To improve the fit, we want to maximize the (log) marginal likelihood with
# respect to the hyperparameters.
# [Optim.jl](https://julianlsolvers.github.io/Optim.jl/stable/) expects to
# minimize a loss, so we define it as the negative log marginal likelihood:
function loss(θ)
fx = build_finite_gp(θ)
lml = logpdf(fx, ytrain) # this computes the log marginal likelihood
return -lml
end
#md nothing #hide
# !!! note "Work-in-progress"
# In the future, we are planning to provide the `optimize_loss` utility
# function as part of JuliaGaussianProcesses -- for now, we just define it
# inline.
#
# The L-BFGS parameters were chosen because they seem to work well empirically.
# You could also try with the defaults.
default_optimizer = LBFGS(;
alphaguess=Optim.LineSearches.InitialStatic(; scaled=true),
linesearch=Optim.LineSearches.BackTracking(),
)
function optimize_loss(loss, θ_init; optimizer=default_optimizer, maxiter=1_000)
options = Optim.Options(; iterations=maxiter, show_trace=true)
θ_flat_init, unflatten = ParameterHandling.value_flatten(θ_init)
loss_packed = loss ∘ unflatten
## https://julianlsolvers.github.io/Optim.jl/stable/#user/tipsandtricks/#avoid-repeating-computations
function fg!(F, G, x)
if F !== nothing && G !== nothing
val, grad = Zygote.withgradient(loss_packed, x)
G .= only(grad)
return val
elseif G !== nothing
grad = Zygote.gradient(loss_packed, x)
G .= only(grad)
return nothing
elseif F !== nothing
return loss_packed(x)
end
end
result = optimize(Optim.only_fg!(fg!), θ_flat_init, optimizer, options; inplace=false)
return unflatten(result.minimizer), result
end
#md nothing #hide
# We now run the optimization:
θ_opt, opt_result = optimize_loss(loss, θ_init)
opt_result
# The final value of the log marginal likelihood is:
-opt_result.minimum
# !!! warning
# To avoid bad local optima, we could (and should) have carried out several
# random restarts with different initial values for the hyperparameters,
# and then picked the result with the highest marginal likelihood. We omit
# this for simplicity. For more details on how to fit GPs in practice,
# check out [A Practical Guide to Gaussian
# Processes](https://tinyurl.com/guide2gp).
#
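# A minimal sketch of such a multi-start scheme (kept as a comment, not run here; it
# perturbs the flattened unconstrained parameters, re-optimizes from each start, and
# keeps the candidate with the lowest loss, i.e. the highest marginal likelihood):
##
## θ_flat, unflatten = ParameterHandling.flatten(θ_init)
## starts = [unflatten(θ_flat .+ 0.3 .* randn(length(θ_flat))) for _ in 1:5]
## candidates = [optimize_loss(loss, θ) for θ in starts]
## θ_best, best_result = candidates[argmin([res.minimum for (_, res) in candidates])]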
# Let's construct the posterior GP with the optimized hyperparameters:
fpost_opt = build_posterior_gp(ParameterHandling.value(θ_opt))
#md nothing #hide
# This is the kernel with the point-estimated hyperparameters:
fpost_opt.prior.kernel
# Let's print the optimized values of the hyperparameters in a more helpful format:
# !!! note "Work-in-progress"
# This is another utility function we would eventually like to move out of this notebook:
using Printf
show_params(nt::Union{Dict,NamedTuple}) = String(take!(show_params(IOBuffer(), nt)))
function show_params(io, nt::Union{Dict,NamedTuple}, indent::Int=0)
for (s, v) in pairs(nt)
if v isa Union{Dict,NamedTuple}
println(io, " "^indent, s, ":")
show_params(io, v, indent + 4)
else
println(io, " "^indent, s, " = ", @sprintf("%.3f", v))
end
end
return io
end
print(show_params(ParameterHandling.value(θ_opt)))
# And, finally, we can visualize our optimized posterior GP:
plotdata()
plot_gp!(fpost_opt; label="optimized posterior f(⋅)")
|
{"hexsha": "50f47e80c809d04e82ef81f7ba0c176e405e7f48", "size": 11222, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/1-mauna-loa/script.jl", "max_stars_repo_name": "JuliaGaussianProcesses/AbstractGP", "max_stars_repo_head_hexsha": "6ee8549f536c6037a02a1cc445fd35c0811425ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-11T11:07:08.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-17T06:42:23.000Z", "max_issues_repo_path": "examples/1-mauna-loa/script.jl", "max_issues_repo_name": "JuliaGaussianProcesses/AbstractGP", "max_issues_repo_head_hexsha": "6ee8549f536c6037a02a1cc445fd35c0811425ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/1-mauna-loa/script.jl", "max_forks_repo_name": "JuliaGaussianProcesses/AbstractGP", "max_forks_repo_head_hexsha": "6ee8549f536c6037a02a1cc445fd35c0811425ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1589403974, "max_line_length": 119, "alphanum_fraction": 0.7005881305, "num_tokens": 2945}
|
#!/usr/bin/env python3
import json
import os.path
import multiprocessing as mp
import timeit
import numpy as np
import _init_path
from skimage import io
from spacenet7_model.configs import load_config
from spacenet7_model.utils import (dump_prediction_to_png, ensemble_subdir,
experiment_subdir, get_aoi_from_path,
get_image_paths, load_prediction_from_png,
map_wrapper, val_list_filename)
from tqdm import tqdm
def ensemble_preds(image_path, aoi, out_dir, weights, config):
"""[summary]
Args:
image_path ([type]): [description]
aoi ([type]): [description]
out_root ([type]): [description]
weights ([type]): [description]
config ([type]): [description]
"""
image_orig = io.imread(image_path)
roi_mask = image_orig[:, :, 3] > 0
h, w = roi_mask.shape
ensembled_score = np.zeros(shape=[len(config.INPUT.CLASSES), h, w])
image_filename = os.path.basename(image_path)
array_filename, _ = os.path.splitext(image_filename)
array_filename = f'{array_filename}.png'
for exp_id, weight in zip(config.ENSEMBLE_EXP_IDS, weights):
exp_subdir = experiment_subdir(exp_id)
score_array = load_prediction_from_png(
os.path.join(config.PREDICTION_ROOT, exp_subdir, aoi,
array_filename),
n_channels=len(config.INPUT.CLASSES))
score_array[:, np.logical_not(roi_mask)] = 0
assert score_array.min() >= 0 and score_array.max() <= 1
ensembled_score += score_array * weight
assert ensembled_score.min() >= 0 and ensembled_score.max() <= 1
dump_prediction_to_png(os.path.join(out_dir, array_filename),
ensembled_score)
if __name__ == '__main__':
t0 = timeit.default_timer()
config = load_config()
assert len(config.ENSEMBLE_EXP_IDS) >= 1
N = len(config.ENSEMBLE_EXP_IDS)
n_thread = config.ENSEMBLE_NUM_THREADS
n_thread = n_thread if n_thread > 0 else mp.cpu_count()
print(f'N_thread for multiprocessing: {n_thread}')
# prepare ensemble weights
if len(config.ENSEMBLE_WEIGHTS) == 0:
weights = np.ones(shape=(N))
else:
assert len(config.ENSEMBLE_WEIGHTS) == N
weights = np.array(config.ENSEMBLE_WEIGHTS)
weights = weights / weights.sum()
# get full paths to image files
if config.TEST_TO_VAL:
# use val split for test.
data_list_path = os.path.join(
config.INPUT.TRAIN_VAL_SPLIT_DIR,
val_list_filename(config.INPUT.TRAIN_VAL_SPLIT_ID))
with open(data_list_path) as f:
data_list = json.load(f)
image_paths = [data['image_masked'] for data in data_list]
else:
# use test data for test (default).
image_paths = get_image_paths(config.INPUT.TEST_DIR)
subdir = ensemble_subdir(config.ENSEMBLE_EXP_IDS)
out_root = os.path.join(config.ENSEMBLED_PREDICTION_ROOT, subdir)
os.makedirs(out_root, exist_ok=False)
print('preparing input args...')
input_args = []
for image_path in image_paths:
aoi = get_aoi_from_path(image_path)
# prepare aoi sub directory to output ensemble results
out_dir = os.path.join(out_root, aoi)
os.makedirs(out_dir, exist_ok=True)
input_args.append(
[ensemble_preds, image_path, aoi, out_dir, weights, config])
print('running multiprocessing...')
with mp.Pool(processes=n_thread) as pool:
with tqdm(total=len(input_args)) as t:
for _ in pool.imap_unordered(map_wrapper, input_args):
t.update(1)
elapsed = timeit.default_timer() - t0
print('Time: {:.3f} min'.format(elapsed / 60.0))
|
{"hexsha": "dbc4264ef416b43749b313e706f7553f0e13e05f", "size": 3793, "ext": "py", "lang": "Python", "max_stars_repo_path": "4-motokimura/code/tools/ensemble_models.py", "max_stars_repo_name": "remtav/SpaceNet7_Multi-Temporal_Solutions", "max_stars_repo_head_hexsha": "ee535c61fc22bffa45331519239c6d1b044b1514", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2021-02-18T07:04:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T15:31:06.000Z", "max_issues_repo_path": "4-motokimura/code/tools/ensemble_models.py", "max_issues_repo_name": "remtav/SpaceNet7_Multi-Temporal_Solutions", "max_issues_repo_head_hexsha": "ee535c61fc22bffa45331519239c6d1b044b1514", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-02-22T18:53:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-22T20:28:06.000Z", "max_forks_repo_path": "4-motokimura/code/tools/ensemble_models.py", "max_forks_repo_name": "remtav/SpaceNet7_Multi-Temporal_Solutions", "max_forks_repo_head_hexsha": "ee535c61fc22bffa45331519239c6d1b044b1514", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2021-02-25T17:25:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T16:59:32.000Z", "avg_line_length": 34.7981651376, "max_line_length": 77, "alphanum_fraction": 0.6527814395, "include": true, "reason": "import numpy", "num_tokens": 897}
|
import networkx as nw
import random as rand
import math
import numpy as np
from matplotlib import pyplot as plt
point_dict = {}
neighbor_dict = {}
R = 50
infinity = 10000
X_MAX = 500
Y_MAX = 500
def check_if_same_point(x, y):
    """Return True if a point with the same (x, y) coordinates already exists."""
    global point_dict
    for item in point_dict:
        # both coordinates must match for the candidate to be a duplicate point
        if (point_dict[item]['x'] == x) and (point_dict[item]['y'] == y):
            return True
    return False
def make_points():
global point_dict
points_count = rand.randint(200, 400)
for point in range(points_count):
x = rand.randint(0, X_MAX)
y = rand.randint(0, Y_MAX)
while check_if_same_point(x, y):
x = rand.randint(0, X_MAX)
y = rand.randint(0, Y_MAX)
point_dict.update({point: {'x': x, 'y': y}})
def get_neighbor():
global point_dict, neighbor_dict
AdjcentMatrix = np.full((len(point_dict), len(point_dict)), infinity)
for point_i in range(len(point_dict)):
point_i_x = point_dict[point_i]['x']
point_i_y = point_dict[point_i]['y']
for point_j in range(len(point_dict)):
if point_dict[point_j]['x'] == point_i_x and point_dict[point_j]['y'] == point_i_y:
AdjcentMatrix[point_i][point_j] = 0
continue
distance = math.sqrt((point_dict[point_j]['x'] - point_i_x) ** 2
+ (point_dict[point_j]['y'] - point_i_y) ** 2)
if distance <= R:
if point_i in neighbor_dict.keys():
this_neighbor_list = neighbor_dict[point_i]
else:
this_neighbor_list = []
this_neighbor_list.append(point_j)
neighbor_dict.update({point_i: this_neighbor_list})
AdjcentMatrix[point_i][point_j] = 1
return AdjcentMatrix
def createGraph():
global graph, point_dict, neighbor_dict
point_dict.clear()
neighbor_dict.clear()
plt.figure(figsize=(20, 15), dpi=50)
graph = nw.Graph()
make_points()
AdjcentMatrix = get_neighbor()
for i in range(len(point_dict)):
graph.add_node(i)
for i in neighbor_dict:
point_edge = neighbor_dict[i]
for j in range(len(point_edge)):
graph.add_edge(i, point_edge[j])
pos = []
for i in range(len(point_dict)):
pos.append((point_dict[i]['x'], point_dict[i]['y']))
nw.draw(graph, pos=pos, node_size=200, with_labels=range(len(point_dict)), font_size=30)
plt.savefig('graph_total')
plt.cla()
graph.clear()
return AdjcentMatrix, len(point_dict)
def drawResult(ways):
global graph, point_dict, neighbor_dict
graph = nw.Graph()
plt.cla()
for i in range(len(point_dict)):
graph.add_node(i)
for i in neighbor_dict:
point_edge = neighbor_dict[i]
for j in range(len(point_edge)):
graph.add_edge(i, point_edge[j])
tuple_way_list = []
for i in range(len(ways) - 1):
tuple_way_list.append((ways[i], ways[i + 1]))
pos = []
for i in range(len(point_dict)):
pos.append((point_dict[i]['x'], point_dict[i]['y']))
nw.draw(graph, pos=pos, node_size=200, with_labels=range(len(point_dict)), font_size=30)
nw.draw_networkx_edges(graph, pos, edgelist=tuple_way_list, width=8, alpha=0.5, edge_color='r')
plt.savefig('graph_total')
#
# # plt.show()
# plt.ion()
# v0 = input('Enter the source node: ')
#
# v0 = int(v0)
#
# v1 = input('Enter the destination node: ')
#
# v1 = int(v1)
#
# choice = input('1. DV algorithm  2. LS algorithm\n Choose the algorithm to run: ')
#
# choice = int(choice)
#
# if choice == 1:
#
# # dv
# print("hehehe")
#
# router_list = dv_new.calc_router(len(point_dict), AdjcentMatrix)
#
# for i in range(len(router_list)):
# print('Routing table of node {0}:'.format(i))
# print('Destination\tNext hop\tPath cost')
# for j in range(len(router_list[i].neighbor)):
# print('{0}\t{1}\t{2}'.format(router_list[i].destination[j], router_list[i].next[j], router_list[i].cost[j]))
#
# # dv.init(len(point_dict), AdjcentMatrix)
# # res = dv.calc_router(v0)
# #
# # distance = res[0]
# # path = res[1]
#
# # print(distance)
# # print(path)
#
# elif choice == 2:
# # ls
#
# res = ls.dijkstra(len(point_dict), AdjcentMatrix, v0)
#
# distance = res[0]
# path = res[1]
#
# print(distance)
# print(path)
#
# plt.figure(figsize=(20, 15), dpi=50)
# graph = nw.Graph()
#
# for i in range(len(point_dict)):
# graph.add_node(i)
#
# ways = ls.get_ways(v0, v1, path)
#
# for i in range(len(ways) - 1):
# graph.add_edge(ways[i], ways[i + 1])
#
# print('Shortest path from node {0} to node {1} has length {2}'.format(v0, v1, distance[v1]))
#
# nw.draw(graph, pos=pos, node_size=500, with_labels=range(len(point_dict)), font_size=20)
#
# plt.show()
|
{"hexsha": "20ebdfa158abb9e53a21cf3892bf78139950bbba", "size": 5042, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/drawNetwork.py", "max_stars_repo_name": "SaltyFish6952/RoutingAlgorithmQt", "max_stars_repo_head_hexsha": "8020cf034c886e3cb401ed151b78575508c72f4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-17T06:55:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-17T06:55:02.000Z", "max_issues_repo_path": "src/drawNetwork.py", "max_issues_repo_name": "SaltyFish6952/RoutingAlgorithmQt", "max_issues_repo_head_hexsha": "8020cf034c886e3cb401ed151b78575508c72f4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/drawNetwork.py", "max_forks_repo_name": "SaltyFish6952/RoutingAlgorithmQt", "max_forks_repo_head_hexsha": "8020cf034c886e3cb401ed151b78575508c72f4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8564102564, "max_line_length": 123, "alphanum_fraction": 0.5660452202, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1368}
|