code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib.pyplot as plt
def plot_coefficients(classifier, feature_names, icd, top_features=20):
    """Bar-plot the strongest negative and positive linear-SVM coefficients.

    Parameters
    ----------
    classifier : fitted estimator exposing ``coef_``
    feature_names : dict
        TF-IDF vocabulary mapping term -> column index.
    icd : str
        ICD code, used as the plot title and output file name.
    top_features : int, default=20
        Number of features shown from each end of the coefficient ranking.
    """
    weights = classifier.coef_.ravel()
    # Drop the two trailing coefficients (the extra non-text columns that were
    # appended after the TF-IDF features, e.g. age/gender).
    weights = np.delete(weights, -1)
    weights = np.delete(weights, -1)

    order = np.argsort(weights)
    strongest_negative = order[:top_features]
    strongest_positive = order[-top_features:]
    shown = np.hstack([strongest_negative, strongest_positive])

    plt.figure(figsize=(15, 5))
    bar_colors = ["seagreen" if w >= 0 else "darkgoldenrod" for w in weights[shown]]
    plt.bar(np.arange(2 * top_features), weights[shown], color=bar_colors)

    # Recover the term for each column index via reverse vocabulary lookup.
    terms = list(feature_names.keys())
    indices = list(feature_names.values())
    tick_labels = [terms[indices.index(col)] for col in shown]
    plt.xticks(np.arange(2 * top_features), tick_labels, rotation=60, ha='right')

    ax = plt.subplot(111)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.grid(linestyle='--', linewidth='0.15', color='gray')
    plt.ylabel("Coefficients", fontsize=10)
    plt.title(icd)
    plt.savefig("Data/" + icd + ".png", bbox_inches="tight")
    plt.show()
# ICD codes available as prediction targets; pick one per run.
items = "E11", "E78", "I10", "I21", "I25", "I42", "I48", "I50", "N18", "Z95"
icd = items[9]  # "Z95"

# Expects a per-code CSV with a 'text' column, a binary label column named
# after the ICD code, and the extra numeric features listed in `cols`.
data = pd.read_csv("Data/" + icd + ".csv")
df = pd.DataFrame(data)
text_df = pd.Series(df['text'].values.tolist())
labels_code = df[icd].values.tolist()
cols = ['age_norm', 'gender']
vars_df = df[cols]

# Split text, labels and extra features with the same permutation.
x_train, x_test, y_train, y_test, vars_df_train, vars_df_test = \
    train_test_split(text_df, labels_code, vars_df, test_size=0.20, random_state=0)

# Fit TF-IDF on the training text only, then append the extra features.
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(x_train)
df4 = pd.DataFrame(X.toarray())
train_data = pd.concat([df4.reset_index(drop=True), vars_df_train.reset_index(drop=True)], axis=1)

# Plain LinearSVC, used only to inspect/plot the learned coefficients.
svc = LinearSVC(random_state=0)
svc.fit(train_data, y_train)
plot_coefficients(svc, vectorizer.vocabulary_, icd)

# Calibrated SVM for probabilistic predictions.
svm = OneVsRestClassifier(CalibratedClassifierCV(LinearSVC(random_state=0)))
svm.fit(train_data, y_train)

# BUG FIX: the original code built a second TfidfVectorizer with the training
# vocabulary and called fit_transform on the TEST text. That recomputes the
# IDF weights from the test corpus, which both leaks test data and produces
# features inconsistent with those the classifier was trained on. The correct
# approach is to reuse the fitted training vectorizer with transform().
X_t = vectorizer.transform(x_test)
df4_t = pd.DataFrame(X_t.toarray())
test_data = pd.concat([df4_t.reset_index(drop=True), vars_df_test.reset_index(drop=True)], axis=1)
p_label = svm.predict(test_data)

# Reference pipeline on text only (no extra features), for comparison.
pipe1 = Pipeline([('TFidf', TfidfVectorizer()),
                  ("SVMProb", OneVsRestClassifier(CalibratedClassifierCV(LinearSVC(random_state=0))))])
pipe1.fit(x_train, y_train)
predicted_label = pipe1.predict(x_test)
conf_matrix = confusion_matrix(y_test, predicted_label)
print("done!")
| [
"pandas.Series",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.hstack",
"sklearn.model_selection.train_test_split",
"numpy.delete",
"matplotlib.pyplot.ylabel",
"sklearn.svm.LinearSVC",
"matplotlib.pyplot.show",
"numpy.argsort",
"sklearn.feature_extraction.text.TfidfVectorizer",
"matpl... | [((2003, 2038), 'pandas.read_csv', 'pd.read_csv', (["('Data/' + icd + '.csv')"], {}), "('Data/' + icd + '.csv')\n", (2014, 2038), True, 'import pandas as pd\n'), ((2044, 2062), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2056, 2062), True, 'import pandas as pd\n'), ((2110, 2139), 'pandas.Series', 'pd.Series', (['(v for v in text_df)'], {}), '(v for v in text_df)\n', (2119, 2139), True, 'import pandas as pd\n'), ((2299, 2377), 'sklearn.model_selection.train_test_split', 'train_test_split', (['text_df', 'labels_code', 'vars_df'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(text_df, labels_code, vars_df, test_size=0.2, random_state=0)\n', (2315, 2377), False, 'from sklearn.model_selection import train_test_split\n'), ((2392, 2409), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (2407, 2409), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2585, 2610), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2594, 2610), False, 'from sklearn.svm import LinearSVC\n'), ((2813, 2863), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'vocabulary': 'vectorizer.vocabulary_'}), '(vocabulary=vectorizer.vocabulary_)\n', (2828, 2863), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3309, 3350), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'predicted_label'], {}), '(y_test, predicted_label)\n', (3325, 3350), False, 'from sklearn.metrics import confusion_matrix\n'), ((528, 547), 'numpy.delete', 'np.delete', (['coef', '(-1)'], {}), '(coef, -1)\n', (537, 547), True, 'import numpy as np\n'), ((559, 578), 'numpy.delete', 'np.delete', (['coef', '(-1)'], {}), '(coef, -1)\n', (568, 578), True, 'import numpy as np\n'), ((731, 796), 'numpy.hstack', 'np.hstack', (['[top_negative_coefficients, top_positive_coefficients]'], {}), '([top_negative_coefficients, 
top_positive_coefficients])\n', (740, 796), True, 'import numpy as np\n'), ((819, 846), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (829, 846), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1561), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1556, 1561), True, 'import matplotlib.pyplot as plt\n'), ((1708, 1747), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Coefficients"""'], {'fontsize': '(10)'}), "('Coefficients', fontsize=10)\n", (1718, 1747), True, 'import matplotlib.pyplot as plt\n'), ((1811, 1825), 'matplotlib.pyplot.title', 'plt.title', (['icd'], {}), '(icd)\n', (1820, 1825), True, 'import matplotlib.pyplot as plt\n'), ((1830, 1886), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Data/' + icd + '.png')"], {'bbox_inches': '"""tight"""'}), "('Data/' + icd + '.png', bbox_inches='tight')\n", (1841, 1886), True, 'import matplotlib.pyplot as plt\n'), ((1891, 1901), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1899, 1901), True, 'import matplotlib.pyplot as plt\n'), ((611, 627), 'numpy.argsort', 'np.argsort', (['coef'], {}), '(coef)\n', (621, 627), True, 'import numpy as np\n'), ((676, 692), 'numpy.argsort', 'np.argsort', (['coef'], {}), '(coef)\n', (686, 692), True, 'import numpy as np\n'), ((947, 974), 'numpy.arange', 'np.arange', (['(2 * top_features)'], {}), '(2 * top_features)\n', (956, 974), True, 'import numpy as np\n'), ((1365, 1399), 'numpy.arange', 'np.arange', (['(0)', '(0 + 2 * top_features)'], {}), '(0, 0 + 2 * top_features)\n', (1374, 1399), True, 'import numpy as np\n'), ((2742, 2767), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2751, 2767), False, 'from sklearn.svm import LinearSVC\n'), ((3102, 3119), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (3117, 3119), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3195, 3220), 
'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (3204, 3220), False, 'from sklearn.svm import LinearSVC\n')] |
import numpy as np
# Load the CSV as fixed-width unicode strings ('U75'); skip_header=1 drops the
# first line (the column header).
world_alcohol = np.genfromtxt('world_alcohol.csv', delimiter =',', dtype = 'U75', skip_header = 1)
# (translated from French; original note was incomplete) I should not use
# skip_header — otherwise, when skip_header is used, it removes the first line.
print(world_alcohol) | [
"numpy.genfromtxt"
] | [((36, 113), 'numpy.genfromtxt', 'np.genfromtxt', (['"""world_alcohol.csv"""'], {'delimiter': '""","""', 'dtype': '"""U75"""', 'skip_header': '(1)'}), "('world_alcohol.csv', delimiter=',', dtype='U75', skip_header=1)\n", (49, 113), True, 'import numpy as np\n')] |
from utils.visualize import HumanPoseVisualizer
from utils.OakRunner import OakRunner
from utils.pose import getKeypoints
from utils.draw import displayFPS
from pathlib import Path
import depthai as dai
import numpy as np
import cv2
fps_limit = 6                           # camera frames-per-second cap
frame_width, frame_height = 456, 256    # resolution fed to the pose network
# Skeleton edges: pairs of keypoint indices to connect when drawing.
pairs = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13],
        [1, 0], [0, 14], [14, 16], [0, 15], [15, 17]]
# One RGB color per keypoint index (converted to BGR when drawn with OpenCV).
colors = [[255, 100, 0], [255, 100, 0], [255, 255, 0], [255, 100, 0], [255, 255, 0], [255, 100, 0], [0, 255, 0],
        [100, 200, 255], [255, 0, 255], [0, 255, 0], [100, 200, 255], [255, 0, 255], [255, 0, 0], [0, 0, 255],
        [0, 200, 200], [0, 0, 255], [0, 200, 200], [0, 0, 0]]
threshold = 0.3   # minimum heatmap confidence for a keypoint to be accepted
nb_points = 18    # number of body keypoints produced by the model
min_depth = 250   # depth-averaging bounds (presumably millimeters — confirm against device docs)
max_depth = 2000
def init(runner, device):
    """Initialization hook: create and start the 3D skeleton visualizer,
    positioned at the runner's left/right camera locations."""
    runner.custom_arguments["visualizer"] = HumanPoseVisualizer(300, 300, [runner.left_camera_location, runner.right_camera_location], colors=colors, pairs=pairs)
    runner.custom_arguments["visualizer"].start()
def process(runner):
    """Per-frame step: decode pose keypoints from the network output, query
    their 3D location on-device, draw the 2D skeleton on the preview frame,
    and feed the 3D coordinates to the visualizer."""
    frame = runner.output_queues["middle_cam"].get().getCvFrame()
    nn_current_output = runner.output_queues["nn"].get()
    # OpenPose-style outputs: 19 heatmap channels and 38 part-affinity-field
    # channels, each 32x57, concatenated along the channel axis.
    heatmaps = np.array(nn_current_output.getLayerFp16('Mconv7_stage2_L2')).reshape((1, 19, 32, 57)).astype('float32')
    pafs = np.array(nn_current_output.getLayerFp16('Mconv7_stage2_L1')).reshape((1, 38, 32, 57)).astype('float32')
    outputs = np.concatenate((heatmaps, pafs), axis=1)
    slc_conf = dai.SpatialLocationCalculatorConfig()
    landmarks = []          # 2D pixel coordinates per keypoint, [] when missed
    spatial_landmarks = []  # 3D coordinates per keypoint, [] when missed
    for i in range(nb_points):
        # Upscale the i-th heatmap to frame resolution and extract its peak.
        probMap = outputs[0, i, :, :]
        probMap = cv2.resize(probMap, (frame_width, frame_height))
        keypoints = getKeypoints(probMap, threshold)
        if(len(keypoints) > 0 and len(keypoints[0]) > 1):
            landmarks.append([keypoints[0][0], keypoints[0][1]])
            cv2.circle(frame, (keypoints[0][0], keypoints[0][1]), 5, (colors[i][2], colors[i][1], colors[i][0]), -1, cv2.LINE_AA)
            # Ask the device for the average depth in a small ROI centered on
            # the keypoint, bounded to [min_depth, max_depth].
            slc_conf_data = dai.SpatialLocationCalculatorConfigData()
            slc_conf_data.depthThresholds.lowerThreshold = min_depth
            slc_conf_data.depthThresholds.upperThreshold = max_depth
            slc_conf_data.roi = dai.Rect(dai.Point2f(keypoints[0][0]-1, keypoints[0][1]-1), dai.Point2f(keypoints[0][0]+1, keypoints[0][1]+1))
            slc_conf.addROI(slc_conf_data)
            runner.input_queues["slc"].send(slc_conf)
            spatial_data = runner.output_queues["slc"].get().getSpatialLocations()
            # Coordinates are scaled by 1/1000 (mm -> m, presumably) for the visualizer.
            spatial_landmarks.append([spatial_data[0].spatialCoordinates.x/1000, spatial_data[0].spatialCoordinates.y/1000, spatial_data[0].spatialCoordinates.z/1000])
        else:
            # Keypoint not detected: keep the empty result as a placeholder so
            # indices still line up with keypoint ids.
            landmarks.append(keypoints)
            spatial_landmarks.append(keypoints)
    # Draw each skeleton edge whose two endpoints were both detected, blending
    # the endpoint colors 50/50.
    for pair in pairs:
        if(np.alltrue([len(landmarks[i])==2 for i in pair])):
            color = [0, 0, 0]
            for i in range(3):
                color[i] += colors[pair[0]][i]/2
                color[i] += colors[pair[1]][i]/2
            cv2.line(frame, (landmarks[pair[0]][0], landmarks[pair[0]][1]), (landmarks[pair[1]][0], landmarks[pair[1]][1]), (color[2], color[1], color[0]), 3, cv2.LINE_AA)
    displayFPS(frame, runner.getFPS())
    cv2.imshow("output", frame)
    runner.custom_arguments["visualizer"].setLandmarks(spatial_landmarks)
# Build the OAK pipeline: mid camera -> pose network, plus a mono-depth stream
# and a spatial-location calculator driven by ROIs sent at runtime.
runner = OakRunner()
runner.setMiddleCamera(frame_width, frame_height)
cam = runner.getMiddleCamera()
cam.setFps(fps_limit)
cam.setInterleaved(False)
runner.setMonoDepth()
# Load the pose-estimation blob and wire the camera preview into it.
runner.addNeuralNetworkModel(stream_name="nn", path=str(Path(__file__).parent)+"/../../_models/pose_estimation.blob", handle_mono_depth=True)
cam.preview.link(runner.neural_networks["nn"].input)
# The calculator waits for an ROI config before producing each measurement.
slc = runner.getSpatialLocationCalculator()
slc.setWaitForConfigInput(True)
runner.run(process=process, init=init) | [
"depthai.SpatialLocationCalculatorConfigData",
"pathlib.Path",
"cv2.line",
"depthai.Point2f",
"cv2.imshow",
"utils.OakRunner.OakRunner",
"cv2.circle",
"numpy.concatenate",
"utils.visualize.HumanPoseVisualizer",
"cv2.resize",
"depthai.SpatialLocationCalculatorConfig",
"utils.pose.getKeypoints"
... | [((3415, 3426), 'utils.OakRunner.OakRunner', 'OakRunner', ([], {}), '()\n', (3424, 3426), False, 'from utils.OakRunner import OakRunner\n'), ((885, 1008), 'utils.visualize.HumanPoseVisualizer', 'HumanPoseVisualizer', (['(300)', '(300)', '[runner.left_camera_location, runner.right_camera_location]'], {'colors': 'colors', 'pairs': 'pairs'}), '(300, 300, [runner.left_camera_location, runner.\n right_camera_location], colors=colors, pairs=pairs)\n', (904, 1008), False, 'from utils.visualize import HumanPoseVisualizer\n'), ((1453, 1493), 'numpy.concatenate', 'np.concatenate', (['(heatmaps, pafs)'], {'axis': '(1)'}), '((heatmaps, pafs), axis=1)\n', (1467, 1493), True, 'import numpy as np\n'), ((1509, 1546), 'depthai.SpatialLocationCalculatorConfig', 'dai.SpatialLocationCalculatorConfig', ([], {}), '()\n', (1544, 1546), True, 'import depthai as dai\n'), ((3300, 3327), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'frame'], {}), "('output', frame)\n", (3310, 3327), False, 'import cv2\n'), ((1681, 1729), 'cv2.resize', 'cv2.resize', (['probMap', '(frame_width, frame_height)'], {}), '(probMap, (frame_width, frame_height))\n', (1691, 1729), False, 'import cv2\n'), ((1750, 1782), 'utils.pose.getKeypoints', 'getKeypoints', (['probMap', 'threshold'], {}), '(probMap, threshold)\n', (1762, 1782), False, 'from utils.pose import getKeypoints\n'), ((1918, 2039), 'cv2.circle', 'cv2.circle', (['frame', '(keypoints[0][0], keypoints[0][1])', '(5)', '(colors[i][2], colors[i][1], colors[i][0])', '(-1)', 'cv2.LINE_AA'], {}), '(frame, (keypoints[0][0], keypoints[0][1]), 5, (colors[i][2],\n colors[i][1], colors[i][0]), -1, cv2.LINE_AA)\n', (1928, 2039), False, 'import cv2\n'), ((2065, 2106), 'depthai.SpatialLocationCalculatorConfigData', 'dai.SpatialLocationCalculatorConfigData', ([], {}), '()\n', (2104, 2106), True, 'import depthai as dai\n'), ((3096, 3264), 'cv2.line', 'cv2.line', (['frame', '(landmarks[pair[0]][0], landmarks[pair[0]][1])', '(landmarks[pair[1]][0], 
landmarks[pair[1]][1])', '(color[2], color[1], color[0])', '(3)', 'cv2.LINE_AA'], {}), '(frame, (landmarks[pair[0]][0], landmarks[pair[0]][1]), (landmarks[\n pair[1]][0], landmarks[pair[1]][1]), (color[2], color[1], color[0]), 3,\n cv2.LINE_AA)\n', (3104, 3264), False, 'import cv2\n'), ((2286, 2339), 'depthai.Point2f', 'dai.Point2f', (['(keypoints[0][0] - 1)', '(keypoints[0][1] - 1)'], {}), '(keypoints[0][0] - 1, keypoints[0][1] - 1)\n', (2297, 2339), True, 'import depthai as dai\n'), ((2337, 2390), 'depthai.Point2f', 'dai.Point2f', (['(keypoints[0][0] + 1)', '(keypoints[0][1] + 1)'], {}), '(keypoints[0][0] + 1, keypoints[0][1] + 1)\n', (2348, 2390), True, 'import depthai as dai\n'), ((3638, 3652), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3642, 3652), False, 'from pathlib import Path\n')] |
import numpy as np
import pandas as pd
class StrategyOptimiser:
    def __init__(self,
                 fitness_function,
                 n_generations,
                 generation_size,
                 n_genes,
                 gene_ranges,
                 mutation_probability,
                 gene_mutation_probability,
                 n_select_best):
        """
        Initializes a genetic algorithm with the given parameters.

        Params
        --
        `fitness_function` the function to optimize
        `n_generations` the number of generations to run for
        `generation_size` the number of individuals per generation
        `n_genes` the number of genes per individual
        `gene_ranges` list of length `n_genes` tuples describing each
            gene's value range (lower inclusive, upper exclusive)
        `mutation_probability` the probability that an individual will
            be mutated
        `gene_mutation_probability` the probability that a gene will
            be mutated (assuming that the individual was selected
            for mutation)
        `n_select_best` the number of individuals that are selected
            to mate in order to create the next generation
        """
        self.fitness_function = fitness_function
        self.n_generations = n_generations
        self.generation_size = generation_size
        self.n_genes = n_genes
        self.gene_ranges = gene_ranges
        self.mutation_probability = mutation_probability
        self.gene_mutation_probability = gene_mutation_probability
        self.n_select_best = n_select_best

    def create_individual(self):
        """ Returns a randomly-generated individual with `n_genes` genes,
        each gene ranging between the values defined in `gene_ranges` """
        individual = []
        for i in range(self.n_genes):
            gene = np.random.randint(self.gene_ranges[i][0], self.gene_ranges[i][1])
            individual.append(gene)
        return individual

    def create_population(self, n_individuals):
        """ Creates a population of `n_individuals` """
        population = []
        for i in range(n_individuals):
            population.append(self.create_individual())
        return population

    def mate_parents(self, parents, n_offspring):
        """ Takes a list of parents and mates them, creating `n_offspring`
        offspring via uniform crossover (each gene taken from one of two
        randomly chosen parents) """
        n_parents = len(parents)
        offspring = []
        for i in range(n_offspring):
            # BUG FIX: np.random.randint's upper bound is EXCLUSIVE, so the
            # original `randint(0, n_parents - 1)` could never pick the last
            # parent in the list. `randint(0, n_parents)` samples uniformly
            # over all parents.
            random_dad = parents[np.random.randint(0, n_parents)]
            random_mom = parents[np.random.randint(0, n_parents)]
            # Binary mask decides, per gene, whether it comes from dad (1) or mom (0).
            dad_mask = np.random.randint(0, 2, size = np.array(random_dad).shape)
            mom_mask = np.logical_not(dad_mask)
            child = np.add(np.multiply(random_dad, dad_mask), np.multiply(random_mom, mom_mask))
            offspring.append(child)
        return offspring

    def mutate_individual(self, individual):
        """ Takes an individual and mutates it gene by gene.
        The probability that a gene will be mutated is `gene_mutation_probability`
        """
        new_individual = []
        for i in range(0, self.n_genes):
            gene = individual[i]
            if np.random.random() < self.gene_mutation_probability:
                # mutate gene
                if np.random.random() < 0.5:
                    # brute-force mutation: resample uniformly from the gene's range
                    gene = np.random.randint(self.gene_ranges[i][0], self.gene_ranges[i][1])
                else:
                    # local mutation: perturb the current value by up to half
                    # the range width, wrapping around the range boundaries
                    left_range = self.gene_ranges[i][0]
                    right_range = self.gene_ranges[i][1]
                    gene_dist = right_range - left_range
                    x = individual[i] + gene_dist / 2 * (2 * np.random.random() - 1)
                    if x > right_range:
                        x = (x - left_range) % gene_dist + left_range
                    elif x < left_range:
                        x = (right_range - x) % gene_dist + left_range
                    gene = int(x)
            new_individual.append(gene)
        return new_individual

    def mutate_population(self, population):
        """ Takes a population and mutates its individuals,
        with a mutation probability of `mutation_probability`.
        IE (`mutation_probability` * 100)% of the population
        will mutate """
        mutated_pop = []
        for individual in population:
            new_individual = individual
            if np.random.random() < self.mutation_probability:
                new_individual = self.mutate_individual(individual)
            mutated_pop.append(new_individual)
        return mutated_pop

    def select_best(self, population, n_best):
        """ Selects the best `n_best` individuals in a population
        (those with the highest fitness). Also prints summary statistics
        of the population's fitness values. """
        fitnesses = []
        for idx, individual in enumerate(population):
            individual_fitness = self.fitness_function(individual)
            fitnesses.append([idx, individual_fitness])
        # Column 0 is the population index, column 1 the fitness value.
        costs_tmp = pd.DataFrame(fitnesses).sort_values(by=1, ascending = False).reset_index(drop=True)
        selected_parents_idx = list(costs_tmp.iloc[:n_best, 0])
        # Note: selection preserves population order, not fitness order.
        selected_parents = [parent for idx, parent in enumerate(population) if idx in selected_parents_idx]
        print('best: {}, average: {}, and worst: {}'.format(
            costs_tmp[1].max(),
            round(costs_tmp[1].mean(), 2),
            costs_tmp[1].min()
        ))
        print("best individual:", population[selected_parents_idx[0]])
        return selected_parents

    def run_genetic_algo(self):
        """
        Runs a genetic algorithm to optimize the `fitness_function`.

        Returns
        --
        The best individual solutions (top 10 of the final generation).
        """
        parent_gen = self.create_population(self.generation_size)
        for i in range(self.n_generations):
            print("Generation:", i, "Selecting best...")
            parent_gen = self.select_best(parent_gen, self.n_select_best)
            print("Mating parents & Mutating children...")
            parent_gen = self.mate_parents(parent_gen, self.generation_size)
            parent_gen = self.mutate_population(parent_gen)
        best_children = self.select_best(parent_gen, 10)
        return best_children
| [
"numpy.multiply",
"numpy.random.random",
"numpy.logical_not",
"numpy.array",
"numpy.random.randint",
"pandas.DataFrame"
] | [((1590, 1655), 'numpy.random.randint', 'np.random.randint', (['self.gene_ranges[i][0]', 'self.gene_ranges[i][1]'], {}), '(self.gene_ranges[i][0], self.gene_ranges[i][1])\n', (1607, 1655), True, 'import numpy as np\n'), ((2389, 2413), 'numpy.logical_not', 'np.logical_not', (['dad_mask'], {}), '(dad_mask)\n', (2403, 2413), True, 'import numpy as np\n'), ((2194, 2229), 'numpy.random.randint', 'np.random.randint', (['(0)', '(n_parents - 1)'], {}), '(0, n_parents - 1)\n', (2211, 2229), True, 'import numpy as np\n'), ((2258, 2293), 'numpy.random.randint', 'np.random.randint', (['(0)', '(n_parents - 1)'], {}), '(0, n_parents - 1)\n', (2275, 2293), True, 'import numpy as np\n'), ((2436, 2469), 'numpy.multiply', 'np.multiply', (['random_dad', 'dad_mask'], {}), '(random_dad, dad_mask)\n', (2447, 2469), True, 'import numpy as np\n'), ((2471, 2504), 'numpy.multiply', 'np.multiply', (['random_mom', 'mom_mask'], {}), '(random_mom, mom_mask)\n', (2482, 2504), True, 'import numpy as np\n'), ((2853, 2871), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2869, 2871), True, 'import numpy as np\n'), ((4011, 4029), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4027, 4029), True, 'import numpy as np\n'), ((2939, 2957), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2955, 2957), True, 'import numpy as np\n'), ((3017, 3082), 'numpy.random.randint', 'np.random.randint', (['self.gene_ranges[i][0]', 'self.gene_ranges[i][1]'], {}), '(self.gene_ranges[i][0], self.gene_ranges[i][1])\n', (3034, 3082), True, 'import numpy as np\n'), ((2344, 2364), 'numpy.array', 'np.array', (['random_dad'], {}), '(random_dad)\n', (2352, 2364), True, 'import numpy as np\n'), ((4545, 4568), 'pandas.DataFrame', 'pd.DataFrame', (['fitnesses'], {}), '(fitnesses)\n', (4557, 4568), True, 'import pandas as pd\n'), ((3355, 3373), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3371, 3373), True, 'import numpy as np\n')] |
import codecademylib3_seaborn
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
from sklearn.cluster import KMeans

# Load the 8x8 handwritten-digit images bundled with scikit-learn.
digits = datasets.load_digits()
print(digits.target)

# Preview one sample image together with its true label.
plt.gray()
plt.matshow(digits.images[100])
plt.show()
print(digits.target[100])

# Cluster the raw 64-pixel vectors into 10 groups.
model = KMeans(n_clusters=10, random_state=42)
model.fit(digits.data)

# Visualize each cluster centroid as an 8x8 grayscale image.
fig = plt.figure(figsize=(8, 3))
fig.suptitle('Cluster center images', fontsize=14, fontweight='bold')
for cluster_idx in range(10):
    # Initialize subplots in a grid of 2x5, at (cluster_idx + 1)-th position.
    ax = fig.add_subplot(2, 5, 1 + cluster_idx)
    ax.imshow(model.cluster_centers_[cluster_idx].reshape((8, 8)), cmap=plt.cm.binary)
plt.show()

new_samples = np.array([
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.60,7.32,0.99,0.00,0.00,0.00,0.00,0.00,2.06,7.62,1.98,0.00,0.00,0.00,0.00,0.00,1.52,7.62,2.29,0.00,0.00,0.00,0.00,0.00,1.07,7.63,3.21,0.00,0.00,0.00,0.00,0.00,0.00,7.62,3.81,0.00,0.00,0.00,0.00,0.00,0.00,7.39,3.58,0.00,0.00,0.00,0.00,0.00,0.00,0.99,0.23,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.92,3.90,5.85,7.40,6.03,0.38,0.00,2.67,7.40,7.47,5.80,4.80,7.62,2.98,0.53,7.55,6.03,0.84,0.00,0.77,7.62,3.51,0.76,7.62,3.05,0.00,0.00,0.07,6.63,5.64,1.14,7.62,5.11,3.05,3.05,3.74,7.09,5.64,0.00,4.66,7.62,7.62,7.62,7.63,5.87,1.98],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.54,0.00,0.00,0.00,0.00,0.00,0.00,1.83,7.55,1.15,0.00,0.00,0.00,0.00,0.00,2.29,7.62,1.52,0.00,0.00,0.00,0.00,0.00,2.29,7.62,1.52,0.00,0.00,0.00,0.00,0.00,2.29,7.62,1.52,0.00,0.00,0.00,0.00,0.00,2.21,7.62,1.45,0.00,0.00,0.00,0.00,0.00,0.08,1.98,0.00,0.00,0.00],
[0.00,0.00,3.35,7.62,7.62,4.73,0.00,0.00,0.00,0.00,3.73,7.62,7.40,7.62,2.44,0.00,0.00,0.00,1.91,7.62,3.13,6.18,6.71,0.15,0.00,0.00,4.42,7.47,0.53,2.29,7.62,2.67,0.00,0.15,7.02,5.11,0.00,0.92,7.63,3.51,0.00,2.29,7.62,3.13,3.36,6.86,7.62,2.59,0.00,3.05,7.62,7.62,7.63,6.10,1.83,0.00,0.00,0.76,3.81,3.58,1.15,0.00,0.00,0.00]
])

new_labels = model.predict(new_samples)
print(new_labels)

# Manually determined mapping from cluster index to the digit the cluster
# represents (found by inspecting the centroid images above).
cluster_to_digit = {0: 0, 1: 9, 2: 2, 3: 1, 4: 6, 5: 8, 6: 4, 7: 5, 8: 7, 9: 3}
for label in new_labels:
    print(cluster_to_digit[label], end='')
| [
"sklearn.cluster.KMeans",
"matplotlib.pyplot.gray",
"sklearn.datasets.load_digits",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.matshow",
"matplotlib.pyplot.show"
] | [((161, 183), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {}), '()\n', (181, 183), False, 'from sklearn import datasets\n'), ((206, 216), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (214, 216), True, 'from matplotlib import pyplot as plt\n'), ((217, 248), 'matplotlib.pyplot.matshow', 'plt.matshow', (['digits.images[100]'], {}), '(digits.images[100])\n', (228, 248), True, 'from matplotlib import pyplot as plt\n'), ((249, 259), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (257, 259), True, 'from matplotlib import pyplot as plt\n'), ((295, 333), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(10)', 'random_state': '(42)'}), '(n_clusters=10, random_state=42)\n', (301, 333), False, 'from sklearn.cluster import KMeans\n'), ((365, 391), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (375, 391), True, 'from matplotlib import pyplot as plt\n'), ((672, 682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (680, 682), True, 'from matplotlib import pyplot as plt\n'), ((698, 2199), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.6, 7.32, 0.99, 0.0, \n 0.0, 0.0, 0.0, 0.0, 2.06, 7.62, 1.98, 0.0, 0.0, 0.0, 0.0, 0.0, 1.52, \n 7.62, 2.29, 0.0, 0.0, 0.0, 0.0, 0.0, 1.07, 7.63, 3.21, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 7.62, 3.81, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.39, 3.58, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.99, 0.23, 0.0, 0.0, 0.0], [0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.92, 3.9, 5.85, 7.4, 6.03, 0.38, 0.0, 2.67, 7.4, 7.47, 5.8, \n 4.8, 7.62, 2.98, 0.53, 7.55, 6.03, 0.84, 0.0, 0.77, 7.62, 3.51, 0.76, \n 7.62, 3.05, 0.0, 0.0, 0.07, 6.63, 5.64, 1.14, 7.62, 5.11, 3.05, 3.05, \n 3.74, 7.09, 5.64, 0.0, 4.66, 7.62, 7.62, 7.62, 7.63, 5.87, 1.98], [0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.54, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 1.83, 7.55, 1.15, 0.0, 0.0, 0.0, 0.0, 0.0, 2.29, 
\n 7.62, 1.52, 0.0, 0.0, 0.0, 0.0, 0.0, 2.29, 7.62, 1.52, 0.0, 0.0, 0.0, \n 0.0, 0.0, 2.29, 7.62, 1.52, 0.0, 0.0, 0.0, 0.0, 0.0, 2.21, 7.62, 1.45, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.08, 1.98, 0.0, 0.0, 0.0], [0.0, 0.0, 3.35, \n 7.62, 7.62, 4.73, 0.0, 0.0, 0.0, 0.0, 3.73, 7.62, 7.4, 7.62, 2.44, 0.0,\n 0.0, 0.0, 1.91, 7.62, 3.13, 6.18, 6.71, 0.15, 0.0, 0.0, 4.42, 7.47, \n 0.53, 2.29, 7.62, 2.67, 0.0, 0.15, 7.02, 5.11, 0.0, 0.92, 7.63, 3.51, \n 0.0, 2.29, 7.62, 3.13, 3.36, 6.86, 7.62, 2.59, 0.0, 3.05, 7.62, 7.62, \n 7.63, 6.1, 1.83, 0.0, 0.0, 0.76, 3.81, 3.58, 1.15, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.6, 7.32, \n 0.99, 0.0, 0.0, 0.0, 0.0, 0.0, 2.06, 7.62, 1.98, 0.0, 0.0, 0.0, 0.0, \n 0.0, 1.52, 7.62, 2.29, 0.0, 0.0, 0.0, 0.0, 0.0, 1.07, 7.63, 3.21, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 7.62, 3.81, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.39,\n 3.58, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.99, 0.23, 0.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.92, 3.9, 5.85, 7.4, 6.03, 0.38, 0.0, 2.67, 7.4, 7.47, \n 5.8, 4.8, 7.62, 2.98, 0.53, 7.55, 6.03, 0.84, 0.0, 0.77, 7.62, 3.51, \n 0.76, 7.62, 3.05, 0.0, 0.0, 0.07, 6.63, 5.64, 1.14, 7.62, 5.11, 3.05, \n 3.05, 3.74, 7.09, 5.64, 0.0, 4.66, 7.62, 7.62, 7.62, 7.63, 5.87, 1.98],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.54, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 1.83, 7.55, 1.15, 0.0, 0.0, 0.0, 0.0, 0.0, \n 2.29, 7.62, 1.52, 0.0, 0.0, 0.0, 0.0, 0.0, 2.29, 7.62, 1.52, 0.0, 0.0, \n 0.0, 0.0, 0.0, 2.29, 7.62, 1.52, 0.0, 0.0, 0.0, 0.0, 0.0, 2.21, 7.62, \n 1.45, 0.0, 0.0, 0.0, 0.0, 0.0, 0.08, 1.98, 0.0, 0.0, 0.0], [0.0, 0.0, \n 3.35, 7.62, 7.62, 4.73, 0.0, 0.0, 0.0, 0.0, 3.73, 7.62, 7.4, 7.62, 2.44,\n 0.0, 0.0, 0.0, 1.91, 7.62, 3.13, 6.18, 6.71, 0.15, 0.0, 0.0, 4.42, 7.47,\n 0.53, 2.29, 7.62, 2.67, 0.0, 0.15, 7.02, 5.11, 0.0, 0.92, 7.63, 3.51, \n 0.0, 2.29, 7.62, 3.13, 3.36, 6.86, 7.62, 2.59, 0.0, 3.05, 7.62, 7.62, \n 7.63, 6.1, 1.83, 0.0, 
0.0, 0.76, 3.81, 3.58, 1.15, 0.0, 0.0, 0.0]])\n', (706, 2199), True, 'import numpy as np\n')] |
import numpy as np
import tqdm
from scipy import stats
from hmmpy.base import BaseHiddenMarkov
class SampleHMM(BaseHiddenMarkov):
    """
    Class to handle sampling from HMM hidden_markov with user parameters.

    Parameters
    ----------
    n_states : int, default=2
        Number of hidden states
    frequency : str, default='daily'
        Time scale of the default parameters ('daily' or 'monthly').
        Only used when `hmm_params` is None.
    hmm_params: dict, default=None
        hmm model parameters to sample from.
        To set params, create a dict with 'mu', 'std' and 'tpm' as keys
        and their values in lists or numpy arrays.
    random_state : int, default = 42
        Parameter set to recreate output

    Attributes
    ----------
    mu : ndarray of shape (n_states,)
        means to sample from
    std : ndarray of shape (n_states,)
        STDs to sample from
    tpm : ndarray of shape (n_states, n_states)
        Transition probability matrix between states
    stationary_dist : ndarray of shape (n_states,)
        Stationary distribution of `tpm`, also used as start probabilities.
    """

    def __init__(self, n_states=2, frequency='daily', hmm_params=None, random_state=42):
        # BUG FIX: the original condition was
        #   `hmm_params == None or (hmm_params == None and frequency == "daily")`
        # which simplifies to `hmm_params == None`, so the subsequent
        # `elif ... frequency == "monthly"` branch was unreachable and the
        # monthly defaults were never applied. Now `frequency` selects the
        # defaults properly; any value other than "monthly" falls back to daily.
        if hmm_params is None:
            if frequency == "monthly":
                # Monthly hmm params following Hardy (2001).
                hmm_params = {'mu': np.array([0.0123, -0.0157]),
                              'std': np.array([0.0347, 0.0778]),
                              'tpm': np.array([[0.9629, 0.0371],
                                                [0.2101, 0.7899]])
                              }
            else:
                # Daily defaults: Hardy (2001) parameters converted from a
                # monthly time scale (t=20 trading days) to t=1.
                hmm_params = {'mu': np.array([0.0123, -0.0157]) / 20,
                              'std': np.array([0.0347, 0.0778]) / np.sqrt(20),
                              'tpm': np.array([[1 - 0.0021, 0.0021],
                                                [0.0120, 1 - 0.0120]])
                              }

        self.type = 'sampler'
        self.is_fitted = True
        self.n_states = n_states
        self.mu = np.array(hmm_params['mu'])
        self.std = np.array(hmm_params['std'])
        self.tpm = np.array(hmm_params['tpm'])
        self.stationary_dist = super()._get_stationary_dist(self.tpm)
        self.start_proba = self.stationary_dist
        self.random_state = random_state
        np.random.seed(self.random_state)

    def sample(self, n_samples, n_sequences=1):
        '''
        Sample states and normally-distributed observations from the model.

        Parameters
        ----------
        n_samples : int
            Amount of samples to generate
        n_sequences : int, default=1
            Number of independent sequences to sample from, e.g. if n_samples=100 and n_sequences=3
            then 3 different sequences of length 100 are sampled

        Returns
        -------
        samples : ndarray of shape (n_samples, n_sequences)
            Outputs the generated samples of size n_samples
            (1-D of shape (n_samples,) when n_sequences == 1)
        sample_states : ndarray of shape (n_samples, n_sequences)
            Outputs sampled states
            (1-D of shape (n_samples,) when n_sequences == 1)
        '''
        mu = self.mu
        std = self.std
        tpm = self.tpm
        stationary_dist = self.stationary_dist

        state_index = np.arange(start=0, stop=self.n_states, step=1, dtype=np.int32)  # Array of possible states
        sample_states = np.zeros(shape=(n_samples, n_sequences), dtype=np.int32)  # Init sample vector
        samples = np.zeros(shape=(n_samples, n_sequences))  # Init sample vector

        print(f'Simulating {n_sequences} of lengths {n_samples}')
        for seq in tqdm.tqdm(range(n_sequences)):
            # Draw the initial state from the stationary distribution.
            sample_states[0, seq] = np.random.choice(a=state_index, size=1, p=stationary_dist)
            for t in range(1, n_samples):
                # Each new state is chosen using the transition probs corresponding to the previous state sojourn.
                sample_states[t, seq] = np.random.choice(a=state_index, size=1, p=tpm[sample_states[t - 1, seq], :])

            # Observations are Gaussian, conditioned on the sampled state path.
            samples[:, seq] = stats.norm.rvs(loc=mu[sample_states[:, seq]], scale=std[sample_states[:, seq]], size=n_samples)

        if n_sequences == 1:
            sample_states = sample_states[:, 0]
            samples = samples[:, 0]

        return samples, sample_states

    def sample_t(self, n_samples, n_sequences=1, dof=5):
        '''
        Sample states and t-distributed observations from the model.

        Parameters
        ----------
        n_samples : int
            Amount of samples to generate
        n_sequences : int, default=1
            Number of independent sequences to sample from, e.g. if n_samples=100 and n_sequences=3
            then 3 different sequences of length 100 are sampled
        dof : int, default=5
            degrees of freedom in the conditional t-distributions.

        Returns
        -------
        samples : ndarray of shape (n_samples, n_sequences)
            Outputs the generated samples of size n_samples
            (1-D of shape (n_samples,) when n_sequences == 1)
        sample_states : ndarray of shape (n_samples, n_sequences)
            Outputs sampled states
            (1-D of shape (n_samples,) when n_sequences == 1)
        '''
        mu = self.mu
        std = self.std
        tpm = self.tpm
        stationary_dist = self.stationary_dist

        state_index = np.arange(start=0, stop=self.n_states, step=1, dtype=np.int32)  # Array of possible states
        sample_states = np.zeros(shape=(n_samples, n_sequences), dtype=np.int32)  # Init sample vector
        samples = np.zeros(shape=(n_samples, n_sequences))  # Init sample vector

        for seq in tqdm.tqdm(range(n_sequences)):
            # Draw the initial state from the stationary distribution.
            sample_states[0, seq] = np.random.choice(a=state_index, size=1, p=stationary_dist)
            for t in range(1, n_samples):
                # Each new state is chosen using the transition probs corresponding to the previous state sojourn.
                sample_states[t, seq] = np.random.choice(a=state_index, size=1, p=tpm[sample_states[t - 1, seq], :])

            # Scale adjustment keeps the t-distribution's std equal to `std`
            # (a Student's t with `dof` dofs has variance dof / (dof - 2)).
            samples[:, seq] = stats.t.rvs(loc=mu[sample_states[:, seq]],
                                          scale=std[sample_states[:, seq]]/np.sqrt(dof/(dof-2)),
                                          size=n_samples, df=dof)

        if n_sequences == 1:
            sample_states = sample_states[:, 0]
            samples = samples[:, 0]

        return samples, sample_states

    def sample_with_viterbi(self, n_samples, n_sequences=1):
        """Sample observations, then decode the most likely state path with
        the Viterbi algorithm; returns (samples, viterbi_states, true_states)."""
        samples, true_states = self.sample(n_samples, n_sequences)

        viterbi_states = np.empty(shape=(n_samples, n_sequences), dtype=float)
        if n_sequences == 1:
            viterbi_states = self.decode(samples)
        else:
            for i in range(n_sequences):
                viterbi_states[:, i] = self.decode(samples[:, i])

        return samples, viterbi_states, true_states
if __name__ == "__main__":
model = SampleHMM(n_states=2)
print(model.mu)
print(model.std)
print(model.tpm)
print(model.stationary_dist)
n_samples = 1000
n_sequences = 1000
X, viterbi_states, true_states = model.sample_with_viterbi(n_samples, n_sequences) | [
"numpy.sqrt",
"numpy.random.choice",
"scipy.stats.norm.rvs",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.random.seed",
"numpy.arange"
] | [((1931, 1957), 'numpy.array', 'np.array', (["hmm_params['mu']"], {}), "(hmm_params['mu'])\n", (1939, 1957), True, 'import numpy as np\n'), ((1977, 2004), 'numpy.array', 'np.array', (["hmm_params['std']"], {}), "(hmm_params['std'])\n", (1985, 2004), True, 'import numpy as np\n'), ((2024, 2051), 'numpy.array', 'np.array', (["hmm_params['tpm']"], {}), "(hmm_params['tpm'])\n", (2032, 2051), True, 'import numpy as np\n'), ((2220, 2253), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (2234, 2253), True, 'import numpy as np\n'), ((3082, 3144), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'self.n_states', 'step': '(1)', 'dtype': 'np.int32'}), '(start=0, stop=self.n_states, step=1, dtype=np.int32)\n', (3091, 3144), True, 'import numpy as np\n'), ((3197, 3253), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_samples, n_sequences)', 'dtype': 'np.int32'}), '(shape=(n_samples, n_sequences), dtype=np.int32)\n', (3205, 3253), True, 'import numpy as np\n'), ((3293, 3333), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_samples, n_sequences)'}), '(shape=(n_samples, n_sequences))\n', (3301, 3333), True, 'import numpy as np\n'), ((5056, 5118), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'self.n_states', 'step': '(1)', 'dtype': 'np.int32'}), '(start=0, stop=self.n_states, step=1, dtype=np.int32)\n', (5065, 5118), True, 'import numpy as np\n'), ((5171, 5227), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_samples, n_sequences)', 'dtype': 'np.int32'}), '(shape=(n_samples, n_sequences), dtype=np.int32)\n', (5179, 5227), True, 'import numpy as np\n'), ((5267, 5307), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_samples, n_sequences)'}), '(shape=(n_samples, n_sequences))\n', (5275, 5307), True, 'import numpy as np\n'), ((6296, 6349), 'numpy.empty', 'np.empty', ([], {'shape': '(n_samples, n_sequences)', 'dtype': 'float'}), '(shape=(n_samples, n_sequences), dtype=float)\n', (6304, 6349), True, 'import numpy as np\n'), 
((3509, 3567), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'state_index', 'size': '(1)', 'p': 'stationary_dist'}), '(a=state_index, size=1, p=stationary_dist)\n', (3525, 3567), True, 'import numpy as np\n'), ((3874, 3973), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'loc': 'mu[sample_states[:, seq]]', 'scale': 'std[sample_states[:, seq]]', 'size': 'n_samples'}), '(loc=mu[sample_states[:, seq]], scale=std[sample_states[:,\n seq]], size=n_samples)\n', (3888, 3973), False, 'from scipy import stats\n'), ((5417, 5475), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'state_index', 'size': '(1)', 'p': 'stationary_dist'}), '(a=state_index, size=1, p=stationary_dist)\n', (5433, 5475), True, 'import numpy as np\n'), ((1317, 1369), 'numpy.array', 'np.array', (['[[1 - 0.0021, 0.0021], [0.012, 1 - 0.012]]'], {}), '([[1 - 0.0021, 0.0021], [0.012, 1 - 0.012]])\n', (1325, 1369), True, 'import numpy as np\n'), ((3766, 3842), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'state_index', 'size': '(1)', 'p': 'tpm[sample_states[t - 1, seq], :]'}), '(a=state_index, size=1, p=tpm[sample_states[t - 1, seq], :])\n', (3782, 3842), True, 'import numpy as np\n'), ((5674, 5750), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'state_index', 'size': '(1)', 'p': 'tpm[sample_states[t - 1, seq], :]'}), '(a=state_index, size=1, p=tpm[sample_states[t - 1, seq], :])\n', (5690, 5750), True, 'import numpy as np\n'), ((1176, 1203), 'numpy.array', 'np.array', (['[0.0123, -0.0157]'], {}), '([0.0123, -0.0157])\n', (1184, 1203), True, 'import numpy as np\n'), ((1243, 1269), 'numpy.array', 'np.array', (['[0.0347, 0.0778]'], {}), '([0.0347, 0.0778])\n', (1251, 1269), True, 'import numpy as np\n'), ((1271, 1282), 'numpy.sqrt', 'np.sqrt', (['(20)'], {}), '(20)\n', (1278, 1282), True, 'import numpy as np\n'), ((1578, 1605), 'numpy.array', 'np.array', (['[0.0123, -0.0157]'], {}), '([0.0123, -0.0157])\n', (1586, 1605), True, 'import numpy as np\n'), ((1640, 1666), 'numpy.array', 
'np.array', (['[0.0347, 0.0778]'], {}), '([0.0347, 0.0778])\n', (1648, 1666), True, 'import numpy as np\n'), ((1701, 1747), 'numpy.array', 'np.array', (['[[0.9629, 0.0371], [0.2101, 0.7899]]'], {}), '([[0.9629, 0.0371], [0.2101, 0.7899]])\n', (1709, 1747), True, 'import numpy as np\n'), ((5900, 5924), 'numpy.sqrt', 'np.sqrt', (['(dof / (dof - 2))'], {}), '(dof / (dof - 2))\n', (5907, 5924), True, 'import numpy as np\n')] |
import os
import numpy as np
import argparse
import time
import torch
import torchvision
import cv2
def yolo_forward_dynamic(output, num_classes, anchors, num_anchors, scale_x_y):
# Output would be invalid if it does not satisfy this assert
# assert (output.size(1) == (5 + num_classes) * num_anchors)
# print(output.size())
# Slice the second dimension (channel) of output into:
# [ 2, 2, 1, num_classes, 2, 2, 1, num_classes, 2, 2, 1, num_classes ]
# And then into
# bxy = [ 6 ] bwh = [ 6 ] det_conf = [ 3 ] cls_conf = [ num_classes * 3 ]
# batch = output.size(0)
# H = output.size(2)
# W = output.size(3)
bxy_list = []
bwh_list = []
det_confs_list = []
cls_confs_list = []
for i in range(num_anchors):
begin = i * (5 + num_classes)
end = (i + 1) * (5 + num_classes)
bxy_list.append(output[:, begin: begin + 2])
bwh_list.append(output[:, begin + 2: begin + 4])
det_confs_list.append(output[:, begin + 4: begin + 5])
cls_confs_list.append(output[:, begin + 5: end])
# Shape: [batch, num_anchors * 2, H, W]
bxy = torch.cat(bxy_list, dim=1)
# Shape: [batch, num_anchors * 2, H, W]
bwh = torch.cat(bwh_list, dim=1)
# Shape: [batch, num_anchors, H, W]
det_confs = torch.cat(det_confs_list, dim=1)
# Shape: [batch, num_anchors * H * W]
# print(output.size(0),num_anchors * output.size(2) * output.size(3))
det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3))
# Shape: [batch, num_anchors * num_classes, H, W]
cls_confs = torch.cat(cls_confs_list, dim=1)
# Shape: [batch, num_anchors, num_classes, H * W]
print(num_anchors, output.size(0), output.size(2), output.size(3))
cls_confs = cls_confs.view(output.size(0), num_anchors, num_classes, output.size(2) * output.size(3))
# Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes]
cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(output.size(0), num_anchors * output.size(2) * output.size(3),
num_classes)
# Apply sigmoid(), exp() and softmax() to slices
print(bxy)
bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1)
bwh = torch.exp(bwh)
det_confs = torch.sigmoid(det_confs)
cls_confs = torch.sigmoid(cls_confs)
# Prepare C-x, C-y, P-w, P-h (None of them are torch related)
grid_x = np.expand_dims(np.expand_dims(
np.expand_dims(np.linspace(0, output.size(3) - 1, output.size(3)), axis=0).repeat(output.size(2), 0), axis=0),
axis=0)
grid_y = np.expand_dims(np.expand_dims(
np.expand_dims(np.linspace(0, output.size(2) - 1, output.size(2)), axis=1).repeat(output.size(3), 1), axis=0),
axis=0)
# grid_x = torch.linspace(0, W - 1, W).reshape(1, 1, 1, W).repeat(1, 1, H, 1)
# grid_y = torch.linspace(0, H - 1, H).reshape(1, 1, H, 1).repeat(1, 1, 1, W)
anchor_w = []
anchor_h = []
for i in range(num_anchors):
anchor_w.append(anchors[i * 2])
anchor_h.append(anchors[i * 2 + 1])
device = None
cuda_check = output.is_cuda
if cuda_check:
device = output.get_device()
bx_list = []
by_list = []
bw_list = []
bh_list = []
# Apply C-x, C-y, P-w, P-h
for i in range(num_anchors):
ii = i * 2
# Shape: [batch, 1, H, W]
bx = bxy[:, ii: ii + 1] + torch.tensor(grid_x, device=device,
dtype=torch.float32) # grid_x.to(device=device, dtype=torch.float32)
# Shape: [batch, 1, H, W]
by = bxy[:, ii + 1: ii + 2] + torch.tensor(grid_y, device=device,
dtype=torch.float32) # grid_y.to(device=device, dtype=torch.float32)
# Shape: [batch, 1, H, W]
bw = bwh[:, ii: ii + 1] * anchor_w[i]
# Shape: [batch, 1, H, W]
bh = bwh[:, ii + 1: ii + 2] * anchor_h[i]
bx_list.append(bx)
by_list.append(by)
bw_list.append(bw)
bh_list.append(bh)
########################################
# Figure out bboxes from slices #
########################################
# Shape: [batch, num_anchors, H, W]
bx = torch.cat(bx_list, dim=1)
# Shape: [batch, num_anchors, H, W]
by = torch.cat(by_list, dim=1)
# Shape: [batch, num_anchors, H, W]
bw = torch.cat(bw_list, dim=1)
# Shape: [batch, num_anchors, H, W]
bh = torch.cat(bh_list, dim=1)
# Shape: [batch, 2 * num_anchors, H, W]
bx_bw = torch.cat((bx, bw), dim=1)
# Shape: [batch, 2 * num_anchors, H, W]
by_bh = torch.cat((by, bh), dim=1)
# normalize coordinates to [0, 1]
bx_bw /= output.size(3)
by_bh /= output.size(2)
# Shape: [batch, num_anchors * H * W, 1]
bx = bx_bw[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
by = by_bh[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
bw = bx_bw[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
bh = by_bh[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
bx1 = bx - bw * 0.5
by1 = by - bh * 0.5
bx2 = bx1 + bw
by2 = by1 + bh
# Shape: [batch, num_anchors * h * w, 4] -> [batch, num_anchors * h * w, 1, 4]
boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(output.size(0), num_anchors * output.size(2) * output.size(3),
1, 4)
# boxes = boxes.repeat(1, 1, num_classes, 1)
# boxes: [batch, num_anchors * H * W, 1, 4]
# cls_confs: [batch, num_anchors * H * W, num_classes]
# det_confs: [batch, num_anchors * H * W]
det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
confs = cls_confs * det_confs
# boxes: [batch, num_anchors * H * W, 1, 4]
# confs: [batch, num_anchors * H * W, num_classes]
return boxes, confs
class YoloLayer(object):
''' Yolo layer
model_out: while inference,is post-processing inside or outside the model
true:outside
'''
def __init__(self, anchor_mask=[], num_classes=0, anchors=[], num_anchors=1, stride=32, model_out=False):
self.anchor_mask = anchor_mask
self.num_classes = num_classes
self.anchors = anchors
self.num_anchors = num_anchors
self.anchor_step = len(anchors) // num_anchors
self.coord_scale = 1
self.noobject_scale = 1
self.object_scale = 5
self.class_scale = 1
self.thresh = 0.6
self.stride = stride
self.seen = 0
self.scale_x_y = 1
self.model_out = model_out
def forward(self, output):
masked_anchors = []
for m in self.anchor_mask:
masked_anchors += self.anchors[m * self.anchor_step:(m + 1) * self.anchor_step]
masked_anchors = [anchor / self.stride for anchor in masked_anchors]
print(masked_anchors)
return yolo_forward_dynamic(output, self.num_classes, masked_anchors,
len(self.anchor_mask), scale_x_y=self.scale_x_y)
def get_region_boxes(boxes_and_confs):
# print('Getting boxes from boxes and confs ...')
boxes_list = []
confs_list = []
for item in boxes_and_confs:
boxes_list.append(item[0])
confs_list.append(item[1])
# boxes: [batch, num1 + num2 + num3, 1, 4]
# confs: [batch, num1 + num2 + num3, num_classes]
boxes = torch.cat(boxes_list, dim=1)
confs = torch.cat(confs_list, dim=1)
return [boxes, confs]
def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):
# print(boxes.shape)
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
areas = (x2 - x1) * (y2 - y1)
order = confs.argsort()[::-1]
keep = []
while order.size > 0:
idx_self = order[0]
idx_other = order[1:]
keep.append(idx_self)
xx1 = np.maximum(x1[idx_self], x1[idx_other])
yy1 = np.maximum(y1[idx_self], y1[idx_other])
xx2 = np.minimum(x2[idx_self], x2[idx_other])
yy2 = np.minimum(y2[idx_self], y2[idx_other])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
inter = w * h
if min_mode:
over = inter / np.minimum(areas[order[0]], areas[order[1:]])
else:
over = inter / (areas[order[0]] + areas[order[1:]] - inter)
inds = np.where(over <= nms_thresh)[0]
order = order[inds + 1]
return np.array(keep)
def nms(conf_thresh, nms_thresh, output):
# anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]
# num_anchors = 9
# anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
# strides = [8, 16, 32]
# anchor_step = len(anchors) // num_anchors
# [batch, num, 1, 4]
box_array = output[0]
# [batch, num, num_classes]
confs = output[1]
if type(box_array).__name__ != 'ndarray':
box_array = box_array.cpu().detach().numpy()
confs = confs.cpu().detach().numpy()
num_classes = confs.shape[2]
# [batch, num, 4]
box_array = box_array[:, :, 0]
# [batch, num, num_classes] --> [batch, num]
max_conf = np.max(confs, axis=2)
max_id = np.argmax(confs, axis=2)
bboxes_batch = []
for i in range(box_array.shape[0]):
argwhere = max_conf[i] > conf_thresh
l_box_array = box_array[i, argwhere, :]
l_max_conf = max_conf[i, argwhere]
l_max_id = max_id[i, argwhere]
bboxes = []
# nms for each class
for j in range(num_classes):
cls_argwhere = l_max_id == j
ll_box_array = l_box_array[cls_argwhere, :]
ll_max_conf = l_max_conf[cls_argwhere]
ll_max_id = l_max_id[cls_argwhere]
keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh)
if (keep.size > 0):
ll_box_array = ll_box_array[keep, :]
ll_max_conf = ll_max_conf[keep]
ll_max_id = ll_max_id[keep]
for k in range(ll_box_array.shape[0]):
bboxes.append(
[ll_box_array[k, 0], ll_box_array[k, 1], ll_box_array[k, 2], ll_box_array[k, 3],
ll_max_conf[k], ll_max_id[k]])
bboxes_batch.append(bboxes)
return bboxes_batch
def post_process(flags):
names = np.loadtxt(flags.coco_class_names, dtype='str', delimiter='\n')
# 读取bin文件用于生成预测结果
bin_path = flags.bin_data_path
ori_path = flags.origin_jpg_path
anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]
num_classes = 80
det_results_path = flags.det_results_path
os.makedirs(det_results_path, exist_ok=True)
total_img = set([name[:name.rfind('_')] for name in os.listdir(bin_path) if "bin" in name])
yolo1 = YoloLayer(anchor_mask=[0, 1, 2], num_classes=num_classes, anchors=anchors, num_anchors=9, stride=8)
yolo2 = YoloLayer(anchor_mask=[3, 4, 5], num_classes=num_classes, anchors=anchors, num_anchors=9, stride=16)
yolo3 = YoloLayer(anchor_mask=[6, 7, 8], num_classes=num_classes, anchors=anchors, num_anchors=9, stride=32)
yolo_shape = [[1, 255, 76, 76], [1, 255, 38, 38], [1, 255, 19, 19]]
for bin_file in sorted(total_img):
path_base = os.path.join(bin_path, bin_file)
print(path_base)
# print('\n', os.path.join(ori_path, '{}.jpg'.format(bin_file)), '\n')
src_img = cv2.imread(os.path.join(ori_path, '{}.jpg'.format(bin_file)))
assert src_img is not None, 'Image Not Found ' + bin_file
# 加载检测的所有输出tensor
feature_map_1 = np.fromfile(path_base + "_" + '1' + ".bin", dtype="float32").reshape(yolo_shape[0])
feature_map_2 = np.fromfile(path_base + "_" + '2' + ".bin", dtype="float32").reshape(yolo_shape[1])
feature_map_3 = np.fromfile(path_base + "_" + '3' + ".bin", dtype="float32").reshape(yolo_shape[2])
pred_1 = yolo1.forward(torch.from_numpy(feature_map_1))
pred_2 = yolo2.forward(torch.from_numpy(feature_map_2))
pred_3 = yolo3.forward(torch.from_numpy(feature_map_3))
# nms
output = get_region_boxes([pred_1, pred_2, pred_3])
pred = nms(conf_thresh=0.4, nms_thresh=0.6, output=output)[0]
# save result
det_results_file = os.path.join(det_results_path, bin_file + ".txt")
print(det_results_file)
with open(det_results_file, 'w') as f:
width = src_img.shape[1]
height = src_img.shape[0]
for i in range(len(pred)):
box = pred[i]
x1 = int(box[0] * width)
y1 = int(box[1] * height)
x2 = int(box[2] * width)
y2 = int(box[3] * height)
cls_conf = box[4]
cls_id = box[5]
content = '{} {} {} {} {} {}'.format(names[int(cls_id)], cls_conf, x1, y1, x2, y2)
print(content)
f.write(content)
f.write('\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--bin_data_path", default="./result/dumpOutput_device0")
parser.add_argument("--origin_jpg_path", default="./val2014/")
parser.add_argument("--det_results_path",
default="./detection-results/")
parser.add_argument("--coco_class_names", default="./coco2014.names")
parser.add_argument("--net_out_num", default=3)
flags = parser.parse_args()
post_process(flags)
| [
"numpy.fromfile",
"os.listdir",
"numpy.minimum",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.where",
"torch.sigmoid",
"os.path.join",
"numpy.argmax",
"torch.exp",
"numpy.max",
"torch.from_numpy",
"numpy.array",
"torch.tensor",
"numpy.maximum",
"numpy.loadtxt",
"torch.cat"
] | [((1178, 1204), 'torch.cat', 'torch.cat', (['bxy_list'], {'dim': '(1)'}), '(bxy_list, dim=1)\n', (1187, 1204), False, 'import torch\n'), ((1263, 1289), 'torch.cat', 'torch.cat', (['bwh_list'], {'dim': '(1)'}), '(bwh_list, dim=1)\n', (1272, 1289), False, 'import torch\n'), ((1350, 1382), 'torch.cat', 'torch.cat', (['det_confs_list'], {'dim': '(1)'}), '(det_confs_list, dim=1)\n', (1359, 1382), False, 'import torch\n'), ((1670, 1702), 'torch.cat', 'torch.cat', (['cls_confs_list'], {'dim': '(1)'}), '(cls_confs_list, dim=1)\n', (1679, 1702), False, 'import torch\n'), ((2374, 2388), 'torch.exp', 'torch.exp', (['bwh'], {}), '(bwh)\n', (2383, 2388), False, 'import torch\n'), ((2406, 2430), 'torch.sigmoid', 'torch.sigmoid', (['det_confs'], {}), '(det_confs)\n', (2419, 2430), False, 'import torch\n'), ((2448, 2472), 'torch.sigmoid', 'torch.sigmoid', (['cls_confs'], {}), '(cls_confs)\n', (2461, 2472), False, 'import torch\n'), ((4476, 4501), 'torch.cat', 'torch.cat', (['bx_list'], {'dim': '(1)'}), '(bx_list, dim=1)\n', (4485, 4501), False, 'import torch\n'), ((4553, 4578), 'torch.cat', 'torch.cat', (['by_list'], {'dim': '(1)'}), '(by_list, dim=1)\n', (4562, 4578), False, 'import torch\n'), ((4630, 4655), 'torch.cat', 'torch.cat', (['bw_list'], {'dim': '(1)'}), '(bw_list, dim=1)\n', (4639, 4655), False, 'import torch\n'), ((4707, 4732), 'torch.cat', 'torch.cat', (['bh_list'], {'dim': '(1)'}), '(bh_list, dim=1)\n', (4716, 4732), False, 'import torch\n'), ((4793, 4819), 'torch.cat', 'torch.cat', (['(bx, bw)'], {'dim': '(1)'}), '((bx, bw), dim=1)\n', (4802, 4819), False, 'import torch\n'), ((4878, 4904), 'torch.cat', 'torch.cat', (['(by, bh)'], {'dim': '(1)'}), '((by, bh), dim=1)\n', (4887, 4904), False, 'import torch\n'), ((7900, 7928), 'torch.cat', 'torch.cat', (['boxes_list'], {'dim': '(1)'}), '(boxes_list, dim=1)\n', (7909, 7928), False, 'import torch\n'), ((7942, 7970), 'torch.cat', 'torch.cat', (['confs_list'], {'dim': '(1)'}), '(confs_list, dim=1)\n', (7951, 7970), 
False, 'import torch\n'), ((8997, 9011), 'numpy.array', 'np.array', (['keep'], {}), '(keep)\n', (9005, 9011), True, 'import numpy as np\n'), ((9738, 9759), 'numpy.max', 'np.max', (['confs'], {'axis': '(2)'}), '(confs, axis=2)\n', (9744, 9759), True, 'import numpy as np\n'), ((9774, 9798), 'numpy.argmax', 'np.argmax', (['confs'], {'axis': '(2)'}), '(confs, axis=2)\n', (9783, 9798), True, 'import numpy as np\n'), ((10958, 11021), 'numpy.loadtxt', 'np.loadtxt', (['flags.coco_class_names'], {'dtype': '"""str"""', 'delimiter': '"""\n"""'}), "(flags.coco_class_names, dtype='str', delimiter='\\n')\n", (10968, 11021), True, 'import numpy as np\n'), ((11294, 11338), 'os.makedirs', 'os.makedirs', (['det_results_path'], {'exist_ok': '(True)'}), '(det_results_path, exist_ok=True)\n', (11305, 11338), False, 'import os\n'), ((13729, 13754), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13752, 13754), False, 'import argparse\n'), ((8404, 8443), 'numpy.maximum', 'np.maximum', (['x1[idx_self]', 'x1[idx_other]'], {}), '(x1[idx_self], x1[idx_other])\n', (8414, 8443), True, 'import numpy as np\n'), ((8459, 8498), 'numpy.maximum', 'np.maximum', (['y1[idx_self]', 'y1[idx_other]'], {}), '(y1[idx_self], y1[idx_other])\n', (8469, 8498), True, 'import numpy as np\n'), ((8514, 8553), 'numpy.minimum', 'np.minimum', (['x2[idx_self]', 'x2[idx_other]'], {}), '(x2[idx_self], x2[idx_other])\n', (8524, 8553), True, 'import numpy as np\n'), ((8569, 8608), 'numpy.minimum', 'np.minimum', (['y2[idx_self]', 'y2[idx_other]'], {}), '(y2[idx_self], y2[idx_other])\n', (8579, 8608), True, 'import numpy as np\n'), ((8624, 8650), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1)'], {}), '(0.0, xx2 - xx1)\n', (8634, 8650), True, 'import numpy as np\n'), ((8664, 8690), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1)'], {}), '(0.0, yy2 - yy1)\n', (8674, 8690), True, 'import numpy as np\n'), ((11919, 11951), 'os.path.join', 'os.path.join', (['bin_path', 'bin_file'], {}), 
'(bin_path, bin_file)\n', (11931, 11951), False, 'import os\n'), ((12965, 13014), 'os.path.join', 'os.path.join', (['det_results_path', "(bin_file + '.txt')"], {}), "(det_results_path, bin_file + '.txt')\n", (12977, 13014), False, 'import os\n'), ((2308, 2326), 'torch.sigmoid', 'torch.sigmoid', (['bxy'], {}), '(bxy)\n', (2321, 2326), False, 'import torch\n'), ((3616, 3672), 'torch.tensor', 'torch.tensor', (['grid_x'], {'device': 'device', 'dtype': 'torch.float32'}), '(grid_x, device=device, dtype=torch.float32)\n', (3628, 3672), False, 'import torch\n'), ((3844, 3900), 'torch.tensor', 'torch.tensor', (['grid_y'], {'device': 'device', 'dtype': 'torch.float32'}), '(grid_y, device=device, dtype=torch.float32)\n', (3856, 3900), False, 'import torch\n'), ((5659, 5697), 'torch.cat', 'torch.cat', (['(bx1, by1, bx2, by2)'], {'dim': '(2)'}), '((bx1, by1, bx2, by2), dim=2)\n', (5668, 5697), False, 'import torch\n'), ((8918, 8946), 'numpy.where', 'np.where', (['(over <= nms_thresh)'], {}), '(over <= nms_thresh)\n', (8926, 8946), True, 'import numpy as np\n'), ((12600, 12631), 'torch.from_numpy', 'torch.from_numpy', (['feature_map_1'], {}), '(feature_map_1)\n', (12616, 12631), False, 'import torch\n'), ((12665, 12696), 'torch.from_numpy', 'torch.from_numpy', (['feature_map_2'], {}), '(feature_map_2)\n', (12681, 12696), False, 'import torch\n'), ((12730, 12761), 'torch.from_numpy', 'torch.from_numpy', (['feature_map_3'], {}), '(feature_map_3)\n', (12746, 12761), False, 'import torch\n'), ((8766, 8811), 'numpy.minimum', 'np.minimum', (['areas[order[0]]', 'areas[order[1:]]'], {}), '(areas[order[0]], areas[order[1:]])\n', (8776, 8811), True, 'import numpy as np\n'), ((11396, 11416), 'os.listdir', 'os.listdir', (['bin_path'], {}), '(bin_path)\n', (11406, 11416), False, 'import os\n'), ((12260, 12320), 'numpy.fromfile', 'np.fromfile', (["(path_base + '_' + '1' + '.bin')"], {'dtype': '"""float32"""'}), "(path_base + '_' + '1' + '.bin', dtype='float32')\n", (12271, 12320), True, 
'import numpy as np\n'), ((12369, 12429), 'numpy.fromfile', 'np.fromfile', (["(path_base + '_' + '2' + '.bin')"], {'dtype': '"""float32"""'}), "(path_base + '_' + '2' + '.bin', dtype='float32')\n", (12380, 12429), True, 'import numpy as np\n'), ((12478, 12538), 'numpy.fromfile', 'np.fromfile', (["(path_base + '_' + '3' + '.bin')"], {'dtype': '"""float32"""'}), "(path_base + '_' + '3' + '.bin', dtype='float32')\n", (12489, 12538), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import sys
def packagedWarpPerspective(f, u0, v0, a, b, c, w, h):
m31 = 0 - a
m32 = 0 - b
m33 = f + a * (w/2 + u0) + b * (h/2 + v0)
m11 = c - w / 2 * a
m12 = 0 - w / 2 * b
m13 = w / 2 * m33 - c * (w/2 + u0)
m21 = 0 - h / 2 * a
m22 = c - h / 2 * b
m23 = h / 2 * m33 - c * (h/2 + v0)
matrix = np.mat([[m11, m12, m13], [m21, m22, m23], [m31, m32, m33]])
return cv2.warpPerspective(input, matrix, (int(round(w)), int(round(h))))
# read arguments
if(len(sys.argv) != 8) :
print(sys.argv[0], ": takes 7 arguments. Not ", len(sys.argv)-1)
print("Expecting arguments: python3 ", sys.argv[0], "image f u0 v0 a b c.")
print("Example:", sys.argv[0], " Missyou.png 4500 0 0 1.2 0.6 2000")
sys.exit()
image = sys.argv[1]# image
f = float(sys.argv[2])# distance f
u0 = float(sys.argv[3])# principal point (u0, v0)
v0 = float(sys.argv[4])
a = float(sys.argv[5])# 3D plane Z = aX + bY + c
b = float(sys.argv[6])
c = float(sys.argv[7])
# check the correctness of the input parameters
if (f - 0 < 0.0001):# consider the possible inaccurate caused by float
print("Distance f cannot be zero or negative in real world.")
sys.exit()
if (c - 0 < 0.0001):
print("Plane cannot be placed behind the pinhole in real world.")
# read image
print("Processing...")
input = cv2.imread(image, cv2.IMREAD_COLOR)
if(input is None) :
print(sys.argv[0], ": Failed to read image from: ", image)
sys.exit()
#cv2.imshow("input image: " + image, input)
# warpPerspective
rows,cols,_ = input.shape
w = float(cols)
h = float(rows)
output = packagedWarpPerspective(f, u0, v0, a, b, c, w, h)
# Show image
cv2.imshow((sys.argv[0]).replace(".py", " output: "), output)
# Save image
cv2.imwrite("cxl190012_outp_1.png", output)
# wait for key to exit
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"numpy.mat",
"cv2.imwrite",
"cv2.destroyAllWindows",
"sys.exit",
"cv2.waitKey",
"cv2.imread"
] | [((1373, 1408), 'cv2.imread', 'cv2.imread', (['image', 'cv2.IMREAD_COLOR'], {}), '(image, cv2.IMREAD_COLOR)\n', (1383, 1408), False, 'import cv2\n'), ((1776, 1819), 'cv2.imwrite', 'cv2.imwrite', (['"""cxl190012_outp_1.png"""', 'output'], {}), "('cxl190012_outp_1.png', output)\n", (1787, 1819), False, 'import cv2\n'), ((1844, 1858), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1855, 1858), False, 'import cv2\n'), ((1859, 1882), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1880, 1882), False, 'import cv2\n'), ((386, 445), 'numpy.mat', 'np.mat', (['[[m11, m12, m13], [m21, m22, m23], [m31, m32, m33]]'], {}), '([[m11, m12, m13], [m21, m22, m23], [m31, m32, m33]])\n', (392, 445), True, 'import numpy as np\n'), ((793, 803), 'sys.exit', 'sys.exit', ([], {}), '()\n', (801, 803), False, 'import sys\n'), ((1226, 1236), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1234, 1236), False, 'import sys\n'), ((1496, 1506), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1504, 1506), False, 'import sys\n')] |
#!/usr/bin/env python3
from filterpy.kalman import KalmanFilter
import matplotlib.pyplot as plt
import numpy as np
import pdb
from scipy.optimize import linear_sum_assignment as linear_assignment
import sys
import time
from transform_utils import convert_3dbox_to_8corner
from iou_utils import compute_iou_2d_bboxes
class KalmanBoxTracker(object):
"""
This class represents the internel state of individual tracked objects observed as bbox.
"""
count = 0
def __init__(self, classname, bbox3D, info):
"""
Initialises a tracker using initial bounding box.
"""
#define constant velocity model
self.kf = KalmanFilter(dim_x=10, dim_z=7)
self.kf.F = np.array([[1,0,0,0,0,0,0,1,0,0], # state transition matrix
[0,1,0,0,0,0,0,0,1,0],
[0,0,1,0,0,0,0,0,0,1],
[0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0],
[0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,0,1,0],
[0,0,0,0,0,0,0,0,0,1]])
self.kf.H = np.array([[1,0,0,0,0,0,0,0,0,0], # measurement function,
[0,1,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0]])
# with angular velocity
# self.kf = KalmanFilter(dim_x=11, dim_z=7)
# self.kf.F = np.array([[1,0,0,0,0,0,0,1,0,0,0], # state transition matrix
# [0,1,0,0,0,0,0,0,1,0,0],
# [0,0,1,0,0,0,0,0,0,1,0],
# [0,0,0,1,0,0,0,0,0,0,1],
# [0,0,0,0,1,0,0,0,0,0,0],
# [0,0,0,0,0,1,0,0,0,0,0],
# [0,0,0,0,0,0,1,0,0,0,0],
# [0,0,0,0,0,0,0,1,0,0,0],
# [0,0,0,0,0,0,0,0,1,0,0],
# [0,0,0,0,0,0,0,0,0,1,0],
# [0,0,0,0,0,0,0,0,0,0,1]])
# self.kf.H = np.array([[1,0,0,0,0,0,0,0,0,0,0], # measurement function,
# [0,1,0,0,0,0,0,0,0,0,0],
# [0,0,1,0,0,0,0,0,0,0,0],
# [0,0,0,1,0,0,0,0,0,0,0],
# [0,0,0,0,1,0,0,0,0,0,0],
# [0,0,0,0,0,1,0,0,0,0,0],
# [0,0,0,0,0,0,1,0,0,0,0]])
# self.kf.R[0:,0:] *= 10. # measurement uncertainty
self.classname = classname
if self.classname == "VEHICLE":
# self.kf.P[7:,7:] *= 1000. #state uncertainty, give high uncertainty to the unobservable initial velocities, covariance matrix
# self.kf.P *= 10.
self.kf.P = np.diag([3.84112129e-02, 3.01642740e-01, 2.02883554e+00, 1.05744544e+04, 1.19499250e+02, \
3.96939530e-01, 6.31369764e+00,\
0.08224643, 0.02266425, 0.99492726])
self.kf.Q = np.diag([4.28608065e-02, 4.83431856e-02, 2.28783624e-01, 4.15348634e+03, 6.61465835e+02, \
8.72206718e-01, 9.48450563e+00, 5.71719333e-01, 4.34452682e-01, 2.15790151e-02])
self.kf.R = np.diag([3.84112129e-02, 3.01642740e-01, 2.02883554e+00, 1.05744544e+04, 1.19499250e+02, \
3.96939530e-01, 6.31369764e+00])
elif self.classname == "PEDESTRIAN":
# self.kf.P[7:,7:] *= 1000. #state uncertainty, give high uncertainty to the unobservable initial velocities, covariance matrix
# self.kf.P *= 10.
self.kf.P = np.diag([3.84112129e-02, 3.01642740e-01, 2.02883554e+00, 1.05744544e+04, 1.19499250e+02, \
3.96939530e-01, 6.31369764e+00,\
0.04092393, 0.01482923, 2.0059979])
self.kf.Q = np.diag([2.23634146e-02, 1.79376861e-02, 1.92915952e-02, 2.14261851e+03, 2.97151716e+02, \
1.85100157e-01, 6.02065445e+00, 1.79828381e-01, 6.98850253e-02, 5.84408290e-03])
self.kf.R = np.diag([3.84112129e-02, 3.01642740e-01, 2.02883554e+00, 1.05744544e+04, 1.19499250e+02, \
3.96939530e-01, 6.31369764e+00])
else:
self.kf.P[7:,7:] *= 1000. #state uncertainty, give high uncertainty to the unobservable initial velocities, covariance matrix
self.kf.P *= 10.
# self.kf.Q[-1,-1] *= 0.01 # process uncertainty
self.kf.Q[7:,7:] *= 0.01
self.kf.x[:7] = bbox3D.reshape((7, 1))
self.time_since_update = 0
self.id = KalmanBoxTracker.count
KalmanBoxTracker.count += 1
self.history = []
self.hits = 1 # number of total hits including the first detection
self.hit_streak = 1 # number of continuing hit considering the first detection
self.first_continuing_hit = 1
self.still_first = True
self.age = 0
self.info = info # other info
pass
def update(self, bbox3D, info):
    """
    Updates the state vector with observed bbox.

    Parameters
    ----------
    bbox3D : ndarray
        observed 3D box [x, y, z, theta, l, w, h]; theta is re-wrapped
        into (-pi, pi] in place (bbox3D is mutated)
    info : array-like
        auxiliary detection info, stored on the tracker unchanged
    """
    self.time_since_update = 0
    self.history = []
    self.hits += 1
    self.hit_streak += 1            # number of continuing hits
    if self.still_first:
        self.first_continuing_hit += 1   # continuing hits since the first detection
    ######################### orientation correction
    # wrap the filter's heading angle into (-pi, pi]
    if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2
    if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2

    new_theta = bbox3D[3]
    # wrap the observed heading angle into (-pi, pi] as well
    if new_theta >= np.pi: new_theta -= np.pi * 2
    if new_theta < -np.pi: new_theta += np.pi * 2
    bbox3D[3] = new_theta

    predicted_theta = self.kf.x[3]
    # if the two headings differ by an obtuse angle (90..270 degrees),
    # flip the predicted heading by pi so the residual stays acute
    if abs(new_theta - predicted_theta) > np.pi / 2.0 and abs(new_theta - predicted_theta) < np.pi * 3 / 2.0:
        self.kf.x[3] += np.pi
        if self.kf.x[3] > np.pi: self.kf.x[3] -= np.pi * 2   # keep theta in range
        if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2

    # now the angle is acute: < 90 or > 270, convert the case of > 270 to < 90
    if abs(new_theta - self.kf.x[3]) >= np.pi * 3 / 2.0:
        if new_theta > 0: self.kf.x[3] += np.pi * 2
        else: self.kf.x[3] -= np.pi * 2

    #########################
    self.kf.update(bbox3D)

    # re-wrap after the Kalman update
    if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2
    if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
    self.info = info
def predict(self):
    """
    Advances the state vector and returns the predicted bounding box estimate.
    """
    self.kf.predict()

    # keep the heading angle wrapped into (-pi, pi]
    theta = self.kf.x[3]
    if theta >= np.pi:
        self.kf.x[3] = theta - np.pi * 2
    elif theta < -np.pi:
        self.kf.x[3] = theta + np.pi * 2

    self.age += 1
    if self.time_since_update > 0:
        # a frame passed without a matched update: the streak is broken
        self.hit_streak = 0
        self.still_first = False
    self.time_since_update += 1

    self.history.append(self.kf.x)
    return self.history[-1]
def get_state(self):
    """
    Returns the current bounding box estimate.

    Returns
    -------
    ndarray
        flat length-7 array [x, y, z, theta, l, w, h]
    """
    bbox = self.kf.x[:7]
    return bbox.reshape((7, ))
def associate_detections_to_trackers(detections, trackers, iou_threshold=0.1):
    """
    Assigns detections to tracked object (both represented as bounding boxes)

    detections:  N x 8 x 3
    trackers:    M x 8 x 3

    Returns 3 lists of matches, unmatched_detections and unmatched_trackers
    """
    if len(trackers) == 0:
        # nothing to match against: every detection is unmatched
        return (np.empty((0, 2), dtype=int),
                np.arange(len(detections)),
                np.empty((0, 8, 3), dtype=int))

    # pairwise 2D IoU between every detection and every tracker
    iou_matrix = np.zeros((len(detections), len(trackers)), dtype=np.float32)
    for det_idx, det in enumerate(detections):
        for trk_idx, trk in enumerate(trackers):
            iou_matrix[det_idx, trk_idx] = compute_iou_2d_bboxes(det, trk)

    # hungarian algorithm on negated IoU (maximise total overlap)
    matched_indices = np.column_stack(linear_assignment(-iou_matrix))

    unmatched_detections = [d for d in range(len(detections))
                            if d not in matched_indices[:, 0]]
    unmatched_trackers = [t for t in range(len(trackers))
                          if t not in matched_indices[:, 1]]

    # filter out matched pairs with too low an IOU
    matches = []
    for pair in matched_indices:
        if iou_matrix[pair[0], pair[1]] < iou_threshold:
            unmatched_detections.append(pair[0])
            unmatched_trackers.append(pair[1])
        else:
            matches.append(pair.reshape(1, 2))

    if matches:
        matches = np.concatenate(matches, axis=0)
    else:
        matches = np.empty((0, 2), dtype=int)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
class AB3DMOT(object):
    """Multi-object 3D tracker managing a set of KalmanBoxTracker instances."""

    def __init__(self, classname, max_age=2,min_hits=3):
        """
        Parameters
        ----------
        classname : str
            object class (e.g. "VEHICLE", "PEDESTRIAN") forwarded to every
            new KalmanBoxTracker so it selects the matching noise covariances
        max_age : int
            drop a track after this many consecutive frames without a
            matched detection (interpolates short occlusions)
        min_hits : int
            frames of supporting detections required before a track is reported
        """
        self.max_age = max_age
        self.min_hits = min_hits
        self.trackers = []
        self.frame_count = 0
        self.classname = classname
        # self.reorder = [3, 4, 5, 6, 2, 1, 0]
        # self.reorder_back = [6, 5, 4, 0, 1, 2, 3]

    def update(self,dets_all):
        """
        Params:
          dets_all: dict
            dets - a numpy array of detections in the format [[x,y,z,theta,l,w,h],[x,y,z,theta,l,w,h],...]
            info: a array of other info for each det
        Requires: this method must be called once for each frame even with empty detections.
        Returns the a similar array, where the last column is the object ID.

        NOTE: The number of objects returned may differ from the number of detections provided.
        """
        dets, info = dets_all['dets'], dets_all['info']    # dets: N x 7, float numpy array
        self.frame_count += 1

        # predict every existing track one frame forward: N x 7 predictions
        trks = np.zeros((len(self.trackers),7))
        to_del = []
        ret = []
        for t,trk in enumerate(trks):
            pos = self.trackers[t].predict().reshape((-1, 1))
            trk[:] = [pos[0], pos[1], pos[2], pos[3], pos[4], pos[5], pos[6]]
            if(np.any(np.isnan(pos))):
                to_del.append(t)    # prediction went invalid -> schedule removal
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        for t in reversed(to_del):    # reversed so earlier indices stay valid
            self.trackers.pop(t)

        # convert both sets of boxes to 8-corner form for IoU association
        dets_8corner = [convert_3dbox_to_8corner(det_tmp) for det_tmp in dets]
        if len(dets_8corner) > 0: dets_8corner = np.stack(dets_8corner, axis=0)
        else: dets_8corner = []
        trks_8corner = [convert_3dbox_to_8corner(trk_tmp) for trk_tmp in trks]
        if len(trks_8corner) > 0: trks_8corner = np.stack(trks_8corner, axis=0)

        matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets_8corner, trks_8corner)

        # update matched trackers with assigned detections
        for t,trk in enumerate(self.trackers):
            if t not in unmatched_trks:
                d = matched[np.where(matched[:,1]==t)[0],0]    # a list of index
                trk.update(dets[d,:][0], info[d, :][0])

        # create and initialise new trackers for unmatched detections
        for i in unmatched_dets:    # i is a scalar index into dets
            trk = KalmanBoxTracker(self.classname, dets[i,:], info[i, :])
            self.trackers.append(trk)

        # walk the tracker list backwards so pop(i) cannot shift unseen entries
        i = len(self.trackers)
        for trk in reversed(self.trackers):
            d = trk.get_state()    # bbox location
            # report a track while it is fresh and has enough supporting hits
            # (or while the sequence is still within its first min_hits frames)
            if((trk.time_since_update < self.max_age) and (trk.hits >= self.min_hits or self.frame_count <= self.min_hits)):
                ret.append(np.concatenate((d, [trk.id+1], trk.info)).reshape(1,-1))    # +1 as MOT benchmark requires positive IDs
            i -= 1
            # remove dead tracklet
            if(trk.time_since_update >= self.max_age):
                self.trackers.pop(i)
        if(len(ret)>0):
            return np.concatenate(ret)    # x, y, z, theta, l, w, h, ID, other info, confidence
        return np.empty((0,15))
| [
"scipy.optimize.linear_sum_assignment",
"numpy.where",
"filterpy.kalman.KalmanFilter",
"numpy.column_stack",
"numpy.diag",
"transform_utils.convert_3dbox_to_8corner",
"numpy.array",
"numpy.stack",
"numpy.empty",
"numpy.isnan",
"numpy.concatenate",
"iou_utils.compute_iou_2d_bboxes",
"numpy.ma... | [((8172, 8202), 'scipy.optimize.linear_sum_assignment', 'linear_assignment', (['(-iou_matrix)'], {}), '(-iou_matrix)\n', (8189, 8202), True, 'from scipy.optimize import linear_sum_assignment as linear_assignment\n'), ((8250, 8282), 'numpy.column_stack', 'np.column_stack', (['matched_indices'], {}), '(matched_indices)\n', (8265, 8282), True, 'import numpy as np\n'), ((634, 665), 'filterpy.kalman.KalmanFilter', 'KalmanFilter', ([], {'dim_x': '(10)', 'dim_z': '(7)'}), '(dim_x=10, dim_z=7)\n', (646, 665), False, 'from filterpy.kalman import KalmanFilter\n'), ((689, 1036), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 1, \n 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0,\n 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 1, 0], [\n 0, 0, 1, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0,\n 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (697, 1036), True, 'import numpy as np\n'), ((1223, 1470), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0,\n 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]\n ]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [\n 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0,\n 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 1, 0, 0, 0]])\n', (1231, 1470), True, 'import numpy as np\n'), ((8870, 8897), 'numpy.empty', 'np.empty', (['(0, 2)'], {'dtype': 'int'}), '((0, 2), 
dtype=int)\n', (8878, 8897), True, 'import numpy as np\n'), ((8918, 8949), 'numpy.concatenate', 'np.concatenate', (['matches'], {'axis': '(0)'}), '(matches, axis=0)\n', (8932, 8949), True, 'import numpy as np\n'), ((8968, 8998), 'numpy.array', 'np.array', (['unmatched_detections'], {}), '(unmatched_detections)\n', (8976, 8998), True, 'import numpy as np\n'), ((9000, 9028), 'numpy.array', 'np.array', (['unmatched_trackers'], {}), '(unmatched_trackers)\n', (9008, 9028), True, 'import numpy as np\n'), ((12396, 12413), 'numpy.empty', 'np.empty', (['(0, 15)'], {}), '((0, 15))\n', (12404, 12413), True, 'import numpy as np\n'), ((2999, 3134), 'numpy.diag', 'np.diag', (['[0.0384112129, 0.30164274, 2.02883554, 10574.4544, 119.49925, 0.39693953, \n 6.31369764, 0.08224643, 0.02266425, 0.99492726]'], {}), '([0.0384112129, 0.30164274, 2.02883554, 10574.4544, 119.49925, \n 0.39693953, 6.31369764, 0.08224643, 0.02266425, 0.99492726])\n', (3006, 3134), True, 'import numpy as np\n'), ((3206, 3350), 'numpy.diag', 'np.diag', (['[0.0428608065, 0.0483431856, 0.228783624, 4153.48634, 661.465835, \n 0.872206718, 9.48450563, 0.571719333, 0.434452682, 0.0215790151]'], {}), '([0.0428608065, 0.0483431856, 0.228783624, 4153.48634, 661.465835, \n 0.872206718, 9.48450563, 0.571719333, 0.434452682, 0.0215790151])\n', (3213, 3350), True, 'import numpy as np\n'), ((3404, 3503), 'numpy.diag', 'np.diag', (['[0.0384112129, 0.30164274, 2.02883554, 10574.4544, 119.49925, 0.39693953, \n 6.31369764]'], {}), '([0.0384112129, 0.30164274, 2.02883554, 10574.4544, 119.49925, \n 0.39693953, 6.31369764])\n', (3411, 3503), True, 'import numpy as np\n'), ((7723, 7750), 'numpy.empty', 'np.empty', (['(0, 2)'], {'dtype': 'int'}), '((0, 2), dtype=int)\n', (7731, 7750), True, 'import numpy as np\n'), ((7778, 7808), 'numpy.empty', 'np.empty', (['(0, 8, 3)'], {'dtype': 'int'}), '((0, 8, 3), dtype=int)\n', (7786, 7808), True, 'import numpy as np\n'), ((8119, 8150), 'iou_utils.compute_iou_2d_bboxes', 
'compute_iou_2d_bboxes', (['det', 'trk'], {}), '(det, trk)\n', (8140, 8150), False, 'from iou_utils import compute_iou_2d_bboxes\n'), ((10737, 10763), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['trks'], {}), '(trks)\n', (10757, 10763), True, 'import numpy as np\n'), ((10847, 10880), 'transform_utils.convert_3dbox_to_8corner', 'convert_3dbox_to_8corner', (['det_tmp'], {}), '(det_tmp)\n', (10871, 10880), False, 'from transform_utils import convert_3dbox_to_8corner\n'), ((10947, 10977), 'numpy.stack', 'np.stack', (['dets_8corner'], {'axis': '(0)'}), '(dets_8corner, axis=0)\n', (10955, 10977), True, 'import numpy as np\n'), ((11026, 11059), 'transform_utils.convert_3dbox_to_8corner', 'convert_3dbox_to_8corner', (['trk_tmp'], {}), '(trk_tmp)\n', (11050, 11059), False, 'from transform_utils import convert_3dbox_to_8corner\n'), ((11126, 11156), 'numpy.stack', 'np.stack', (['trks_8corner'], {'axis': '(0)'}), '(trks_8corner, axis=0)\n', (11134, 11156), True, 'import numpy as np\n'), ((12306, 12325), 'numpy.concatenate', 'np.concatenate', (['ret'], {}), '(ret)\n', (12320, 12325), True, 'import numpy as np\n'), ((3761, 3895), 'numpy.diag', 'np.diag', (['[0.0384112129, 0.30164274, 2.02883554, 10574.4544, 119.49925, 0.39693953, \n 6.31369764, 0.04092393, 0.01482923, 2.0059979]'], {}), '([0.0384112129, 0.30164274, 2.02883554, 10574.4544, 119.49925, \n 0.39693953, 6.31369764, 0.04092393, 0.01482923, 2.0059979])\n', (3768, 3895), True, 'import numpy as np\n'), ((3955, 4101), 'numpy.diag', 'np.diag', (['[0.0223634146, 0.0179376861, 0.0192915952, 2142.61851, 297.151716, \n 0.185100157, 6.02065445, 0.179828381, 0.0698850253, 0.0058440829]'], {}), '([0.0223634146, 0.0179376861, 0.0192915952, 2142.61851, 297.151716, \n 0.185100157, 6.02065445, 0.179828381, 0.0698850253, 0.0058440829])\n', (3962, 4101), True, 'import numpy as np\n'), ((4154, 4253), 'numpy.diag', 'np.diag', (['[0.0384112129, 0.30164274, 2.02883554, 10574.4544, 119.49925, 0.39693953, \n 6.31369764]'], {}), 
'([0.0384112129, 0.30164274, 2.02883554, 10574.4544, 119.49925, \n 0.39693953, 6.31369764])\n', (4161, 4253), True, 'import numpy as np\n'), ((10664, 10677), 'numpy.isnan', 'np.isnan', (['pos'], {}), '(pos)\n', (10672, 10677), True, 'import numpy as np\n'), ((11420, 11448), 'numpy.where', 'np.where', (['(matched[:, 1] == t)'], {}), '(matched[:, 1] == t)\n', (11428, 11448), True, 'import numpy as np\n'), ((12049, 12092), 'numpy.concatenate', 'np.concatenate', (['(d, [trk.id + 1], trk.info)'], {}), '((d, [trk.id + 1], trk.info))\n', (12063, 12092), True, 'import numpy as np\n')] |
"""
This file is part of pyS5p
https://github.com/rmvanhees/pys5p.git
The class ICMio provides read access to S5p Tropomi ICM_CA_SIR products
Copyright (c) 2017-2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
from datetime import datetime, timedelta
from pathlib import Path, PurePosixPath
from setuptools_scm import get_version
import h5py
import numpy as np
# - global parameters ------------------------------
# - local functions --------------------------------
# - class definition -------------------------------
class ICMio():
"""
This class should offer all the necessary functionality to read Tropomi
ICM_CA_SIR products
Attributes
----------
fid : h5py.File
filename : string
bands : string
Methods
-------
coverage_time
Returns start and end of the measurement coverage time.
creation_time
Returns creation date of this product
orbit
Returns value of revolution counter.
processor_version
Returns version of the L01b processor used to generate this product.
close()
Close resources
find(msm_class)
Find a measurement as <processing-class name>.
select(msm_type: str, msm_path=None)
Select a measurement as <processing class>_<ic_id>.
get_attr(attr_name)
Obtain value of an HDF5 file attribute.
get_ref_time(band=None)
Returns reference start time of measurements.
get_delta_time(band=None)
Returns offset from the reference start time of measurement.
get_instrument_settings(band=None)
Returns instrument settings of measurement.
get_exposure_time(band=None)
Returns pixel exposure time of the measurements, which is calculated
from the parameters 'int_delay' and 'int_hold' for SWIR.
get_housekeeping_data(band=None)
Returns housekeeping data of measurements.
get_msmt_keys(band=None)
Read msmt_keys from the analysis groups.
get_msm_attr(msm_dset, attr_name, band=None)
Returns attribute of measurement dataset 'msm_dset'.
get_geo_data(band=None, geo_dset='satellite_latitude,satellite_longitude')
Returns data of selected datasets from the GEODATA group.
get_msm_data(msm_dset, band='78', *, read_raw=False, columns=None,
fill_as_nan=True)
Read datasets from a measurement selected by class-method 'select'
read_direct_msm(msm_dset, dest_sel=None, dest_dtype=None, fill_as_nan=False)
The faster implementation of class method 'get_msm_data'.
set_housekeeping_data(data, band=None)
Returns housekeeping data of measurements.
set_msm_data(msm_dset, data, band='78')
Alter dataset from a measurement selected using function 'select'.
Notes
-----
Examples
--------
"""
def __init__(self, icm_product, readwrite=False):
    """
    Initialize access to an ICM product

    Parameters
    ----------
    icm_product :  string
       full path to in-flight calibration measurement product
    readwrite   :  boolean
       open product in read-write mode (default is False)
    """
    if not Path(icm_product).is_file():
        raise FileNotFoundError(f'{icm_product} does not exist')

    # initialize class-attributes
    self.__rw = readwrite
    self.__msm_path = None
    self.__patched_msm = []
    self.filename = icm_product
    self.bands = None

    # open ICM product as HDF5 file
    file_mode = "r+" if readwrite else "r"
    self.fid = h5py.File(icm_product, file_mode)
def __repr__(self):
    """Return 'ICMio(<filename>, readwrite=<flag>)' for debugging."""
    return '{}({!r}, readwrite={!r})'.format(
        type(self).__name__, self.filename, self.__rw)
def __iter__(self):
    """Yield this object's non-dunder attribute names in sorted order."""
    yield from (name for name in sorted(self.__dict__)
                if not name.startswith("__"))
# def __del__(self):
# """
# called when the object is destroyed
# """
# self.close()
def __enter__(self):
    """Enter the runtime context; the reader itself is the context object."""
    return self
def __exit__(self, exc_type, exc_value, traceback):
    """Leave the runtime context, releasing the HDF5 resources."""
    self.close()
    # returning False lets any exception from the with-block propagate
    return False
def close(self):
    """
    Close the HDF5 product file.

    Before closing the product, we make sure that the output product
    describes what has been altered by the S/W. To keep any change
    traceable, we write as attributes of the group METADATA/SRON_METADATA:

     - dateStamp ('now')
     - Git-version of S/W
     - list of patched datasets
    """
    if self.fid is None:
        return          # already closed; calling close() twice is safe

    self.bands = None

    if self.__patched_msm:
        # record provenance of the patches applied to this product
        # pylint: disable=no-member
        sgrp = self.fid.require_group("METADATA/SRON_METADATA")
        sgrp.attrs['dateStamp'] = datetime.utcnow().isoformat()
        sgrp.attrs['git_tag'] = get_version(root='..',
                                            relative_to=__file__)
        if 'patched_datasets' not in sgrp:
            # first patch: create a resizable variable-length string dataset
            dtype = h5py.special_dtype(vlen=str)
            dset = sgrp.create_dataset('patched_datasets',
                                       (len(self.__patched_msm),),
                                       maxshape=(None,), dtype=dtype)
            dset[:] = np.asarray(self.__patched_msm)
        else:
            # append the new names after the existing entries
            dset = sgrp['patched_datasets']
            dset.resize(dset.shape[0] + len(self.__patched_msm), axis=0)
            # BUG FIX: was dset[dset.shape[0]-1:], a one-element slice that
            # cannot hold more than one appended name (broadcast error when
            # several datasets were patched in one session)
            dset[-len(self.__patched_msm):] = np.asarray(self.__patched_msm)

    self.fid.close()
    self.fid = None
# ---------- RETURN VERSION of the S/W ----------
def find(self, msm_class) -> list:
    """
    find a measurement as <processing-class name>

    Parameters
    ----------
    msm_class :  string
      processing-class name without ICID

    Returns
    -------
    out  :  list of strings
       String with msm_type as used by ICMio.select
    """
    matches = set()
    categories = ('ANALYSIS', 'CALIBRATION', 'IRRADIANCE', 'RADIANCE')
    for band in '12345678':
        for cat in categories:
            grp_name = f'BAND{band}_{cat}'
            if grp_name not in self.fid:
                continue
            # collect every measurement whose name starts with msm_class
            matches.update(name for name in self.fid[grp_name]
                           if name.startswith(msm_class))
    return list(matches)
# -------------------------
def select(self, msm_type: str, msm_path=None) -> str:
    """
    Select a measurement as <processing class>_<ic_id>

    Parameters
    ----------
    msm_type :  string
       Name of measurement group
    msm_path : {'BAND%_ANALYSIS', 'BAND%_CALIBRATION',
                'BAND%_IRRADIANCE', 'BAND%_RADIANCE'}
       Name of path in HDF5 file to measurement group

    Returns
    -------
    string
       String with spectral bands found in product or empty

    Attributes
    ----------
    bands      :  string
       Available spectral bands (or empty)
    __msm_path :  string
       Full name of selected group in file (or None)
    """
    self.bands = ''
    self.__msm_path = None

    if msm_path is None:
        # scan every band/category combination for the measurement
        for band in '12345678':
            for cat in ('ANALYSIS', 'CALIBRATION', 'IRRADIANCE', 'RADIANCE'):
                if str(PurePosixPath(f'BAND{band}_{cat}', msm_type)) in self.fid:
                    msm_path = f'BAND%_{cat}'
                    self.bands += band
    else:
        if not msm_path.startswith('BAND%'):
            raise ValueError('msm_path should start with BAND%')
        for band in '12345678':
            if str(PurePosixPath(msm_path.replace('%', band),
                                 msm_type)) in self.fid:
                self.bands += band

    # only record the selection when at least one band was found
    if self.bands:
        self.__msm_path = PurePosixPath(msm_path, msm_type)

    return self.bands
# ---------- Functions that work before MSM selection ----------
@property
def orbit(self) -> int:
    """
    Returns reference orbit number (None when not present)
    """
    try:
        return int(self.fid.attrs['reference_orbit'])
    except KeyError:
        return None
@property
def processor_version(self) -> str:
    """
    Returns version of the L01b processor (None when not present)
    """
    version = self.fid.attrs.get('processor_version')
    if version is None:
        return None
    # h5py may return fixed-length strings as bytes
    if isinstance(version, bytes):
        # pylint: disable=no-member
        return version.decode('ascii')
    return version
@property
def coverage_time(self) -> tuple:
    """
    Returns (start, end) of the measurement coverage time,
    or None when either attribute is absent
    """
    attrs = self.fid.attrs
    if 'time_coverage_start' not in attrs \
       or 'time_coverage_end' not in attrs:
        return None

    def _as_str(val):
        # h5py may return fixed-length strings as bytes
        return val.decode('ascii') if isinstance(val, bytes) else val

    return (_as_str(attrs['time_coverage_start']),
            _as_str(attrs['time_coverage_end']))
@property
def creation_time(self) -> str:
    """
    Returns creation date recorded in the ESA fixed header
    """
    header = self.fid['/METADATA/ESA_METADATA/earth_explorer_header']
    source = header['fixed_header/source']
    # attribute looks like b'UTC=<date>'; keep only the value part
    raw = source.attrs['Creation_Date']
    return raw.split(b'=')[1].decode('ascii')
def get_attr(self, attr_name):
    """
    Obtain value of an HDF5 file attribute

    Parameters
    ----------
    attr_name : string
       name of the attribute

    Returns
    -------
    scalar, string or numpy array; None when the attribute does not exist
    """
    if attr_name not in self.fid.attrs:
        return None

    value = self.fid.attrs[attr_name]
    # h5py may return fixed-length strings as bytes
    return value.decode('ascii') if isinstance(value, bytes) else value
# ---------- Functions that only work after MSM selection ----------
def get_ref_time(self, band=None) -> datetime:
    """
    Returns reference start time of measurements

    Parameters
    ----------
    band      :  None or {'1', '2', '3', ..., '8'}
        Select one of the band present in the product.
        Default is 'None' which returns the first available band

    Returns
    -------
    datetime
        the 2010-01-01 reference epoch plus the 'time' value(s) of the
        selected measurement; the bare epoch when nothing is selected
    """
    # reference epoch used by the L01b products
    ref_time = datetime(2010, 1, 1, 0, 0, 0)
    if not self.__msm_path:
        return ref_time

    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')

    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name

    if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
        # analysis result: follow the group keys to the source measurements
        grp = self.fid[msm_path]
        dset = grp[msm_type.lower() + '_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'OBSERVATIONS')
            grp = self.fid[str(grp_path)]
            # NOTE(review): the time offset of EVERY source group is summed
            # onto ref_time — presumably only one offset is meant; confirm
            ref_time += timedelta(seconds=int(grp['time'][0]))
    elif msm_type in ['DPQF_MAP', 'NOISE']:
        # group keys are stored with the ANALOG_OFFSET_SWIR sibling group
        grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
        grp = self.fid[str(grp_path)]
        dset = grp['analog_offset_swir_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'OBSERVATIONS')
            grp = self.fid[str(grp_path)]
            ref_time += timedelta(seconds=int(grp['time'][0]))
    else:
        grp_path = PurePosixPath(msm_path, 'OBSERVATIONS')
        grp = self.fid[str(grp_path)]
        ref_time += timedelta(seconds=int(grp['time'][0]))
    return ref_time
def get_delta_time(self, band=None):
    """
    Returns offset from the reference start time of measurement

    Parameters
    ----------
    band      :  None or {'1', '2', '3', ..., '8'}
        Select one of the band present in the product.
        Default is 'None' which returns the first available band

    Returns
    -------
    numpy.ndarray or None
        per-measurement integer offsets; None when nothing is selected
    """
    if not self.__msm_path:
        return None

    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')

    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name

    res = None
    if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
        # analysis result: collect delta_time from every referenced group
        grp = self.fid[msm_path]
        dset = grp[msm_type.lower() + '_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'OBSERVATIONS')
            grp = self.fid[str(grp_path)]
            if res is None:
                res = grp['delta_time'][0, :].astype(int)
            else:
                res = np.append(res, grp['delta_time'][0, :].astype(int))
    elif msm_type in ['DPQF_MAP', 'NOISE']:
        # group keys are stored with the ANALOG_OFFSET_SWIR sibling group
        grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
        grp = self.fid[str(grp_path)]
        dset = grp['analog_offset_swir_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'OBSERVATIONS')
            # BUG FIX: h5py group access requires a str path; every other
            # access uses str(grp_path) — was: self.fid[grp_path]
            grp = self.fid[str(grp_path)]
            if res is None:
                res = grp['delta_time'][0, :].astype(int)
            else:
                res = np.append(res, grp['delta_time'][0, :].astype(int))
    else:
        grp_path = PurePosixPath(msm_path, 'OBSERVATIONS')
        grp = self.fid[str(grp_path)]
        res = grp['delta_time'][0, :].astype(int)

    return res
def get_instrument_settings(self, band=None) -> np.ndarray:
    """
    Returns instrument settings of measurement

    Parameters
    ----------
    band      :  None or {'1', '2', '3', ..., '8'}
        Select one of the band present in the product.
        Default is 'None' which returns the first available band

    Returns
    -------
    numpy.ndarray or None
        compound array with the instrument settings; None when nothing
        is selected
    """
    if not self.__msm_path:
        return None

    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')

    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name

    res = None
    if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
        # analysis result: collect settings from every referenced group
        grp = self.fid[msm_path]
        dset = grp[msm_type.lower() + '_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'INSTRUMENT')
            grp = self.fid[str(grp_path)]
            if res is None:
                res = grp['instrument_settings'][:]
            else:
                res = np.append(res, grp['instrument_settings'][:])
    elif msm_type == 'DPQF_MAP':
        # group keys are stored with the ANALOG_OFFSET_SWIR sibling group
        grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
        grp = self.fid[str(grp_path)]
        dset = grp['analog_offset_swir_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'INSTRUMENT')
            # BUG FIX: h5py group access requires a str path; every other
            # access uses str(grp_path) — was: self.fid[grp_path]
            grp = self.fid[str(grp_path)]
            if res is None:
                res = grp['instrument_settings'][:]
            else:
                res = np.append(res, grp['instrument_settings'][:])
    elif msm_type == 'NOISE':
        # noise: use the settings of the median background-radiance ICID
        grp = self.fid[msm_path]
        dset = grp[msm_type.lower() + '_msmt_keys']
        icid = dset['icid'][dset.size // 2]
        grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                 f'BACKGROUND_RADIANCE_MODE_{icid:04d}',
                                 'INSTRUMENT')
        grp = self.fid[str(grp_path)]
        res = grp['instrument_settings'][:]
    else:
        grp_path = PurePosixPath(msm_path, 'INSTRUMENT')
        grp = self.fid[str(grp_path)]
        res = grp['instrument_settings'][:]

    return res
def get_exposure_time(self, band=None) -> list:
    """
    Returns pixel exposure time of the measurements, which is calculated
    from the parameters 'int_delay' and 'int_hold' for SWIR.

    Parameters
    ----------
    band      :  None or {'1', '2', '3', ..., '8'}
        Select one of the band present in the product
        Default is 'None' which returns the first available band
    """
    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')

    # obtain instrument settings
    instr_arr = self.get_instrument_settings(band)
    if instr_arr is None:
        return None

    # calculate exact exposure time
    is_swir = int(band) > 6
    exposure = []
    for settings in instr_arr:
        if is_swir:
            # SWIR exposure follows from the detector timing parameters
            exposure.append(1.25e-6 * (65540 - settings['int_delay']
                                       + settings['int_hold']))
        else:
            exposure.append(settings['exposure_time'])
    return exposure
def get_housekeeping_data(self, band=None):
    """
    Returns housekeeping data of measurements

    Parameters
    ----------
    band      :  None or {'1', '2', '3', ..., '8'}
        Select one of the band present in the product.
        Default is 'None' which returns the first available band

    Returns
    -------
    numpy.ndarray or None
        housekeeping records; None when no measurement is selected
    """
    if not self.__msm_path:
        return None

    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')

    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name

    res = None
    if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
        # analysis result: collect housekeeping from every referenced group
        grp = self.fid[msm_path]
        dset = grp[msm_type.lower() + '_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'INSTRUMENT')
            grp = self.fid[str(grp_path)]
            if res is None:
                res = np.squeeze(grp['housekeeping_data'])
            else:
                res = np.append(res, np.squeeze(grp['housekeeping_data']))
    elif msm_type in ['DPQF_MAP', 'NOISE']:
        # group keys are stored with the ANALOG_OFFSET_SWIR sibling group
        grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
        grp = self.fid[str(grp_path)]
        dset = grp['analog_offset_swir_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            # BUG FIX: was the plain string 'BAND{band}_CALIBRATION'
            # (missing f-prefix), so the band number was never substituted
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'INSTRUMENT')
            grp = self.fid[str(grp_path)]
            if res is None:
                res = np.squeeze(grp['housekeeping_data'])
            else:
                res = np.append(res, np.squeeze(grp['housekeeping_data']))
    else:
        grp_path = PurePosixPath(msm_path, 'INSTRUMENT')
        grp = self.fid[str(grp_path)]
        res = np.squeeze(grp['housekeeping_data'])

    return res
# -------------------------
def get_msmt_keys(self, band=None):
    """
    Read msmt_keys from the analysis groups

    Parameters
    ----------
    band      :  None or {'1', '2', '3', ..., '8'}
        Select one of the band present in the product.
        Default is 'None' which returns the first available band

    Returns
    -------
    [ANALOG_OFFSET_SWIR] analog_offset_swir_group_keys
    [LONG_TERM_SWIR]     long_term_swir_group_keys
    [NOISE]              noise_msmt_keys
    else None
    """
    if not self.__msm_path:
        return None

    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')

    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name

    # the name of the keys dataset depends on the processing class
    if msm_type in ('ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR'):
        dset_name = msm_type.lower() + '_group_keys'
    elif msm_type == 'NOISE':
        dset_name = msm_type.lower() + '_msmt_keys'
    else:
        return None
    return np.squeeze(self.fid[msm_path][dset_name])
# -------------------------
def get_msm_attr(self, msm_dset, attr_name, band=None):
    """
    Returns attribute of measurement dataset "msm_dset"

    Parameters
    ----------
    msm_dset  :  string
        Name of measurement dataset
    attr_name :  string
        Name of the attribute
    band      :  None or {'1', '2', '3', ..., '8'}
        Select one of the band present in the product.
        Default is 'None' which returns the first available band

    Returns
    -------
    out   :   scalar or numpy array
        value of attribute "attr_name" (None when not found)
    """
    if not self.__msm_path:
        return None

    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')

    msm_path = str(self.__msm_path).replace('%', band)
    # the dataset may live in OBSERVATIONS, ANALYSIS or the group itself
    for sub_grp in ('OBSERVATIONS', 'ANALYSIS', ''):
        ds_name = str(PurePosixPath(msm_path, sub_grp, msm_dset))
        if ds_name not in self.fid:
            continue
        attrs = self.fid[ds_name].attrs
        if attr_name not in attrs:
            continue
        value = attrs[attr_name]
        # h5py may return fixed-length strings as bytes
        return value.decode('ascii') if isinstance(value, bytes) else value

    return None
def get_geo_data(self, band=None,
                 geo_dset='satellite_latitude,satellite_longitude'):
    """
    Returns data of selected datasets from the GEODATA group

    Parameters
    ----------
    geo_dset  :  string
        Name(s) of datasets in the GEODATA group, comma separated
        Default is 'satellite_latitude,satellite_longitude'
    band      :  None or {'1', '2', '3', ..., '8'}
        Select one of the band present in the product
        Default is 'None' which returns the first available band

    Returns
    -------
    out   :   dictionary
       dictionary data of selected datasets from the GEODATA group
       names of dictionary are taken from parameter geo_dset
    """
    if not self.__msm_path:
        return None

    if band is None:
        band = str(self.bands[0])
    elif band not in self.bands:
        raise ValueError('band not found in product')

    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name

    res = {}
    if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
        # analysis result: follow the group keys to the source measurements
        grp = self.fid[msm_path]
        dset = grp[msm_type.lower() + '_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'GEODATA')
            grp = self.fid[str(grp_path)]
            # NOTE(review): res[key] is overwritten on every iteration, so
            # only the LAST group's values survive — confirm this is intended
            for key in geo_dset.split(','):
                res[key] = np.squeeze(grp[key])
    elif msm_type in ['DPQF_MAP', 'NOISE']:
        # group keys are stored with the ANALOG_OFFSET_SWIR sibling group
        grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
        grp = self.fid[str(grp_path)]
        dset = grp['analog_offset_swir_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'GEODATA')
            grp = self.fid[str(grp_path)]
            for key in geo_dset.split(','):
                res[key] = np.squeeze(grp[key])
    else:
        grp_path = PurePosixPath(msm_path, 'GEODATA')
        grp = self.fid[str(grp_path)]
        for key in geo_dset.split(','):
            res[key] = np.squeeze(grp[key])

    return res
def get_msm_data(self, msm_dset, band='78', *, read_raw=False,
                 columns=None, fill_as_nan=True):
    """
    Read datasets from a measurement selected by class-method "select"

    Parameters
    ----------
    msm_dset : string
        name of measurement dataset
        if msm_dset is None then show names of available datasets
    band : {'1', '2', '3', ..., '8', '12', '34', '56', '78'}
        Select data from one spectral band or channel
        Default is '78' which combines band 7/8 to SWIR detector layout
    read_raw : boolean
        Perform raw read: without slicing or data conversion,
        and ignore keywords: colums, fill_as_nan.
        Default: False
    columns : [i, j]
        Slice data on fastest axis (columns) as from index 'i' to 'j'
    fill_as_nan : boolean
        Replace (float) FillValues with Nan's, when True

    Returns
    -------
    out : array
        Data of measurement dataset "msm_dset"
    """
    # FillValue used by the float datasets in this product (compared
    # against dset.attrs['_FillValue'] below).
    fillvalue = float.fromhex('0x1.ep+122')
    if not self.__msm_path:
        return None
    if not isinstance(band, str):
        raise TypeError('band must be a string')
    if band not in self.bands:
        raise ValueError('band not found in product')
    data = []
    if read_raw:
        # Raw mode: return each matching dataset squeezed, no slicing
        # and no FillValue handling.
        for ii in band:
            for dset_grp in ['OBSERVATIONS', 'ANALYSIS', '']:
                msm_path = str(self.__msm_path).replace('%', ii)
                ds_path = PurePosixPath(msm_path, dset_grp, msm_dset)
                if str(ds_path) not in self.fid:
                    continue
                data.append(np.squeeze(self.fid[str(ds_path)]))
        return data
    # skip row257 from the SWIR detector
    rows = None
    if int(band[0]) > 6:
        rows = [0, -1]
    # list potential names of the dataset dimensions
    time_list = ['time', 'scanline']
    row_list = ['width', 'pixel', 'pixel_window', 'ground_pixel']
    column_list = ['height', 'spectral_channel', 'spectral_channel_window']
    column_dim = None  # column dimension is unknown
    for ii in band:
        for dset_grp in ['OBSERVATIONS', 'ANALYSIS', '']:
            msm_path = str(self.__msm_path).replace('%', ii)
            ds_path = PurePosixPath(msm_path, dset_grp, msm_dset)
            if str(ds_path) not in self.fid:
                continue
            dset = self.fid[str(ds_path)]
            # Build a per-dimension selection tuple from the dimension
            # scales attached to the dataset.  'skipped' counts length-1
            # dimensions removed later by np.squeeze, so that column_dim
            # refers to an axis of the squeezed result.
            skipped = 0
            data_sel = ()
            for xx in range(dset.ndim):
                if len(dset.dims[xx][0][:]) == 1:
                    skipped += 1
                dim_name = PurePosixPath(dset.dims[xx][0].name).name
                if dim_name in time_list:
                    data_sel += (slice(None),)
                elif dim_name in row_list:
                    if rows is None:
                        data_sel += (slice(None),)
                    else:
                        data_sel += (slice(*rows),)
                elif dim_name in column_list:
                    column_dim = xx - skipped
                    if columns is None:
                        data_sel += (slice(None),)
                    else:
                        data_sel += (slice(*columns),)
                else:
                    # unknown dimension name: refuse to guess a selection
                    raise ValueError
            if dset.dtype == np.float32:
                # promote to float64 before slicing
                res = np.squeeze(dset.astype(float)[data_sel])
            else:
                res = np.squeeze(dset[data_sel])
            if fill_as_nan and dset.attrs['_FillValue'] == fillvalue:
                res[(res == fillvalue)] = np.nan
            data.append(res)
    # Note the current implementation will not work for channels where
    # the output of its bands can have different spatial dimensions (rows)
    # or different integration times (frames/scanlines)
    #
    # no data found
    if not data:
        return None
    # return selected band
    if len(data) == 1:
        return data[0]
    # return bands stacked
    if column_dim is None:
        return data  # np.stack(data)
    # return band in detector layout
    return np.concatenate(data, axis=column_dim)
def read_direct_msm(self, msm_dset, dest_sel=None,
                    dest_dtype=None, fill_as_nan=False):
    """
    The faster implementation of get_msm_data()

    Parameters
    ----------
    msm_dset : string
        Name of measurement dataset
    dest_sel : numpy slice
        Selection must be the output of numpy.s_[<args>].
    dest_dtype : numpy dtype
        Perform type conversion
    fill_as_nan : boolean
        Replace (float) FillValues with Nan's, when True

    Returns
    -------
    out : list
        list with data of all available bands
    """
    # default FillValue; may be replaced by the dataset attribute below
    fillvalue = float.fromhex('0x1.ep+122')
    if not self.__msm_path:
        return None
    if dest_sel is None:
        dest_sel = np.s_[...]  # read everything
    data = []
    for ii in self.bands:
        msm_path = str(self.__msm_path).replace('%', ii)
        for dset_grp in ['OBSERVATIONS', 'ANALYSIS', '']:
            ds_path = str(PurePosixPath(msm_path, dset_grp, msm_dset))
            if ds_path not in self.fid:
                continue
            dset = self.fid[ds_path]
            if dest_dtype is None:
                buff = dset[dest_sel]
                if fill_as_nan and '_FillValue' in dset.attrs:
                    if np.issubdtype(buff.dtype, np.floating):
                        # use the dataset's own FillValue
                        fillvalue = dset.attrs['_FillValue'][0]
                        buff[(buff == fillvalue)] = np.nan
                else:
                buff = dset.astype(dest_dtype)[dest_sel]
                if fill_as_nan and '_FillValue' in dset.attrs:
                    if np.issubdtype(buff.dtype, np.floating):
                        # NOTE(review): unlike the branch above, this uses
                        # the hardcoded default fillvalue instead of
                        # dset.attrs['_FillValue'][0] — confirm intended.
                        buff[(buff == fillvalue)] = np.nan
            data.append(buff)
    return data
# -------------------------
def set_housekeeping_data(self, data, band=None) -> None:
    """
    Write housekeeping data of a measurement into the product.

    Parameters
    ----------
    data : array-like
        housekeeping records written to row 0 of 'housekeeping_data'
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the band present in the product.
        Default is 'None' which selects the first available band

    Raises
    ------
    PermissionError
        when the product was not opened with read/write access
    ValueError
        when the requested band is not present in the product
    """
    if not self.__rw:
        raise PermissionError('read/write access required')
    if not self.__msm_path:
        return
    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')

    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name
    # Aggregate calibration products carry no single housekeeping dataset
    # to patch, so there is nothing to do for them.
    if msm_type in ('ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR',
                    'DPQF_MAP', 'NOISE'):
        return

    ds_path = PurePosixPath(msm_path, 'INSTRUMENT', 'housekeeping_data')
    self.fid[str(ds_path)][0, :] = data
    self.__patched_msm.append(str(ds_path))
def set_msm_data(self, msm_dset, data, band='78') -> None:
    """
    Alter dataset from a measurement selected using function "select"

    Parameters
    ----------
    msm_dset : string
        name of measurement dataset
    band : {'1', '2', '3', ..., '8', '12', '34', '56', '78'}
        Select data from one spectral band or channel
        Default is '78' which combines band 7/8 to SWIR detector layout
    data : array-like
        data to be written with same dimensions as dataset "msm_dset"
    """
    # FillValue used by the float datasets in this product
    fillvalue = float.fromhex('0x1.ep+122')
    if not self.__rw:
        raise PermissionError('read/write access required')
    if not self.__msm_path:
        return
    if not isinstance(band, str):
        raise TypeError('band must be a string')
    if band not in self.bands:
        raise ValueError('band not found in product')
    # skip row257 from the SWIR detector
    rows = None
    if int(band[0]) > 6:
        rows = [0, -1]
    # list potential names of the dataset dimensions
    time_list = ['time', 'scanline']
    row_list = ['width', 'pixel', 'ground_pixel']
    column_list = ['height', 'spectral_channel']
    indx = 0  # index of the band currently written (0 or 1 for a channel)
    for ii in band:
        for dset_grp in ['OBSERVATIONS', 'ANALYSIS', '']:
            msm_path = str(self.__msm_path).replace('%', ii)
            ds_path = PurePosixPath(msm_path, dset_grp, msm_dset)
            if str(ds_path) not in self.fid:
                continue
            dset = self.fid[str(ds_path)]
            # Build the write selection from the dimension scales.
            data_sel = ()
            for xx in range(dset.ndim):
                dim_name = PurePosixPath(dset.dims[xx][0].name).name
                if len(dset.dims[xx][0][:]) == 1:
                    # singleton dimension: write at index 0
                    data_sel += (0,)
                elif dim_name in time_list:
                    data_sel += (slice(None),)
                elif dim_name in row_list:
                    if rows is None:
                        data_sel += (slice(None),)
                    else:
                        data_sel += (slice(*rows),)
                elif dim_name in column_list:
                    if len(band) == 2:
                        # Channel write: split the detector-layout array
                        # into its two bands along the column axis.
                        # NOTE(review): this re-splits ``data`` for every
                        # matching dataset; for the second band of a channel
                        # it operates on the already-stacked array — verify
                        # against a two-band product.
                        jj = data.ndim-1
                        data = np.stack(np.split(data, 2, axis=jj))
                        data_sel += (slice(None),)
                else:
                    # unknown dimension name: refuse to guess a selection
                    raise ValueError
            if len(band) == 2:
                if dset.attrs['_FillValue'] == fillvalue:
                    # restore FillValues before writing back
                    data[indx, np.isnan(data[indx, ...])] = fillvalue
                dset[data_sel] = data[indx, ...]
                indx += 1
            else:
                if dset.attrs['_FillValue'] == fillvalue:
                    data[np.isnan(data)] = fillvalue
                dset[data_sel] = data
    # NOTE(review): only the last visited ds_path is recorded, and this
    # raises NameError when no dataset matched — confirm intent.
    self.__patched_msm.append(ds_path)
| [
"datetime.datetime",
"pathlib.Path",
"datetime.datetime.utcnow",
"numpy.asarray",
"h5py.File",
"numpy.squeeze",
"numpy.append",
"numpy.issubdtype",
"setuptools_scm.get_version",
"numpy.isnan",
"numpy.split",
"numpy.concatenate",
"h5py.special_dtype",
"pathlib.PurePosixPath"
] | [((11000, 11029), 'datetime.datetime', 'datetime', (['(2010)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(2010, 1, 1, 0, 0, 0)\n', (11008, 11029), False, 'from datetime import datetime, timedelta\n'), ((30579, 30616), 'numpy.concatenate', 'np.concatenate', (['data'], {'axis': 'column_dim'}), '(data, axis=column_dim)\n', (30593, 30616), True, 'import numpy as np\n'), ((3594, 3622), 'h5py.File', 'h5py.File', (['icm_product', '"""r+"""'], {}), "(icm_product, 'r+')\n", (3603, 3622), False, 'import h5py\n'), ((3660, 3687), 'h5py.File', 'h5py.File', (['icm_product', '"""r"""'], {}), "(icm_product, 'r')\n", (3669, 3687), False, 'import h5py\n'), ((5160, 5204), 'setuptools_scm.get_version', 'get_version', ([], {'root': '""".."""', 'relative_to': '__file__'}), "(root='..', relative_to=__file__)\n", (5171, 5204), False, 'from setuptools_scm import get_version\n'), ((8444, 8477), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', 'msm_type'], {}), '(msm_path, msm_type)\n', (8457, 8477), False, 'from pathlib import Path, PurePosixPath\n'), ((5324, 5352), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (5342, 5352), False, 'import h5py\n'), ((5587, 5617), 'numpy.asarray', 'np.asarray', (['self.__patched_msm'], {}), '(self.__patched_msm)\n', (5597, 5617), True, 'import numpy as np\n'), ((5802, 5832), 'numpy.asarray', 'np.asarray', (['self.__patched_msm'], {}), '(self.__patched_msm)\n', (5812, 5832), True, 'import numpy as np\n'), ((12522, 12561), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', '"""OBSERVATIONS"""'], {}), "(msm_path, 'OBSERVATIONS')\n", (12535, 12561), False, 'from pathlib import Path, PurePosixPath\n'), ((14804, 14843), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', '"""OBSERVATIONS"""'], {}), "(msm_path, 'OBSERVATIONS')\n", (14817, 14843), False, 'from pathlib import Path, PurePosixPath\n'), ((20823, 20860), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', '"""INSTRUMENT"""'], {}), "(msm_path, 
'INSTRUMENT')\n", (20836, 20860), False, 'from pathlib import Path, PurePosixPath\n'), ((20921, 20957), 'numpy.squeeze', 'np.squeeze', (["grp['housekeeping_data']"], {}), "(grp['housekeeping_data'])\n", (20931, 20957), True, 'import numpy as np\n'), ((25944, 25978), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', '"""GEODATA"""'], {}), "(msm_path, 'GEODATA')\n", (25957, 25978), False, 'from pathlib import Path, PurePosixPath\n'), ((28568, 28611), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', 'dset_grp', 'msm_dset'], {}), '(msm_path, dset_grp, msm_dset)\n', (28581, 28611), False, 'from pathlib import Path, PurePosixPath\n'), ((33466, 33524), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', '"""INSTRUMENT"""', '"""housekeeping_data"""'], {}), "(msm_path, 'INSTRUMENT', 'housekeeping_data')\n", (33479, 33524), False, 'from pathlib import Path, PurePosixPath\n'), ((35110, 35153), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', 'dset_grp', 'msm_dset'], {}), '(msm_path, dset_grp, msm_dset)\n', (35123, 35153), False, 'from pathlib import Path, PurePosixPath\n'), ((3216, 3233), 'pathlib.Path', 'Path', (['icm_product'], {}), '(icm_product)\n', (3220, 3233), False, 'from pathlib import Path, PurePosixPath\n'), ((5094, 5111), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5109, 5111), False, 'from datetime import datetime, timedelta\n'), ((7833, 7876), 'pathlib.PurePosixPath', 'PurePosixPath', (['f"""BAND{ii}_{name}"""', 'msm_type'], {}), "(f'BAND{ii}_{name}', msm_type)\n", (7846, 7876), False, 'from pathlib import Path, PurePosixPath\n'), ((16257, 16302), 'numpy.append', 'np.append', (['res', "grp['instrument_settings'][:]"], {}), "(res, grp['instrument_settings'][:])\n", (16266, 16302), True, 'import numpy as np\n'), ((17200, 17298), 'pathlib.PurePosixPath', 'PurePosixPath', (['f"""BAND{band}_CALIBRATION"""', 'f"""BACKGROUND_RADIANCE_MODE_{icid:04d}"""', '"""INSTRUMENT"""'], {}), "(f'BAND{band}_CALIBRATION',\n 
f'BACKGROUND_RADIANCE_MODE_{icid:04d}', 'INSTRUMENT')\n", (17213, 17298), False, 'from pathlib import Path, PurePosixPath\n'), ((17496, 17533), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', '"""INSTRUMENT"""'], {}), "(msm_path, 'INSTRUMENT')\n", (17509, 17533), False, 'from pathlib import Path, PurePosixPath\n'), ((19920, 19956), 'numpy.squeeze', 'np.squeeze', (["grp['housekeeping_data']"], {}), "(grp['housekeeping_data'])\n", (19930, 19956), True, 'import numpy as np\n'), ((25256, 25276), 'numpy.squeeze', 'np.squeeze', (['grp[key]'], {}), '(grp[key])\n', (25266, 25276), True, 'import numpy as np\n'), ((26092, 26112), 'numpy.squeeze', 'np.squeeze', (['grp[key]'], {}), '(grp[key])\n', (26102, 26112), True, 'import numpy as np\n'), ((27737, 27780), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', 'dset_grp', 'msm_dset'], {}), '(msm_path, dset_grp, msm_dset)\n', (27750, 27780), False, 'from pathlib import Path, PurePosixPath\n'), ((29846, 29872), 'numpy.squeeze', 'np.squeeze', (['dset[data_sel]'], {}), '(dset[data_sel])\n', (29856, 29872), True, 'import numpy as np\n'), ((31653, 31696), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path', 'dset_grp', 'msm_dset'], {}), '(msm_path, dset_grp, msm_dset)\n', (31666, 31696), False, 'from pathlib import Path, PurePosixPath\n'), ((11954, 11977), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path'], {}), '(msm_path)\n', (11967, 11977), False, 'from pathlib import Path, PurePosixPath\n'), ((14114, 14137), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path'], {}), '(msm_path)\n', (14127, 14137), False, 'from pathlib import Path, PurePosixPath\n'), ((16363, 16386), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path'], {}), '(msm_path)\n', (16376, 16386), False, 'from pathlib import Path, PurePosixPath\n'), ((16956, 17001), 'numpy.append', 'np.append', (['res', "grp['instrument_settings'][:]"], {}), "(res, grp['instrument_settings'][:])\n", (16965, 17001), True, 'import numpy as np\n'), ((20020, 
20056), 'numpy.squeeze', 'np.squeeze', (["grp['housekeeping_data']"], {}), "(grp['housekeeping_data'])\n", (20030, 20056), True, 'import numpy as np\n'), ((20129, 20152), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path'], {}), '(msm_path)\n', (20142, 20152), False, 'from pathlib import Path, PurePosixPath\n'), ((20648, 20684), 'numpy.squeeze', 'np.squeeze', (["grp['housekeeping_data']"], {}), "(grp['housekeeping_data'])\n", (20658, 20684), True, 'import numpy as np\n'), ((25348, 25371), 'pathlib.PurePosixPath', 'PurePosixPath', (['msm_path'], {}), '(msm_path)\n', (25361, 25371), False, 'from pathlib import Path, PurePosixPath\n'), ((25886, 25906), 'numpy.squeeze', 'np.squeeze', (['grp[key]'], {}), '(grp[key])\n', (25896, 25906), True, 'import numpy as np\n'), ((28962, 28998), 'pathlib.PurePosixPath', 'PurePosixPath', (['dset.dims[xx][0].name'], {}), '(dset.dims[xx][0].name)\n', (28975, 28998), False, 'from pathlib import Path, PurePosixPath\n'), ((31988, 32026), 'numpy.issubdtype', 'np.issubdtype', (['buff.dtype', 'np.floating'], {}), '(buff.dtype, np.floating)\n', (32001, 32026), True, 'import numpy as np\n'), ((32336, 32374), 'numpy.issubdtype', 'np.issubdtype', (['buff.dtype', 'np.floating'], {}), '(buff.dtype, np.floating)\n', (32349, 32374), True, 'import numpy as np\n'), ((35384, 35420), 'pathlib.PurePosixPath', 'PurePosixPath', (['dset.dims[xx][0].name'], {}), '(dset.dims[xx][0].name)\n', (35397, 35420), False, 'from pathlib import Path, PurePosixPath\n'), ((20748, 20784), 'numpy.squeeze', 'np.squeeze', (["grp['housekeeping_data']"], {}), "(grp['housekeeping_data'])\n", (20758, 20784), True, 'import numpy as np\n'), ((36546, 36560), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (36554, 36560), True, 'import numpy as np\n'), ((36311, 36336), 'numpy.isnan', 'np.isnan', (['data[indx, ...]'], {}), '(data[indx, ...])\n', (36319, 36336), True, 'import numpy as np\n'), ((36032, 36058), 'numpy.split', 'np.split', (['data', '(2)'], {'axis': 'jj'}), 
'(data, 2, axis=jj)\n', (36040, 36058), True, 'import numpy as np\n')] |
import cv2 # Importe l'api OpenCV
import numpy as np # Importe le module "numpy"
def main():
    """Detect text in ``input.png`` and return a list of character crops.

    Runs the pretrained TextDetectorCNN text detector, merges overlapping
    detections into single boxes, then extracts each character contour found
    inside those boxes, resized to roughly 28x28 pixels.

    Returns
    -------
    list
        character images (numpy arrays) extracted from the detected text
    """
    # Keep only detections scored above this confidence threshold (80%).
    confidence_ratio = 0.8
    image = cv2.imread('input.png')  # image to analyse
    # White canvas onto which every accepted text region is copied.
    temp_image = np.zeros(image.shape, np.uint8)
    temp_image.fill(255)
    # Text-region detector pretrained on the ICDAR13 dataset; the network
    # configuration is loaded from "model.prototxt".
    detector = cv2.text_TextDetectorCNN.create("model.prototxt", "icdar13.caffemodel")
    Bbox, confidence = detector.detect(image)
    # Merge overlapping text-box detections into single bounding boxes.
    filtered = []
    for i in range(len(Bbox)):
        if confidence[i] > confidence_ratio:
            # Copy the accepted region onto the white canvas.
            temp_image[Bbox[i][1]:(Bbox[i][1]+Bbox[i][3]), Bbox[i][0]:(Bbox[i][0]+Bbox[i][2])] = \
                image[Bbox[i][1]:(Bbox[i][1]+Bbox[i][3]), Bbox[i][0]:(Bbox[i][0]+Bbox[i][2])]
            overwrited = False
            for n in range(len(filtered)):
                # The four corners of the candidate box.
                rect_points = [(Bbox[i][0], Bbox[i][1]),
                               (Bbox[i][0]+Bbox[i][2], Bbox[i][1]),
                               (Bbox[i][0], Bbox[i][1]+Bbox[i][3]),
                               (Bbox[i][0]+Bbox[i][2], Bbox[i][1]+Bbox[i][3])]
                for point in rect_points:
                    if point[0] in range(filtered[n][0], filtered[n][0]+filtered[n][2]) and point[1] in range(filtered[n][1], filtered[n][1]+filtered[n][3]):
                        overwrited = True
                # NOTE(review): ``overwrited`` is never reset inside this
                # loop, so once the box overlaps one entry it is merged into
                # every following entry as well — confirm this is intended.
                if overwrited:
                    # Grow filtered[n] to also cover the candidate box.
                    if Bbox[i][0] < filtered[n][0]:
                        filtered[n][2] += (filtered[n][0]-Bbox[i][0])
                        filtered[n][0] = Bbox[i][0]
                    if Bbox[i][1] < filtered[n][1]:
                        filtered[n][3] += (filtered[n][1]-Bbox[i][1])
                        filtered[n][1] = Bbox[i][1]
                    if (Bbox[i][0]+Bbox[i][2]) > (filtered[n][0]+filtered[n][2]):
                        filtered[n][2] = (Bbox[i][0]+Bbox[i][2])-filtered[n][0]
                    if (Bbox[i][1]+Bbox[i][3]) > (filtered[n][1]+filtered[n][3]):
                        filtered[n][3] = (Bbox[i][1]+Bbox[i][3])-filtered[n][1]
            if not overwrited:
                filtered.append(Bbox[i])
    image = temp_image  # continue on the canvas holding only text regions
    print("Détecté : ", len(filtered), " chaines de caractères")
    char_list = []
    for box in filtered:  # extract the characters of each text string
        # Visualise the merged text box.
        cv2.rectangle(image, (box[0], box[1]), (box[0]+box[2], box[1]+box[3]), (0, 255, 0), 1)
        cropped = cv2.cvtColor(image[box[1]:(box[1]+box[3]), box[0]:(box[0]+box[2])], cv2.COLOR_BGR2GRAY)
        cropped = cv2.inRange(cropped, 100, 245)  # binarise / reduce noise
        height, width = cropped.shape
        # Add a margin so the contour search sees the whole string.
        temp_image = np.zeros((height+20, width+20), np.uint8)
        temp_image[10:(height+10), 10:(width+10)] = cropped
        contours, hierarchy = cv2.findContours(temp_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
        cv2.drawContours(temp_image, contours, -1, (255, 255, 255))
        for c in contours:
            x, y, w, h = cv2.boundingRect(c)
            # Keep only contours roughly the height of the text line.
            if h in range((height//3)*2, height):
                cropped_char = image[box[1]+y-10:box[1]+y-10+h, box[0]+x-10:box[0]-10+x+w]
                # Bug fix: the crop is a colour image, so its shape has a
                # third (channel) entry — only unpack the first two.
                height_chr, width_chr = cropped_char.shape[:2]
                if height_chr == 0 or width_chr == 0:
                    continue  # degenerate crop (margin fell outside image)
                # Bug fix: cv2.resize *returns* the resized image; the
                # original call passed the output buffer positionally and
                # discarded the result, leaving resized_char as None.
                resized_char = cv2.resize(cropped_char, None,
                                          fx=28/width_chr, fy=28/height_chr,
                                          interpolation=cv2.INTER_CUBIC)
                char_list.append(resized_char)
                # Draw the character box (with the margin removed).
                cv2.rectangle(image, (box[0]+x-10, box[1]+y-10), (box[0]-10+x+w, box[1]-10+y+h), (255, 0, 0), 1)
    return char_list
# Sauvegarde l'image traité et analysé dans le fichier "output.jpg"
| [
"cv2.rectangle",
"cv2.text_TextDetectorCNN.create",
"cv2.drawContours",
"cv2.inRange",
"numpy.zeros",
"cv2.cvtColor",
"cv2.findContours",
"cv2.resize",
"cv2.imread",
"cv2.boundingRect"
] | [((293, 316), 'cv2.imread', 'cv2.imread', (['"""input.png"""'], {}), "('input.png')\n", (303, 316), False, 'import cv2\n'), ((361, 392), 'numpy.zeros', 'np.zeros', (['image.shape', 'np.uint8'], {}), '(image.shape, np.uint8)\n', (369, 392), True, 'import numpy as np\n'), ((562, 633), 'cv2.text_TextDetectorCNN.create', 'cv2.text_TextDetectorCNN.create', (['"""model.prototxt"""', '"""icdar13.caffemodel"""'], {}), "('model.prototxt', 'icdar13.caffemodel')\n", (593, 633), False, 'import cv2\n'), ((3234, 3328), 'cv2.rectangle', 'cv2.rectangle', (['image', '(box[0], box[1])', '(box[0] + box[2], box[1] + box[3])', '(0, 255, 0)', '(1)'], {}), '(image, (box[0], box[1]), (box[0] + box[2], box[1] + box[3]),\n (0, 255, 0), 1)\n', (3247, 3328), False, 'import cv2\n'), ((3338, 3430), 'cv2.cvtColor', 'cv2.cvtColor', (['image[box[1]:box[1] + box[3], box[0]:box[0] + box[2]]', 'cv2.COLOR_BGR2GRAY'], {}), '(image[box[1]:box[1] + box[3], box[0]:box[0] + box[2]], cv2.\n COLOR_BGR2GRAY)\n', (3350, 3430), False, 'import cv2\n'), ((3444, 3474), 'cv2.inRange', 'cv2.inRange', (['cropped', '(100)', '(245)'], {}), '(cropped, 100, 245)\n', (3455, 3474), False, 'import cv2\n'), ((3572, 3617), 'numpy.zeros', 'np.zeros', (['(height + 20, width + 20)', 'np.uint8'], {}), '((height + 20, width + 20), np.uint8)\n', (3580, 3617), True, 'import numpy as np\n'), ((3836, 3907), 'cv2.findContours', 'cv2.findContours', (['temp_image', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_TC89_KCOS'], {}), '(temp_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)\n', (3852, 3907), False, 'import cv2\n'), ((3916, 3975), 'cv2.drawContours', 'cv2.drawContours', (['temp_image', 'contours', '(-1)', '(255, 255, 255)'], {}), '(temp_image, contours, -1, (255, 255, 255))\n', (3932, 3975), False, 'import cv2\n'), ((4117, 4136), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (4133, 4136), False, 'import cv2\n'), ((4389, 4495), 'cv2.resize', 'cv2.resize', (['cropped_char', 'resized_char', '(28 / width_chr)', '(28 / 
height_chr)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(cropped_char, resized_char, 28 / width_chr, 28 / height_chr,\n interpolation=cv2.INTER_CUBIC)\n', (4399, 4495), False, 'import cv2\n'), ((4551, 4671), 'cv2.rectangle', 'cv2.rectangle', (['image', '(box[0] + x - 10, box[1] + y - 10)', '(box[0] - 10 + x + w, box[1] - 10 + y + h)', '(255, 0, 0)', '(1)'], {}), '(image, (box[0] + x - 10, box[1] + y - 10), (box[0] - 10 + x +\n w, box[1] - 10 + y + h), (255, 0, 0), 1)\n', (4564, 4671), False, 'import cv2\n')] |
from pak.datasets.Dataset import Dataset
import numpy as np
import zipfile
import tarfile
import urllib.request
import shutil
from os import makedirs, listdir
from os.path import join, isfile, isdir, exists, splitext
from scipy.ndimage import imread
from scipy.misc import imresize
from scipy.io import loadmat
from skimage.transform import resize
from pak import utils
from pak.util import mpii_human_pose as mpii_hp
import h5py
from enum import Enum
class EgoHands_config(Enum):
    """Ground-truth geometry options for EgoHands.get_raw."""
    Polygon = 1  # polygon as in the original data
    AABB = 2     # AABB (axis-aligned bounding box) for simplification
class EgoHands(Dataset):
    """Loader for the EgoHands dataset (video frames + hand annotations)."""

    def __init__(self, root, verbose=True):
        """Download (if needed) and unpack the dataset below *root*.

        Parameters
        ----------
        root : str
            directory under which 'egohands_data' is created
        verbose : bool
            forwarded to the Dataset base class for progress output
        """
        Dataset.__init__(self, "egohands_data", root, verbose)
        url = 'http://vision.soic.indiana.edu/egohands_files/egohands_data.zip'
        self.root_export = join(root, "egohands_data")
        self.download_and_unzip(url)

    def get_raw(self, config=EgoHands_config.Polygon, memmapped=False):
        """Load all frames and hand annotations.

        Parameters
        ----------
        config : EgoHands_config
            Polygon returns the raw polygons; AABB converts each visible
            hand polygon to a ((max_x, max_y), (min_x, min_y)) corner pair.
        memmapped : bool
            when True, frames are cached in a read-only numpy memmap file
            instead of being held fully in memory.

        Returns
        -------
        tuple (X, Y)
            X: frames as uint8 array or memmap of shape
               (videos, 100, 720, 1280, 3); Y: per-video, per-frame,
            per-hand annotations.
        """
        # step 1: get all videos
        labelled_samples_url = join(self.root_export, '_LABELLED_SAMPLES')
        all_videos = [join(labelled_samples_url, f) for f in \
            listdir(labelled_samples_url) if isdir(join(labelled_samples_url, f))]
        # step 2: load video frames and polygon dataset
        Y = []
        if memmapped:
            # 48 videos x 100 frames of 720x1280 RGB
            X_shape = (48, 100, 720, 1280, 3)
            fmmap = join(self.root_export, 'egohands.memmap')
            fmmap_exists = isfile(fmmap)
            if not fmmap_exists:
                X = np.memmap(fmmap, dtype='uint8', mode='w+', shape=X_shape)
        else:
            X = []
        for vindx, vurl in enumerate(all_videos):
            imgs = sorted([f \
                for f in listdir(vurl) if isfile(join(vurl, f)) and \
                f.endswith('jpg')])
            assert(len(imgs) == 100) # sanity check
            if memmapped:
                if not fmmap_exists:
                    # if we already created the memmap file we do NOT
                    # want to recreate it!
                    imgs = np.array([imread(join(vurl, f)) for f in imgs], \
                        'uint8')
                    X[vindx] = imgs
            else:
                imgs = np.array([imread(join(vurl, f)) for f in imgs], 'uint8')
                X.append(imgs)
            # Per-video ground truth: 100 frames x 4 hand polygons.
            polygon_url = join(vurl, 'polygons.mat')
            M = loadmat(polygon_url)['polygons'][0]
            Y_single_video = []
            for i in range(100):
                V = M[i]
                Y_single_frame = []
                for hand in range(4):
                    H = V[hand]
                    #if len(H) > 0:
                    if config is EgoHands_config.Polygon:
                        Y_single_frame.append(H)
                    elif len(H) > 1: # hand has polygon points, i.e. visible
                        # NOTE(review): in AABB mode invisible hands are
                        # skipped, so the inner lists have variable length.
                        x = H[:,0]
                        y = H[:,1]
                        # corner with max coords / corner with min coords
                        top_right = (np.max(x), np.max(y))
                        bottom_left = (np.min(x), np.min(y))
                        Y_single_frame.append((top_right, bottom_left))
                Y_single_video.append(Y_single_frame)
            Y.append(Y_single_video)
        # step 2: read metadata
        #M = loadmat(join(self.root_export, 'metadata.mat'))
        #
        # M = loadmat(join(self.root_export, '_LABELLED_SAMPLES/CARDS_COURTYARD_B_T/polygons.mat'))
        #
        # X = imread(join(labelled_samples_url, 'CARDS_COURTYARD_B_T/frame_0011.jpg'))
        if memmapped:
            if not fmmap_exists:
                #del X # flush the file
                utils.talk('flush memmap to file', self.verbose)
                X.flush() # write memmap to files
                del X
                # reopen read-only so callers cannot alter the cache
                X = np.memmap(fmmap, dtype='uint8', mode='r', shape=X_shape)
        else:
            X = np.array(X, 'uint8')
        return X, np.array(Y)
| [
"os.listdir",
"numpy.memmap",
"os.path.join",
"pak.datasets.Dataset.Dataset.__init__",
"scipy.io.loadmat",
"numpy.max",
"os.path.isfile",
"numpy.array",
"pak.utils.talk",
"numpy.min"
] | [((683, 737), 'pak.datasets.Dataset.Dataset.__init__', 'Dataset.__init__', (['self', '"""egohands_data"""', 'root', 'verbose'], {}), "(self, 'egohands_data', root, verbose)\n", (699, 737), False, 'from pak.datasets.Dataset import Dataset\n'), ((845, 872), 'os.path.join', 'join', (['root', '"""egohands_data"""'], {}), "(root, 'egohands_data')\n", (849, 872), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((1072, 1115), 'os.path.join', 'join', (['self.root_export', '"""_LABELLED_SAMPLES"""'], {}), "(self.root_export, '_LABELLED_SAMPLES')\n", (1076, 1115), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((1138, 1167), 'os.path.join', 'join', (['labelled_samples_url', 'f'], {}), '(labelled_samples_url, f)\n', (1142, 1167), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((1423, 1464), 'os.path.join', 'join', (['self.root_export', '"""egohands.memmap"""'], {}), "(self.root_export, 'egohands.memmap')\n", (1427, 1464), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((1492, 1505), 'os.path.isfile', 'isfile', (['fmmap'], {}), '(fmmap)\n', (1498, 1505), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((2389, 2415), 'os.path.join', 'join', (['vurl', '"""polygons.mat"""'], {}), "(vurl, 'polygons.mat')\n", (2393, 2415), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((3781, 3837), 'numpy.memmap', 'np.memmap', (['fmmap'], {'dtype': '"""uint8"""', 'mode': '"""r"""', 'shape': 'X_shape'}), "(fmmap, dtype='uint8', mode='r', shape=X_shape)\n", (3790, 3837), True, 'import numpy as np\n'), ((3868, 3888), 'numpy.array', 'np.array', (['X', '"""uint8"""'], {}), "(X, 'uint8')\n", (3876, 3888), True, 'import numpy as np\n'), ((3908, 3919), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3916, 3919), True, 'import numpy as np\n'), ((1191, 1220), 'os.listdir', 'listdir', (['labelled_samples_url'], {}), '(labelled_samples_url)\n', (1198, 1220), 
False, 'from os import makedirs, listdir\n'), ((1559, 1616), 'numpy.memmap', 'np.memmap', (['fmmap'], {'dtype': '"""uint8"""', 'mode': '"""w+"""', 'shape': 'X_shape'}), "(fmmap, dtype='uint8', mode='w+', shape=X_shape)\n", (1568, 1616), True, 'import numpy as np\n'), ((3643, 3691), 'pak.utils.talk', 'utils.talk', (['"""flush memmap to file"""', 'self.verbose'], {}), "('flush memmap to file', self.verbose)\n", (3653, 3691), False, 'from pak import utils\n'), ((1230, 1259), 'os.path.join', 'join', (['labelled_samples_url', 'f'], {}), '(labelled_samples_url, f)\n', (1234, 1259), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((2432, 2452), 'scipy.io.loadmat', 'loadmat', (['polygon_url'], {}), '(polygon_url)\n', (2439, 2452), False, 'from scipy.io import loadmat\n'), ((1766, 1779), 'os.listdir', 'listdir', (['vurl'], {}), '(vurl)\n', (1773, 1779), False, 'from os import makedirs, listdir\n'), ((2291, 2304), 'os.path.join', 'join', (['vurl', 'f'], {}), '(vurl, f)\n', (2295, 2304), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((1790, 1803), 'os.path.join', 'join', (['vurl', 'f'], {}), '(vurl, f)\n', (1794, 1803), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((2131, 2144), 'os.path.join', 'join', (['vurl', 'f'], {}), '(vurl, f)\n', (2135, 2144), False, 'from os.path import join, isfile, isdir, exists, splitext\n'), ((2983, 2992), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (2989, 2992), True, 'import numpy as np\n'), ((2994, 3003), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (3000, 3003), True, 'import numpy as np\n'), ((3044, 3053), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (3050, 3053), True, 'import numpy as np\n'), ((3055, 3064), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (3061, 3064), True, 'import numpy as np\n')] |
# imports
import time
from pathlib import Path
from typing import Tuple, Union
import numpy as np
import torch
from shapely.geometry.point import Point
from skimage.draw import circle_perimeter_aa
from torch import nn, Tensor # using torch.Tensor is annoying
from torch.utils.data import Dataset, DataLoader
from CNN import CNN
CHECKPOINT_PATH = "./Checkpoints/"  # where model checkpoints are stored
EVAL_PATH = "./eval"  # output directory for evaluation artifacts
Q2_DATA = "../q2_data"  # root directory of the generated circle dataset
TRAIN = "Train"  # training sub-directory name
TEST = "Test"  # test sub-directory name
INPUT = "input"  # input-images sub-directory name
LABEL = "label"  # labels sub-directory name
# NOTE(review): MyDataSet formats this into np.load paths, but
# generate_data saves via np.save which writes ".npy" — verify extension.
FILE_NAME = "{}.png"
FILE_BATCH_SIZE = 100  # images per file when data is stored in batches
IMG_DIM = 200  # width/height in pixels of each generated image
class AttrDict(dict):
    """
    A dictionary whose keys are also attributes, holding the network
    configuration.  Missing keys fall back to the defaults below.
    """
    gpu: bool
    checkpoint: str
    kernel: int
    num_filters: int
    learn_rate: float
    batch_size: int
    epochs: int
    seed: int
    total_img: int

    # Default configuration, applied before any caller-supplied values.
    _DEFAULTS = {
        'gpu': True,  # set to False if machine does not support cuda
        'checkpoint': "",
        'kernel': 3,
        'num_filters': 64,
        'learn_rate': 1e-3,
        'batch_size': 5,
        'epochs': 25,
        'seed': 0,
        'total_img': 5000,
    }

    def __init__(self, *args, **kwargs):
        """
        Initialize with the defaults, then apply any overrides given as
        positional mapping/iterable or keyword arguments (dict semantics).
        """
        super().__init__()
        self.__dict__ = self  # attribute access aliases item access
        self.update(self._DEFAULTS)
        # Bug fix: the original applied the defaults *after* the caller's
        # arguments, so explicit settings (e.g. AttrDict(gpu=False)) were
        # silently clobbered back to the defaults.
        self.update(*args, **kwargs)
class MyDataSet(Dataset):
    """
    Torch dataset of noisy-circle images with (row, col, radius) labels.

    The arrays are produced by ``generate_data`` with ``np.save``, which
    stores them as ``<split>_input.npy`` / ``<split>_label.npy``.
    """
    is_train: bool  # True -> training split, False -> test split

    def __init__(self, root_dir, is_train) -> None:
        """
        Load the pre-generated arrays for the requested split.

        :param root_dir: directory holding the ``.npy`` files
        :param is_train: load the training split when True, else the test one
        """
        self.is_train = is_train
        split = "train" if is_train else "test"
        # Bug fix: ``np.save`` appends ".npy", but the original code built
        # ".png" file names (via FILE_NAME) that never exist on disk.
        img_path = Path(root_dir).joinpath(f"{split}_input.npy")
        label_path = Path(root_dir).joinpath(f"{split}_label.npy")
        self.images = np.load(str(img_path))
        self.labels = np.load(str(label_path))

    def __len__(self) -> int:
        """
        Returns the size of the dataset
        :return: the number of (image, label) pairs in this split
        """
        return self.images.shape[0]

    def __getitem__(self, idx: Union[int, Tensor]) \
            -> Tuple[Tensor, Tensor]:
        """
        Get the dataset entry at index idx
        :param idx: integer index (or an index tensor)
        :return: tuple of (image, label) as float32 tensors
        """
        if torch.is_tensor(idx):
            idx = idx.tolist()
        images = torch.from_numpy(self.images[idx]).type(torch.float32)
        labels = torch.from_numpy(self.labels[idx]).type(torch.float32)
        return images, labels
def get_data_loaders(args: AttrDict):
    """
    Build shuffling data loaders for the training and testing splits.

    (Fixed docstring: the previously documented ``augment`` parameter does
    not exist; only ``args.batch_size`` is used here.)

    :param args: the arguments for the network (``batch_size`` is read)
    :return: tuple ``(train_data_loader, test_data_loader)``
    """
    train_dataset = MyDataSet(Q2_DATA, True)
    test_dataset = MyDataSet(Q2_DATA, False)
    train_data_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                                   shuffle=True)
    test_data_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                                  shuffle=True)
    return train_data_loader, test_data_loader
def generate_data(args):
    """Generate a synthetic noisy-circle dataset and save it as .npy archives.

    Renders ``args.total_img`` noisy circle images, splits them 70/30 into
    train/test, and writes four arrays (``train_input``, ``train_label``,
    ``test_input``, ``test_label``) directly under ``Q2_DATA``. Each label is
    the (row, col, radius) parameter triple of its circle.

    :param args: configuration object with a ``total_img`` attribute
    """
    data_path = Path(Q2_DATA)
    train_path = data_path.joinpath(TRAIN)
    test_path = data_path.joinpath(TEST)
    train_input_path = train_path.joinpath(INPUT)
    train_label_path = train_path.joinpath(LABEL)
    test_input_path = test_path.joinpath(INPUT)
    test_label_path = test_path.joinpath(LABEL)
    # NOTE(review): these subdirectories are created but never written to —
    # the arrays below go straight under data_path. Kept for side-effect
    # compatibility with any external tooling that expects them.
    train_input_path.mkdir(exist_ok=True, parents=True)
    train_label_path.mkdir(exist_ok=True, parents=True)
    test_input_path.mkdir(exist_ok=True, parents=True)
    test_label_path.mkdir(exist_ok=True, parents=True)
    split_idx = int(args.total_img * .7)
    train_input, train_label, test_input, test_label = [], [], [], []
    for i in range(args.total_img):
        params, img = noisy_circle(IMG_DIM, 50, 2)
        # add a leading channel axis: (1, H, W)
        img = img[np.newaxis, :, :]
        # BUG FIX: ``np.float`` was a deprecated alias of the builtin float
        # and was removed in NumPy 1.24; ``dtype=float`` keeps the same
        # float64 result on every NumPy version.
        if i < split_idx:
            train_input.append(img)
            train_label.append(np.array(params, dtype=float))
        else:
            test_input.append(img)
            test_label.append(np.array(params, dtype=float))
    np.save(str(data_path.joinpath("train_input")), np.array(train_input))
    np.save(str(data_path.joinpath("train_label")), np.array(train_label))
    np.save(str(data_path.joinpath("test_input")), np.array(test_input))
    np.save(str(data_path.joinpath("test_label")), np.array(test_label))
def draw_circle(img, row, col, rad):
    """Rasterize an anti-aliased circle outline onto ``img`` in place.

    :param img: 2-D array the circle is drawn into (also returned)
    :param row: center row
    :param col: center column
    :param rad: radius in pixels
    :return: the same ``img`` array, for chaining
    """
    rows, cols, intensity = circle_perimeter_aa(row, col, rad)
    height, width = img.shape[0], img.shape[1]
    # Clip perimeter pixels that fall outside the image bounds.
    inside = (rows >= 0) & (rows < height) & (cols >= 0) & (cols < width)
    img[rows[inside], cols[inside]] = intensity[inside]
    return img
def noisy_circle(size, radius, noise):
    """Create a ``size`` x ``size`` image containing one random noisy circle.

    :param size: side length of the square image
    :param radius: upper bound for the random radius (lower bound is 10)
    :param noise: scale of the uniform per-pixel noise added to the image
    :return: ((row, col, rad), img) — the circle parameters and the image
    """
    # BUG FIX: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    # (i.e. float64) is the exact equivalent.
    img = np.zeros((size, size), dtype=float)
    # Circle
    row = np.random.randint(size)
    col = np.random.randint(size)
    rad = np.random.randint(10, max(10, radius))
    draw_circle(img, row, col, rad)
    # Noise
    img += noise * np.random.rand(*img.shape)
    return (row, col, rad), img
def iou(params0, params1):
    """Intersection-over-union of two circles given as (row, col, radius).

    :param params0: first circle's (row, col, radius)
    :param params1: second circle's (row, col, radius)
    :return: area(intersection) / area(union) of the two discs
    """
    r0, c0, rad0 = params0
    r1, c1, rad1 = params1
    # Model each circle as a shapely disc via a buffered point.
    disc0 = Point(r0, c0).buffer(rad0)
    disc1 = Point(r1, c1).buffer(rad1)
    overlap_area = disc0.intersection(disc1).area
    union_area = disc0.union(disc1).area
    return overlap_area / union_area
def train(args, criterion, train_data_loader, test_data_loader, model=None):
    """Train a CNN regressor, validating after every epoch.

    :param args: hyper-parameters — reads kernel, num_filters, learn_rate,
        epochs, gpu and checkpoint
    :param criterion: loss callable taking (outputs, labels)
    :param train_data_loader: loader yielding (images, labels) batches
    :param test_data_loader: loader passed to run_validation_step
    :param model: optional pre-built model; a fresh CNN is created when None
    :return: the trained model
    """
    # Load Model
    if model is None:
        model = CNN(kernel=args.kernel, num_filters=args.num_filters,
                    num_colours=3, num_in_channels=1)
    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learn_rate)
    print("Beginning training ...")
    if args.gpu:
        model.cuda()
    start = time.time()
    train_losses, valid_losses, valid_accuracies = [], [], []
    for epoch in range(1, args.epochs + 1):
        # Training
        model.train()
        losses = []
        for i, (images, labels) in enumerate(train_data_loader):
            if args.gpu:
                images, labels = images.cuda(), labels.cuda()
            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            losses.append(loss.data.item())
        # Report training result
        avg_loss = np.mean(losses)
        train_losses.append(avg_loss)
        time_elapsed = time.time() - start
        print('Epoch [%d/%d], Loss: %.4f, Time (s): %d' % (
            epoch, args.epochs, float(avg_loss), time_elapsed))
        # Evaluate the model
        model.eval() # Change model to 'eval' mode (BN uses moving mean/var).
        val_loss, val_acc = run_validation_step(model, criterion, args.gpu,
                                                test_data_loader)
        time_elapsed = time.time() - start
        valid_losses.append(val_loss)
        valid_accuracies.append(val_acc)
        print('Epoch [%d/%d], Val Loss: %.4f, Val Acc: %.1f%%, Time(s): %d' % (
            epoch, args.epochs, float(val_loss), val_acc, time_elapsed))
    # Persist the final weights if a checkpoint path was configured.
    if args.checkpoint:
        print('Saving model...')
        torch.save(model.state_dict(), args.checkpoint)
    return model
def euclidean_distance_loss(output, labels):
    """Scalar loss: square root of the total squared error over the batch.

    :param output: predicted tensor
    :param labels: target tensor, same shape as ``output``
    :return: 0-d tensor, sqrt(sum((output - labels)**2))
    """
    squared_error = (output - labels) ** 2
    distance = squared_error.sum().sqrt()
    # The outer sum is a no-op on a 0-d tensor but mirrors the original form.
    return distance.sum()
def run_validation_step(cnn, criterion, gpu, test_data_loader):
    """Run one evaluation pass over ``test_data_loader``.

    Accuracy is computed by thresholding the raw outputs at 0.5 and
    comparing element-wise against the labels.

    :param cnn: the model (any callable mapping images -> outputs)
    :param criterion: loss callable taking (outputs, labels)
    :param gpu: move batches to CUDA when True
    :param test_data_loader: iterable of (images, labels) batches
    :return: (mean batch loss, accuracy percentage)
    """
    batch_losses = []
    n_correct = 0.0
    n_total = 0.0
    for images, labels in test_data_loader:
        if gpu:
            images = images.cuda()
            labels = labels.cuda()
        predictions = cnn(images)
        batch_loss = criterion(predictions, labels)
        batch_losses.append(batch_loss.data.item())
        n_total += predictions.numel()
        thresholded = (predictions > 0.5).float()
        n_correct += (thresholded == labels).float().sum()
    return np.mean(batch_losses), 100 * n_correct / n_total
def train_model():
    """Build the default hyper-parameters and train the circle regressor.

    :return: the trained model returned by ``train``
    """
    args = AttrDict()
    args_dict = {
        'kernel': 3,
        'num_filters': 32,
        'learn_rate': 1e-3,
        'batch_size': 20,
        'epochs': 25,
    }
    args.update(args_dict)
    train_data_loader, test_data_loader = get_data_loaders(args)
    args.checkpoint = CHECKPOINT_PATH + "2_1_{}.pt".format(args.epochs)
    # NOTE(review): ``train`` also reads args.gpu, which is never set here —
    # verify AttrDict supplies a default, otherwise this fails at train time.
    return train(args, nn.MSELoss(), train_data_loader,
                 test_data_loader)
def find_circle(img, model):
    """Run the detector on one image tensor and return (row, col, rad).

    :param img: input image tensor (moved to CUDA before inference)
    :param model: trained model producing a (batch, 3) prediction
    :return: the three predicted circle parameters of the first batch item
    """
    prediction = model(img.cuda()).cpu().detach().numpy()
    first = prediction[0]
    return first[0], first[1], first[2]
def main():
    """Train the circle detector, then report the fraction of 1000 random
    images whose detection reaches IoU > 0.7 against the ground truth."""
    # generate_data(args)
    # train
    trained_model = train_model()
    # eval
    results = []
    for _ in range(1000):
        params, img = noisy_circle(200, 50, 2)
        # BUG FIX: find_circle(img, model) takes the image first; the call
        # previously passed only the model, raising TypeError on the first
        # iteration. The network expects a (batch, channel, H, W) float32
        # tensor, matching the layout produced by MyDataSet/DataLoader.
        img_tensor = torch.from_numpy(img[np.newaxis, np.newaxis, :, :]).type(torch.float32)
        detected = find_circle(img_tensor, trained_model)
        results.append(iou(params, detected))
    results = np.array(results)
    print((results > 0.7).mean())
if __name__ == '__main__':
    main()
| [
"numpy.mean",
"skimage.draw.circle_perimeter_aa",
"numpy.random.rand",
"pathlib.Path",
"CNN.CNN",
"torch.from_numpy",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"torch.is_tensor",
"torch.nn.MSELoss",
"torch.sum",
"torch.utils.data.DataLoader",
"time.time",
"shapely.geometry.po... | [((3378, 3445), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=True)\n', (3388, 3445), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3504, 3570), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(test_dataset, batch_size=args.batch_size, shuffle=True)\n', (3514, 3570), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3695, 3708), 'pathlib.Path', 'Path', (['Q2_DATA'], {}), '(Q2_DATA)\n', (3699, 3708), False, 'from pathlib import Path\n'), ((5040, 5074), 'skimage.draw.circle_perimeter_aa', 'circle_perimeter_aa', (['row', 'col', 'rad'], {}), '(row, col, rad)\n', (5059, 5074), False, 'from skimage.draw import circle_perimeter_aa\n'), ((5264, 5302), 'numpy.zeros', 'np.zeros', (['(size, size)'], {'dtype': 'np.float'}), '((size, size), dtype=np.float)\n', (5272, 5302), True, 'import numpy as np\n'), ((5327, 5350), 'numpy.random.randint', 'np.random.randint', (['size'], {}), '(size)\n', (5344, 5350), True, 'import numpy as np\n'), ((5361, 5384), 'numpy.random.randint', 'np.random.randint', (['size'], {}), '(size)\n', (5378, 5384), True, 'import numpy as np\n'), ((6233, 6244), 'time.time', 'time.time', ([], {}), '()\n', (6242, 6244), False, 'import time\n'), ((8404, 8419), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (8411, 8419), True, 'import numpy as np\n'), ((9411, 9428), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (9419, 9428), True, 'import numpy as np\n'), ((2808, 2828), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (2823, 2828), False, 'import torch\n'), ((4739, 4760), 'numpy.array', 'np.array', (['train_input'], {}), '(train_input)\n', (4747, 4760), True, 'import numpy as np\n'), ((4814, 4835), 'numpy.array', 'np.array', (['train_label'], {}), '(train_label)\n', 
(4822, 4835), True, 'import numpy as np\n'), ((4888, 4908), 'numpy.array', 'np.array', (['test_input'], {}), '(test_input)\n', (4896, 4908), True, 'import numpy as np\n'), ((4961, 4981), 'numpy.array', 'np.array', (['test_label'], {}), '(test_label)\n', (4969, 4981), True, 'import numpy as np\n'), ((5502, 5528), 'numpy.random.rand', 'np.random.rand', (['*img.shape'], {}), '(*img.shape)\n', (5516, 5528), True, 'import numpy as np\n'), ((5948, 6039), 'CNN.CNN', 'CNN', ([], {'kernel': 'args.kernel', 'num_filters': 'args.num_filters', 'num_colours': '(3)', 'num_in_channels': '(1)'}), '(kernel=args.kernel, num_filters=args.num_filters, num_colours=3,\n num_in_channels=1)\n', (5951, 6039), False, 'from CNN import CNN\n'), ((6881, 6896), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (6888, 6896), True, 'import numpy as np\n'), ((8864, 8876), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (8874, 8876), False, 'from torch import nn, Tensor\n'), ((5666, 5683), 'shapely.geometry.point.Point', 'Point', (['row0', 'col0'], {}), '(row0, col0)\n', (5671, 5683), False, 'from shapely.geometry.point import Point\n'), ((5710, 5727), 'shapely.geometry.point.Point', 'Point', (['row1', 'col1'], {}), '(row1, col1)\n', (5715, 5727), False, 'from shapely.geometry.point import Point\n'), ((6958, 6969), 'time.time', 'time.time', ([], {}), '()\n', (6967, 6969), False, 'import time\n'), ((7377, 7388), 'time.time', 'time.time', ([], {}), '()\n', (7386, 7388), False, 'import time\n'), ((7840, 7873), 'torch.sum', 'torch.sum', (['((output - labels) ** 2)'], {}), '((output - labels) ** 2)\n', (7849, 7873), False, 'import torch\n'), ((2878, 2912), 'torch.from_numpy', 'torch.from_numpy', (['self.images[idx]'], {}), '(self.images[idx])\n', (2894, 2912), False, 'import torch\n'), ((2950, 2984), 'torch.from_numpy', 'torch.from_numpy', (['self.labels[idx]'], {}), '(self.labels[idx])\n', (2966, 2984), False, 'import torch\n'), ((4540, 4572), 'numpy.array', 'np.array', (['params'], {'dtype': 
'np.float'}), '(params, dtype=np.float)\n', (4548, 4572), True, 'import numpy as np\n'), ((4653, 4685), 'numpy.array', 'np.array', (['params'], {'dtype': 'np.float'}), '(params, dtype=np.float)\n', (4661, 4685), True, 'import numpy as np\n'), ((1928, 1942), 'pathlib.Path', 'Path', (['root_dir'], {}), '(root_dir)\n', (1932, 1942), False, 'from pathlib import Path\n'), ((2010, 2024), 'pathlib.Path', 'Path', (['root_dir'], {}), '(root_dir)\n', (2014, 2024), False, 'from pathlib import Path\n'), ((2121, 2135), 'pathlib.Path', 'Path', (['root_dir'], {}), '(root_dir)\n', (2125, 2135), False, 'from pathlib import Path\n'), ((2202, 2216), 'pathlib.Path', 'Path', (['root_dir'], {}), '(root_dir)\n', (2206, 2216), False, 'from pathlib import Path\n')] |
import numpy as np
class Maze(object):
    """Square maze loaded from a text file.

    Attributes:
    - dim: mazes are square with sides of even length (int).
    - walls: per-cell 4-bit passage code; a bit is 1 when there is NO wall
      on that edge. 1s bit = top edge, 2s = right, 4s = bottom, 8s = left
      (numpy array indexed as walls[x, y]).
    """
    def __init__(self, filename):
        """Load a maze description and validate wall consistency.

        :param filename: path to a file whose first line is the maze
            dimension and whose remaining comma-separated lines encode the
            per-cell wall bits.
        :raises Exception: if the dimension is odd, the grid shape does not
            match it, or adjacent cells disagree about a shared wall.
        """
        with open(filename, 'r') as f_in:
            # First line should be an integer with the maze dimensions
            self.dim = int(f_in.readline())
            # Subsequent lines describe the permissability of walls
            walls = []
            for line in f_in:
                walls.append([int(token) for token in line.split(',')])
            self.walls = np.array(walls)
        # Perform validation on maze
        # Maze dimensions
        if self.dim % 2:
            raise Exception('Maze dimensions must be even in length!')
        if self.walls.shape != (self.dim, self.dim):
            raise Exception('Maze shape does not match dimension attribute!')
        # Wall permeability: each interior wall is shared by two cells,
        # which must agree on whether it is open.
        wall_errors = []
        # vertical walls
        for x in range(self.dim-1):
            for y in range(self.dim):
                if (self.walls[x,y] & 2 != 0) != (self.walls[x+1,y] & 8 != 0):
                    wall_errors.append([(x,y), 'v'])
        # horizontal walls
        for y in range(self.dim-1):
            for x in range(self.dim):
                if (self.walls[x,y] & 1 != 0) != (self.walls[x,y+1] & 4 != 0):
                    wall_errors.append([(x,y), 'h'])
        if wall_errors:
            for cell, wall_type in wall_errors:
                if wall_type == 'v':
                    cell2 = (cell[0]+1, cell[1])
                    print ('Inconsistent vertical wall betweeen {} and {}'.format(cell, cell2))
                else:
                    cell2 = (cell[0], cell[1]+1)
                    print ('Inconsistent horizontal wall betweeen {} and {}'.format(cell, cell2))
            raise Exception('Consistency errors found in wall specifications!')
    def is_permissible(self, cell, direction):
        """
        Returns a boolean designating whether or not a cell is passable in the
        given direction. Cell is input as a list. Directions may be
        input as single letter 'u', 'r', 'd', 'l', or complete words 'up',
        'right', 'down', 'left'.
        """
        dir_int = {'u': 1, 'r': 2, 'd': 4, 'l': 8,
                   'up': 1, 'right': 2, 'down': 4, 'left': 8}
        try:
            return (self.walls[tuple(cell)] & dir_int[direction] != 0)
        except (KeyError, IndexError):
            # BUG FIX: the original bare `except:` swallowed every error
            # (even programming mistakes such as NameError). Only an unknown
            # direction (KeyError) or an out-of-range cell (IndexError)
            # should reach this handler; it keeps the original behavior of
            # printing a warning and returning None.
            print ('Invalid direction provided!')
    def dist_to_wall(self, cell, direction):
        """
        Returns a number designating the number of open cells to the nearest
        wall in the indicated direction. Cell is input as a list. Directions
        may be input as a single letter 'u', 'r', 'd', 'l', or complete words
        'up', 'right', 'down', 'left'.
        """
        dir_move = {'u': [0, 1], 'r': [1, 0], 'd': [0, -1], 'l': [-1, 0],
                    'up': [0, 1], 'right': [1, 0], 'down': [0, -1], 'left': [-1, 0]}
        sensing = True
        distance = 0
        curr_cell = list(cell) # make copy to preserve original
        # Walk cell by cell until a wall blocks the chosen direction.
        while sensing:
            if self.is_permissible(curr_cell, direction):
                distance += 1
                curr_cell[0] += dir_move[direction][0]
                curr_cell[1] += dir_move[direction][1]
            else:
                sensing = False
        return distance
"numpy.array"
] | [((1283, 1298), 'numpy.array', 'np.array', (['walls'], {}), '(walls)\n', (1291, 1298), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module test relu op.
"""
import unittest
from multiprocessing import Manager
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from scipy.special import logit
from scipy.special import expit
class TestSigmoidCrossEntropyWithLogitsOp1(OpTest):
    """Test sigmoid_cross_entropy_with_logit_op with binary label
    """
    def setUp(self):
        # Op under test; inputs/outputs are secret-shared through
        # lazy_share, which is provided by the OpTest base class.
        self.op_type = "mpc_sigmoid_cross_entropy_with_logits"
        self.init_input_output()
        self.inputs = {
            'X': self.lazy_share(self.x),
            'Label': self.lazy_share(self.label)
        }
        self.outputs = {'Out': self.lazy_share(self.out)}
    def init_input_output(self):
        # Random logits obtained by applying logit() to U(0, 1) samples,
        # paired with random binary labels.
        batch_size = 10
        num_classes = 4
        self.x = logit(
            np.random.uniform(0, 1, (batch_size, num_classes))
            .astype("float64"))
        self.label = np.random.randint(0, 2, (batch_size, num_classes))
        # approximate sigmoid with f(x) = {=0, x < -0.5; x + 0.5, -0.5 <= x <= 0.5; 1, x> 0.5}
        self.out = np.minimum(np.maximum(0, self.x + 0.5), 1)
    def test_check_output(self):
        # Compare the op output against the piecewise-linear reference above.
        place = core.CPUPlace()
        self.check_output_with_place(place, atol=1e-3)
    def test_check_grad(self):
        place = core.CPUPlace()
        # TODO max_relative_error is too large, find reason
        self.check_grad_with_place(place, ['X'], "Out", max_relative_error = 50)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.random.randint",
"numpy.random.uniform",
"unittest.main",
"numpy.maximum",
"paddle.fluid.core.CPUPlace"
] | [((2104, 2119), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2117, 2119), False, 'import unittest\n'), ((1531, 1581), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(batch_size, num_classes)'], {}), '(0, 2, (batch_size, num_classes))\n', (1548, 1581), True, 'import numpy as np\n'), ((1792, 1807), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (1805, 1807), True, 'import paddle.fluid.core as core\n'), ((1910, 1925), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (1923, 1925), True, 'import paddle.fluid.core as core\n'), ((1709, 1736), 'numpy.maximum', 'np.maximum', (['(0)', '(self.x + 0.5)'], {}), '(0, self.x + 0.5)\n', (1719, 1736), True, 'import numpy as np\n'), ((1423, 1473), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(batch_size, num_classes)'], {}), '(0, 1, (batch_size, num_classes))\n', (1440, 1473), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
""" Robot Module
This module initializes the robot class
Author: <NAME>
"""
import numpy as np
class Robot:
    """Grid-exploring robot tracking explored, observed and frontier maps."""
    def __init__(self, x_dim, y_dim):
        """Create a robot on an x_dim-by-y_dim grid, entering at the top centre.

        State is stored as (x, y) coordinates; maps are indexed [x, y].
        """
        self._x_dim = x_dim
        self._y_dim = y_dim
        grid_shape = (x_dim, y_dim)
        self._tunnel_grid = np.zeros(grid_shape)
        self._explored_map = np.zeros(grid_shape)
        # Fidelity values for cells observed but not yet explored.
        self._observed_map = np.zeros(grid_shape)
        self._frontiers = np.zeros(grid_shape)
        # Entry-point definition may change with map generation.
        self._entry_point = [int(self._x_dim / 2), 0]
        self._current_position = self._entry_point
        self._update_explored_map()
        # Action encoding without a "none" option.
        self._action_dict = {"up": 0, "right": 1, "down": 2, "left": 3}
        self._action_coords = [(0, -1), (1, 0), (0, 1), (-1, 0)]
        self._reward = 0
    def _get_current_location(self):
        """Return the robot's current (x, y) state."""
        return self._current_position
    def _next_action(self, goal, allowed_actions):
        """Pick a greedy allowed action towards ``goal``; False if none fits."""
        if self._current_position[0] < goal[0] and "right" in allowed_actions:
            return "right"
        if self._current_position[0] > goal[0] and "left" in allowed_actions:
            return "left"
        if self._current_position[1] < goal[1] and "down" in allowed_actions:
            return "down"
        if self._current_position[1] > goal[1] and "up" in allowed_actions:
            return "up"
        # No valid action reduces the distance to the goal.
        print("Allowed actions", allowed_actions)
        print("Robot position", self._current_position)
        print("Goal:", goal)
        print("SOMETHING HAS GONE TERRIBLY WRONG")
        return False
    def _give_action(self, action):
        """Apply a named action and move to the resulting cell."""
        step_x, step_y = self._action_coords[self._action_dict[action]]
        destination = (self._current_position[0] + step_x,
                       self._current_position[1] + step_y)
        self._update_location(destination)
    def _get_explored_map(self):
        """Return the binary map of visited cells."""
        return self._explored_map
    def _get_total_reward(self):
        """Return the accumulated reward."""
        return self._reward
    def _update_explored_map(self):
        """Mark the current cell explored and drop it from the frontiers."""
        x, y = self._current_position[0], self._current_position[1]
        if self._explored_map[x, y] == 0:
            self._explored_map[x, y] = 1
            self._frontiers[x, y] = 0
    # Observed map keeps track of the observed areas
    # Frontier map keeps track of areas that have been observed but not explored
    def update_observed_map(self, observation, radius):
        """Fold a (2*radius+1)-square observation centred on the robot into
        the observed map, adding unexplored cells to the frontiers."""
        centre = self._get_current_location()
        max_x, max_y = self._tunnel_grid.shape
        for off_y in range(-radius, radius + 1):
            grid_y = centre[1] + off_y
            # Skip observation rows outside the grid.
            if grid_y < 0 or grid_y >= max_y:
                continue
            for off_x in range(-radius, radius + 1):
                grid_x = centre[0] + off_x
                if grid_x < 0 or grid_x >= max_x:
                    continue
                value = observation[off_x + radius][off_y + radius]
                self._observed_map[grid_x][grid_y] = value
                # Unexplored but observed cells become frontiers.
                if self._explored_map[grid_x][grid_y] == 0:
                    self._frontiers[grid_x][grid_y] = value
    def _update_location(self, state):
        """Move to ``state`` and refresh the explored map."""
        self._current_position = state
        self._update_explored_map()
    def _update_reward(self, found_artifact):
        """Add +100 for an artifact (returns True) or -1 otherwise (False)."""
        if not found_artifact:
            self._reward -= 1
            return False
        self._reward += 100
        return True
| [
"numpy.zeros",
"numpy.zeros_like"
] | [((239, 275), 'numpy.zeros', 'np.zeros', (['(self._x_dim, self._y_dim)'], {}), '((self._x_dim, self._y_dim))\n', (247, 275), True, 'import numpy as np\n'), ((299, 331), 'numpy.zeros_like', 'np.zeros_like', (['self._tunnel_grid'], {}), '(self._tunnel_grid)\n', (312, 331), True, 'import numpy as np\n'), ((441, 473), 'numpy.zeros_like', 'np.zeros_like', (['self._tunnel_grid'], {}), '(self._tunnel_grid)\n', (454, 473), True, 'import numpy as np\n'), ((494, 526), 'numpy.zeros_like', 'np.zeros_like', (['self._tunnel_grid'], {}), '(self._tunnel_grid)\n', (507, 526), True, 'import numpy as np\n')] |
import torch
import numpy as np
import carb
from pxr import UsdGeom, Gf, Sdf, Usd, PhysxSchema, PhysicsSchema, PhysicsSchemaTools, Semantics
import os
import time
import atexit
import asyncio
import numpy as np
import random
import matplotlib.pyplot as plt
import collections
from omni.isaac.synthetic_utils import visualization as vis
from omni.isaac.synthetic_utils import OmniKitHelper
from omni.isaac.synthetic_utils import SyntheticDataHelper
from jetbot import Jetbot
from road_environment import Environment
import gym
from gym import spaces
class JetbotEnv:
    """Gym-style Omniverse environment: a Jetbot drives a randomized road
    loop; observations are 224x224 grayscale camera images and actions are
    the two wheel commands."""
    metadata = {"render.modes": ["human"]}
    def __init__(
        self, omni_kit, z_height=0, max_resets=10, updates_per_step=3, steps_per_rollout=1000, mirror_mode=False
    ):
        """Build the simulated world, spawn the robot and settle physics.

        :param omni_kit: running OmniKitHelper instance driving the stage
        :param z_height: unused here — TODO confirm callers rely on it
        :param max_resets: road layout is re-randomized every this many resets
        :param updates_per_step: physics/render updates per env step
        :param steps_per_rollout: episode step cap
        :param mirror_mode: when True, randomly mirror observations/actions
        """
        self.MIRROR_MODE = mirror_mode
        self.action_space = spaces.Box(low=0, high=2.0, shape=(2,), dtype=np.float32)
        # IMPORTANT NOTE! SB3 wraps all image spaces in a transposer.
        # it assumes the image outputed is of standard form
        self.observation_space = spaces.Box(low=0, high=255, shape=(224, 224, 1), dtype=np.uint8)
        self.noise = 0.05
        # every time we update the stage, this is how much time will be simulated
        self.dt = 1 / 30.0
        self.omniverse_kit = omni_kit
        self.sd_helper = SyntheticDataHelper()
        self.roads = Environment(self.omniverse_kit)
        # make environment z up
        self.omniverse_kit.set_up_axis(UsdGeom.Tokens.z)
        # we are going to train on a randomized loop that fits in a 6x6 tile area.
        self.shape = [2, 2]
        self.roads.generate_road(self.shape)
        self.roads.generate_lights()
        # spawn robot
        self.jetbot = Jetbot(self.omniverse_kit)
        self.initial_loc = self.roads.get_valid_location()
        self.jetbot.spawn(Gf.Vec3d(self.initial_loc[0], self.initial_loc[1], 5), 0)
        # switch kit camera to jetbot camera
        self.jetbot.activate_camera()
        # start simulation
        self.omniverse_kit.play()
        # Step simulation so that objects fall to rest
        # wait until all materials are loaded
        frame = 0
        print("simulating physics...")
        while frame < 60 or self.omniverse_kit.is_loading():
            self.omniverse_kit.update(self.dt)
            frame = frame + 1
        print("done after frame: ", frame)
        self.initialized = False
        self.numsteps = 0
        self.numresets = 0
        self.maxresets = max_resets
        self.updates_per_step = updates_per_step
        self.steps_per_rollout = steps_per_rollout
        # Rolling histories used for episode statistics / termination checks.
        self.hist_length = collections.deque([0.0] * 10, maxlen=10)
        self.hist_forward_vel = collections.deque([0.0] * 10, maxlen=10)
        self.hist_ang_vel = collections.deque([0.0] * 30, maxlen=30)
        self.avg_forward_vel = 0
        self.dist_traveled = 0
        self.total_reward = 0
        # Randomly mirror horizontally
        self.update_mirror_mode()
    def update_mirror_mode(self):
        """Re-sample whether this episode is horizontally mirrored."""
        # Mirror if mode is enabled and we randomly sample True
        self.mirror_mode = self.MIRROR_MODE & random.choice([False, True])
    def calculate_reward(self):
        """Reward = forward velocity weighted by a Gaussian falloff of the
        distance to the road's centre path; accumulates into total_reward."""
        # distance to nearest point on path in units of block. [0,1]
        dist = self.roads.distance_to_path_in_tiles(self.current_pose)
        self.dist = dist
        dist_reward = np.exp(-dist ** 2 / 0.15 ** 2)
        reward = self.current_forward_velocity * dist_reward
        # if we are driving backwards, large negative reward
        # if self.current_forward_velocity < 0:
        #    reward = self.current_forward_velocity
        # THIS IS FOR DEBUGGING ONLY
        if self.numsteps % 10 == 0 or reward < 0:
            print(
                "{:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f}".format(
                    reward,
                    self.current_forward_velocity,
                    self.avg_forward_vel,
                    dist_reward,
                    self.dist_traveled,
                    self.current_ang_vel,
                )
            )
        self.total_reward += reward
        return reward
    def is_dead(self):
        """Episode termination: off the road, over the step cap, or driving
        backwards on average after a grace period."""
        done = False
        # terminate if we leave boundary
        if not self.roads.is_inside_path_boundary(self.current_pose):
            print("dead not inside boundary", self.numsteps)
            done = True
        # kill the episode after 500 steps
        if self.numsteps > self.steps_per_rollout:
            print("dead self.numsteps > self.steps_per_rollout", self.numsteps)
            done = True
        # terminate if we are driving backwards for too long
        if self.avg_forward_vel <= 0 and self.numsteps > 35:
            print("dead self.avg_forward_vel <= 1 after 35 steps ", self.avg_forward_vel)
            done = True
        return done
    def transform_action(self, action):
        """Swap the wheel commands when the episode is mirrored."""
        # If mirrored, swap wheel controls
        if self.mirror_mode:
            action = action[::-1]
        return action
    def transform_state_image(self, im):
        """Mirror the camera image horizontally when the episode is mirrored."""
        # If enabled, mirror image horizontally
        if self.mirror_mode:
            return np.flip(im, axis=1)
        return im
    def step(self, action):
        """Apply wheel commands, advance the sim, and return
        (observation, reward, done, info) in gym convention."""
        if self.initialized:
            self.previous_loc = self.current_loc
        transformed_action = self.transform_action(action)
        self.jetbot.command(transformed_action)
        frame = 0
        reward = 0
        # every time step is called we actually update the scene by updates_per_step.
        while frame < self.updates_per_step:
            # render at 1/30, simulate at 1/60, which means 2 substeps per frame
            self.omniverse_kit.update(self.dt, 1.0 / 60.0, 2.0)
            frame = frame + 1
        # compute reward once simulation is complete
        obs = self.jetbot.observations()
        self.current_pose = obs["pose"]
        self.current_speed = np.linalg.norm(np.array(obs["linear_velocity"]))
        self.current_forward_velocity = obs["local_linear_velocity"][0]
        self.current_ang_vel = obs["angular_velocity"][2]
        self.current_loc = self.roads.get_tile_from_pose(self.current_pose)
        self.hist_forward_vel.append(self.current_forward_velocity)
        self.dist_traveled = self.dist_traveled + self.current_forward_velocity * self.dt
        self.hist_ang_vel.append(self.current_ang_vel)
        self.avg_forward_vel = sum(self.hist_forward_vel) / len(self.hist_forward_vel)
        if not self.initialized:
            self.previous_loc = self.roads.get_tile_from_pose(self.current_pose)
        reward = self.calculate_reward()
        # the synthetic data helper is our way of grabbing the image data we need from the camera. currently the SD helper
        # only supports a single camera, however you can use it to access camera data as a cuda tensor directly on the
        # device. stable baselines 3 is expecting a numpy array, so we pull the data to the host
        # additional sensors that could be of interest and can be added to this list:
        # "depth", "instanceSegmentation", "semanticSegmentation"
        gt = self.sd_helper.get_groundtruth(["rgb", "camera"])
        # we only need the rgb channels of the rgb image
        # NOTE(review): np.float was removed in NumPy 1.24 — on modern NumPy
        # this (and the uses below) needs the builtin float instead.
        currentState = gt["rgb"][:, :, :3].astype(np.float)
        currentState = self.transform_state_image(currentState)
        if not self.initialized:
            self.previousState = currentState
        # Standard luminance weights convert RGB to grayscale.
        img = np.dot(currentState, [0.299, 0.587, 0.114]) # np.concatenate((currentState, self.previousState), axis=2)
        img = img.reshape((img.shape[0], img.shape[1], 1))
        # the real camera will have noise on each pixel, so we add some uniform noise here to simulate thats
        # uncomment below to add noise to image
        img = np.clip((255 * self.noise * np.random.randn(224, 224, 1) + img.astype(np.float)), 0, 255).astype(np.uint8)
        self.previousState = currentState
        self.numsteps += 1
        done = self.is_dead()
        return img, reward, done, {}
    def reset(self):
        """Teleport the robot to a fresh random pose (re-randomizing the road
        every ``maxresets`` calls) and return the initial observation."""
        # Randomly mirror horizontally
        self.update_mirror_mode()
        # randomize the road configuration every self.maxresets resets.
        if self.numresets % self.maxresets == 0:
            size = random.randrange(2, 6)
            self.shape = [size, size]
            self.roads.reset(self.shape)
        if not self.initialized:
            state, reward, done, info, = self.step([0, 0])
            self.initialized = True
        # every time we reset, we move the robot to a random location, and pointing along the direction of the road
        loc = self.roads.get_valid_location()
        # the random angle offset can be increased here
        rot = self.roads.get_forward_direction(loc) + random.uniform(-10, 10)
        self.jetbot.teleport(
            Gf.Vec3d(loc[0] + random.uniform(-2.5, 2.5), loc[1] + random.uniform(-2.5, 2.5), 5), rot, settle=True
        )
        obs = self.jetbot.observations()
        self.current_pose = obs["pose"]
        self.current_speed = np.linalg.norm(np.array(obs["linear_velocity"]))
        self.current_forward_velocity = obs["local_linear_velocity"][0]
        self.current_loc = self.roads.get_tile_from_pose(self.current_pose)
        self.previous_loc = self.roads.get_tile_from_pose(self.current_pose)
        self.dist = self.roads.distance_to_path_in_tiles(self.current_pose)
        # wait for loading
        if self.numresets % self.maxresets == 0:
            while self.omniverse_kit.is_loading():
                self.omniverse_kit.update(self.dt)
        gt = self.sd_helper.get_groundtruth(["rgb", "camera"])
        currentState = gt["rgb"][:, :, :3]
        currentState = self.transform_state_image(currentState)
        img = np.dot(
            currentState.astype(np.float), [0.299, 0.587, 0.114]
        )  # np.concatenate((currentState, currentState), axis=2)
        img = img.reshape((img.shape[0], img.shape[1], 1))
        # uncomment below to add noise to image
        img = np.clip((255 * self.noise * np.random.randn(224, 224, 1) + img.astype(np.float)), 0, 255).astype(np.uint8)
        print(
            "reset ",
            sum(self.hist_length) / len(self.hist_length),
            self.numresets,
            self.dist_traveled,
            self.avg_forward_vel,
            self.total_reward,
        )
        self.numsteps = 0
        self.previousState = currentState
        self.numresets += 1
        self.total_reward = 0
        self.dist_traveled = 0
        return img
| [
"numpy.flip",
"omni.isaac.synthetic_utils.SyntheticDataHelper",
"random.choice",
"collections.deque",
"random.uniform",
"road_environment.Environment",
"random.randrange",
"gym.spaces.Box",
"numpy.exp",
"numpy.array",
"numpy.dot",
"jetbot.Jetbot",
"numpy.random.randn",
"pxr.Gf.Vec3d"
] | [((825, 882), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(2.0)', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=0, high=2.0, shape=(2,), dtype=np.float32)\n', (835, 882), False, 'from gym import spaces\n'), ((1047, 1111), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(224, 224, 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(224, 224, 1), dtype=np.uint8)\n', (1057, 1111), False, 'from gym import spaces\n'), ((1311, 1332), 'omni.isaac.synthetic_utils.SyntheticDataHelper', 'SyntheticDataHelper', ([], {}), '()\n', (1330, 1332), False, 'from omni.isaac.synthetic_utils import SyntheticDataHelper\n'), ((1354, 1385), 'road_environment.Environment', 'Environment', (['self.omniverse_kit'], {}), '(self.omniverse_kit)\n', (1365, 1385), False, 'from road_environment import Environment\n'), ((1715, 1741), 'jetbot.Jetbot', 'Jetbot', (['self.omniverse_kit'], {}), '(self.omniverse_kit)\n', (1721, 1741), False, 'from jetbot import Jetbot\n'), ((2622, 2662), 'collections.deque', 'collections.deque', (['([0.0] * 10)'], {'maxlen': '(10)'}), '([0.0] * 10, maxlen=10)\n', (2639, 2662), False, 'import collections\n'), ((2695, 2735), 'collections.deque', 'collections.deque', (['([0.0] * 10)'], {'maxlen': '(10)'}), '([0.0] * 10, maxlen=10)\n', (2712, 2735), False, 'import collections\n'), ((2764, 2804), 'collections.deque', 'collections.deque', (['([0.0] * 30)'], {'maxlen': '(30)'}), '([0.0] * 30, maxlen=30)\n', (2781, 2804), False, 'import collections\n'), ((3368, 3398), 'numpy.exp', 'np.exp', (['(-dist ** 2 / 0.15 ** 2)'], {}), '(-dist ** 2 / 0.15 ** 2)\n', (3374, 3398), True, 'import numpy as np\n'), ((7439, 7482), 'numpy.dot', 'np.dot', (['currentState', '[0.299, 0.587, 0.114]'], {}), '(currentState, [0.299, 0.587, 0.114])\n', (7445, 7482), True, 'import numpy as np\n'), ((1827, 1880), 'pxr.Gf.Vec3d', 'Gf.Vec3d', (['self.initial_loc[0]', 'self.initial_loc[1]', '(5)'], {}), '(self.initial_loc[0], self.initial_loc[1], 5)\n', 
(1835, 1880), False, 'from pxr import UsdGeom, Gf, Sdf, Usd, PhysxSchema, PhysicsSchema, PhysicsSchemaTools, Semantics\n'), ((3118, 3146), 'random.choice', 'random.choice', (['[False, True]'], {}), '([False, True])\n', (3131, 3146), False, 'import random\n'), ((5131, 5150), 'numpy.flip', 'np.flip', (['im'], {'axis': '(1)'}), '(im, axis=1)\n', (5138, 5150), True, 'import numpy as np\n'), ((5908, 5940), 'numpy.array', 'np.array', (["obs['linear_velocity']"], {}), "(obs['linear_velocity'])\n", (5916, 5940), True, 'import numpy as np\n'), ((8259, 8281), 'random.randrange', 'random.randrange', (['(2)', '(6)'], {}), '(2, 6)\n', (8275, 8281), False, 'import random\n'), ((8763, 8786), 'random.uniform', 'random.uniform', (['(-10)', '(10)'], {}), '(-10, 10)\n', (8777, 8786), False, 'import random\n'), ((9067, 9099), 'numpy.array', 'np.array', (["obs['linear_velocity']"], {}), "(obs['linear_velocity'])\n", (9075, 9099), True, 'import numpy as np\n'), ((8847, 8872), 'random.uniform', 'random.uniform', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (8861, 8872), False, 'import random\n'), ((8883, 8908), 'random.uniform', 'random.uniform', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (8897, 8908), False, 'import random\n'), ((7804, 7832), 'numpy.random.randn', 'np.random.randn', (['(224)', '(224)', '(1)'], {}), '(224, 224, 1)\n', (7819, 7832), True, 'import numpy as np\n'), ((10054, 10082), 'numpy.random.randn', 'np.random.randn', (['(224)', '(224)', '(1)'], {}), '(224, 224, 1)\n', (10069, 10082), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <NAME> <<EMAIL>> <https://hanxiao.github.io>
import multiprocessing
import os
import random
import sys
import threading
import time
from collections import defaultdict
from datetime import datetime
from itertools import chain
from multiprocessing import Process
from multiprocessing.pool import Pool
import numpy as np
import zmq
import zmq.decorators as zmqd
from termcolor import colored
from zmq.utils import jsonapi
from .helper import *
from .http import BertHTTPProxy
from .zmq_decor import multi_socket
from .tf_server import TFServer
from .tf_server import TFSink
from .tf_server import TFSinkJob
from .tf_server import TFWorker
from .tf_server import ServerCmd
__all__ = ['__version__', 'BertServer']
__version__ = '1.10.0'
_tf_ver_ = check_tf_version()
# class ServerCmd:
# terminate = b'TERMINATION'
# show_config = b'SHOW_CONFIG'
# show_status = b'SHOW_STATUS'
# new_job = b'REGISTER'
# data_token = b'TOKENS'
# data_embed = b'EMBEDDINGS'
#
# @staticmethod
# def is_valid(cmd):
# return any(not k.startswith('__') and v == cmd for k, v in vars(ServerCmd).items())
class BertServer(TFServer):
    """BERT-specific front-end server built on the generic ``TFServer``.

    Overrides the sink/worker factory hooks so the generic serving
    pipeline spawns BERT-aware components, and advertises the package
    version through the static status map.
    """

    def __init__(self, args):
        super().__init__(args)
        self.logger = set_logger(colored('VENTILATOR', 'magenta'), args.verbose)
        # expose the bert-serving package version in the status report
        self.status_static.update({'bert_server_version': __version__})

    def create_sink(self, addr_front2sink, config):
        """Factory hook: build the BERT result-collecting sink process."""
        return BertSink(self.args, addr_front2sink, config)

    def create_worker(self, idx, addr_backend_list, addr_sink, device_id, graph_path, config):
        """Factory hook: build one BERT inference worker process."""
        return BertWorker(idx, self.args, addr_backend_list,
                          addr_sink, device_id, graph_path, config)
class BertSink(TFSink):
    """BERT-specific sink: collects worker results for each client request."""

    def __init__(self, args, front_sink_addr, bert_config):
        super().__init__(args, front_sink_addr, bert_config)

    def create_sink_job(self, args, config):
        """Factory hook: one ``BertSinkJob`` tracks a single client request."""
        return BertSinkJob(args, config)
class BertSinkJob(TFSinkJob):
    """Per-request sink job with BERT-specific shape handling.

    Tracks whether the server runs with an unbounded sequence length
    (``max_seq_len is None``); in that mode the result buffer is
    allocated with ``max_position_embeddings`` rows per sample and
    truncated to the longest effective sequence when the job finishes.
    """

    def __init__(self, cmd_args, model_config):
        super().__init__(cmd_args, model_config)
        # TODO: expose these through the base class more elegantly
        # whether token lists are streamed back alongside embeddings
        self.with_extras = cmd_args.show_tokens_to_client
        # True when the user did not pin a fixed sequence length
        self.max_seq_len_unset = cmd_args.max_seq_len is None
        self.max_position_embeddings = model_config.max_position_embeddings
        self.fixed_embed_length = cmd_args.fixed_embed_length

    def reset_array_element_shape(self, d_shape):
        """Adjust the per-sample shape used to allocate the result array.

        :param d_shape: shape of the first sample excluding the batch axis,
                        i.e. (seq_len, embedding_dim)
        :return: the (possibly modified) per-sample shape
        """
        if self.max_seq_len_unset and len(d_shape) > 1:
            # if max_seq_len is not set, we have no choice but to allocate
            # [B, max_position_embeddings, dim] and truncate at the end;
            # consistency fix: use the attribute cached in __init__ instead
            # of re-reading model_config
            d_shape[0] = self.max_position_embeddings
        return d_shape

    def post_process(self, final_ndarray):
        """Trim the assembled result array to the longest effective sequence.

        :param final_ndarray: the fully assembled [B, seq, dim] result
        :return: the array, truncated along the sequence axis when the
                 server runs without a fixed sequence length
        """
        if self.max_seq_len_unset and not self.fixed_embed_length:
            # https://zhuanlan.zhihu.com/p/59767914
            # NOTE(review): max_effective_len is expected to be maintained
            # by the TFSinkJob base class -- confirm
            x = np.ascontiguousarray(final_ndarray[:, 0:self.max_effective_len])
        else:
            # bug fix: return the argument, not the self.final_ndarray
            # attribute (the other branch already slices the argument)
            x = final_ndarray
        return x
class BertWorker(TFWorker):
    """BERT-specific inference worker process.

    Specializes the generic ``TFWorker``: tokenizes incoming client text
    with the BERT ``FullTokenizer`` and declares the names/shapes of the
    frozen graph's feature inputs and its output tensor.
    """
    def __init__(self, id, args, worker_address_list, sink_address, device_id, graph_path, graph_config):
        super().__init__(id, args, worker_address_list, sink_address, device_id, graph_path, graph_config)
        # self.worker_id = id
        # self.device_id = device_id
        # self.logger = set_logger(colored('WORKER-%d' % self.worker_id, 'yellow'), args.verbose)
        # self.max_seq_len = args.max_seq_len
        # self.do_lower_case = args.do_lower_case
        # self.mask_cls_sep = args.mask_cls_sep
        # self.daemon = True
        # self.exit_flag = multiprocessing.Event()
        # self.worker_address = worker_address_list
        # self.num_concurrent_socket = len(self.worker_address)
        # self.sink_address = sink_address
        # self.prefetch_size = args.prefetch_size if self.device_id > 0 else None  # set to zero for CPU-worker
        # self.gpu_memory_fraction = args.gpu_memory_fraction
        # self.model_dir = args.model_dir
        # self.verbose = args.verbose
        # self.graph_path = graph_path
        # self.bert_config = graph_config
        # self.use_fp16 = args.fp16
        # self.show_tokens_to_client = args.show_tokens_to_client
        # self.no_special_token = args.no_special_token
        # self.is_ready = multiprocessing.Event()
    def init(self):
        """Build the BERT tokenizer and bind the feature-conversion helper.

        The imports are done here rather than at module level -- presumably
        so the heavyweight BERT modules load inside the worker process;
        confirm against the TFWorker lifecycle.
        """
        from .bert.tokenization import FullTokenizer
        from .bert.extract_features import convert_lst_to_features
        self.convert_lst_to_features = convert_lst_to_features
        self.tokenizer = FullTokenizer(vocab_file=os.path.join(self.model_dir, 'vocab.txt'),
                                       do_lower_case=self.do_lower_case)
    def to_features(self, client_id, raw_msg, extra_params, logger, sink):
        """Decode one client request into the BERT feature dict.

        :param client_id: opaque client identity frame
        :param raw_msg: JSON-encoded list of texts (or of token lists)
        :param extra_params: unused here; part of the TFWorker hook signature
        :param logger: worker logger passed to the feature converter
        :param sink: ZMQ socket used to stream token lists back to the
            client when ``show_tokens_to_client`` is enabled
        :return: dict with 'client_id', 'input_ids', 'input_mask' and
            'input_type_ids' lists, one entry per input example
        """
        msg = jsonapi.loads(raw_msg)
        # check if msg is a list of list, if yes consider the input is already tokenized
        is_tokenized = all(isinstance(el, list) for el in msg)
        tmp_f = list(self.convert_lst_to_features(msg, self.max_seq_len,
                                                  self.max_position_embeddings,
                                                  self.tokenizer, logger,
                                                  is_tokenized, self.mask_cls_sep, self.no_special_token))
        if self.show_tokens_to_client:
            # send tokens back on a separate frame before the embeddings
            sink.send_multipart([client_id, jsonapi.dumps([f.tokens for f in tmp_f]),
                                 b'', ServerCmd.extra_data_single])
        return {
            'client_id': client_id,
            'input_ids': [f.input_ids for f in tmp_f],
            'input_mask': [f.input_mask for f in tmp_f],
            'input_type_ids': [f.input_type_ids for f in tmp_f]
        }
    def get_tf_feature_inputs(self, tf):
        """Declare the placeholder names/shapes/dtypes of the frozen graph."""
        return {'input_ids': {'shape': (None, None), 'type': tf.int32},
                'input_mask': {'shape': (None, None), 'type': tf.int32},
                'input_type_ids': {'shape': (None, None), 'type': tf.int32}}
    def get_tf_feature_output(self, tf):
        """Name of the graph's output tensor holding the encodings."""
        return "final_encodes"
#
# def close(self):
# self.logger.info('shutting down...')
# self.exit_flag.set()
# self.is_ready.clear()
# self.terminate()
# self.join()
# self.logger.info('terminated!')
#
# def get_estimator(self, tf):
# from tensorflow.python.estimator.estimator import Estimator
# from tensorflow.python.estimator.run_config import RunConfig
# from tensorflow.python.estimator.model_fn import EstimatorSpec
#
# def model_fn(features, labels, mode, params):
# with tf.gfile.GFile(self.graph_path, 'rb') as f:
# graph_def = tf.GraphDef()
# graph_def.ParseFromString(f.read())
#
# input_names = ['input_ids', 'input_mask', 'input_type_ids']
#
# output = tf.import_graph_def(graph_def,
# input_map={k + ':0': features[k] for k in input_names},
# return_elements=['final_encodes:0'])
#
# return EstimatorSpec(mode=mode, predictions={
# 'client_id': features['client_id'],
# 'encodes': output[0]
# })
#
# config = tf.ConfigProto(device_count={'GPU': 0 if self.device_id < 0 else 1})
# config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
# config.log_device_placement = False
# # session-wise XLA doesn't seem to work on tf 1.10
# # if args.xla:
# # config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
#
# return Estimator(model_fn=model_fn, config=RunConfig(session_config=config))
#
# def run(self):
# self._run()
#
# @zmqd.socket(zmq.PUSH)
# @zmqd.socket(zmq.PUSH)
# @multi_socket(zmq.PULL, num_socket='num_concurrent_socket')
# def _run(self, sink_embed, sink_token, *receivers):
# # Windows does not support logger in MP environment, thus get a new logger
# # inside the process for better compatibility
# logger = set_logger(colored('WORKER-%d' % self.worker_id, 'yellow'), self.verbose)
#
# logger.info('use device %s, load graph from %s' %
# ('cpu' if self.device_id < 0 else ('gpu: %d' % self.device_id), self.graph_path))
#
# tf = import_tf(self.device_id, self.verbose, use_fp16=self.use_fp16)
# estimator = self.get_estimator(tf)
#
# for sock, addr in zip(receivers, self.worker_address):
# sock.connect(addr)
#
# sink_embed.connect(self.sink_address)
# sink_token.connect(self.sink_address)
# for r in estimator.predict(self.input_fn_builder(receivers, tf, sink_token), yield_single_examples=False):
# send_ndarray(sink_embed, r['client_id'], r['encodes'], ServerCmd.data_embed)
# logger.info('job done\tsize: %s\tclient: %s' % (r['encodes'].shape, r['client_id']))
#
# def input_fn_builder(self, socks, tf, sink):
# from .bert.extract_features import convert_lst_to_features
# from .bert.tokenization import FullTokenizer
#
# def gen():
# # Windows does not support logger in MP environment, thus get a new logger
# # inside the process for better compatibility
# logger = set_logger(colored('WORKER-%d' % self.worker_id, 'yellow'), self.verbose)
# tokenizer = FullTokenizer(vocab_file=os.path.join(self.model_dir, 'vocab.txt'),
# do_lower_case=self.do_lower_case)
#
# poller = zmq.Poller()
# for sock in socks:
# poller.register(sock, zmq.POLLIN)
#
# logger.info('ready and listening!')
# self.is_ready.set()
#
# while not self.exit_flag.is_set():
# events = dict(poller.poll())
# for sock_idx, sock in enumerate(socks):
# if sock in events:
# client_id, raw_msg = sock.recv_multipart()
# msg = jsonapi.loads(raw_msg)
# logger.info('new job\tsocket: %d\tsize: %d\tclient: %s' % (sock_idx, len(msg), client_id))
# # check if msg is a list of list, if yes consider the input is already tokenized
# is_tokenized = all(isinstance(el, list) for el in msg)
# tmp_f = list(convert_lst_to_features(msg, self.max_seq_len,
# self.bert_config.max_position_embeddings,
# tokenizer, logger,
# is_tokenized, self.mask_cls_sep, self.no_special_token))
# if self.show_tokens_to_client:
# sink.send_multipart([client_id, jsonapi.dumps([f.tokens for f in tmp_f]),
# b'', ServerCmd.data_token])
# yield {
# 'client_id': client_id,
# 'input_ids': [f.input_ids for f in tmp_f],
# 'input_mask': [f.input_mask for f in tmp_f],
# 'input_type_ids': [f.input_type_ids for f in tmp_f]
# }
#
# def input_fn():
# return (tf.data.Dataset.from_generator(
# gen,
# output_types={'input_ids': tf.int32,
# 'input_mask': tf.int32,
# 'input_type_ids': tf.int32,
# 'client_id': tf.string},
# output_shapes={
# 'client_id': (),
# 'input_ids': (None, None),
# 'input_mask': (None, None),
# 'input_type_ids': (None, None)}).prefetch(self.prefetch_size))
#
# return input_fn
#
# class ServerStatistic:
# def __init__(self):
# self._hist_client = CappedHistogram(500)
# self._hist_msg_len = defaultdict(int)
# self._client_last_active_time = CappedHistogram(500)
# self._num_data_req = 0
# self._num_sys_req = 0
# self._num_total_seq = 0
# self._last_req_time = time.perf_counter()
# self._last_two_req_interval = []
# self._num_last_two_req = 200
#
# def update(self, request):
# client, msg, req_id, msg_len = request
# self._hist_client[client] += 1
# if ServerCmd.is_valid(msg):
# self._num_sys_req += 1
# # do not count for system request, as they are mainly for heartbeats
# else:
# self._hist_msg_len[int(msg_len)] += 1
# self._num_total_seq += int(msg_len)
# self._num_data_req += 1
# tmp = time.perf_counter()
# self._client_last_active_time[client] = tmp
# if len(self._last_two_req_interval) < self._num_last_two_req:
# self._last_two_req_interval.append(tmp - self._last_req_time)
# else:
# self._last_two_req_interval.pop(0)
# self._last_req_time = tmp
#
# @property
# def value(self):
# def get_min_max_avg(name, stat, avg=None):
# if len(stat) > 0:
# avg = sum(stat) / len(stat) if avg is None else avg
# min_, max_ = min(stat), max(stat)
# return {
# 'avg_%s' % name: avg,
# 'min_%s' % name: min_,
# 'max_%s' % name: max_,
# 'num_min_%s' % name: sum(v == min_ for v in stat),
# 'num_max_%s' % name: sum(v == max_ for v in stat),
# }
# else:
# return {}
#
# def get_num_active_client(interval=180):
# # we count a client active when its last request is within 3 min.
# now = time.perf_counter()
# return sum(1 for v in self._client_last_active_time.values() if (now - v) < interval)
#
# avg_msg_len = None
# if len(self._hist_msg_len) > 0:
# avg_msg_len = sum(k * v for k, v in self._hist_msg_len.items()) / sum(self._hist_msg_len.values())
#
# parts = [{
# 'num_data_request': self._num_data_req,
# 'num_total_seq': self._num_total_seq,
# 'num_sys_request': self._num_sys_req,
# 'num_total_request': self._num_data_req + self._num_sys_req,
# 'num_total_client': self._hist_client.total_size(),
# 'num_active_client': get_num_active_client()},
# self._hist_client.get_stat_map('request_per_client'),
# get_min_max_avg('size_per_request', self._hist_msg_len.keys(), avg=avg_msg_len),
# get_min_max_avg('last_two_interval', self._last_two_req_interval),
# get_min_max_avg('request_per_second', [1. / v for v in self._last_two_req_interval]),
# ]
#
# return {k: v for d in parts for k, v in d.items()}
| [
"termcolor.colored",
"zmq.utils.jsonapi.dumps",
"os.path.join",
"numpy.ascontiguousarray",
"zmq.utils.jsonapi.loads"
] | [((5715, 5737), 'zmq.utils.jsonapi.loads', 'jsonapi.loads', (['raw_msg'], {}), '(raw_msg)\n', (5728, 5737), False, 'from zmq.utils import jsonapi\n'), ((1294, 1326), 'termcolor.colored', 'colored', (['"""VENTILATOR"""', '"""magenta"""'], {}), "('VENTILATOR', 'magenta')\n", (1301, 1326), False, 'from termcolor import colored\n'), ((3790, 3854), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['final_ndarray[:, 0:self.max_effective_len]'], {}), '(final_ndarray[:, 0:self.max_effective_len])\n', (3810, 3854), True, 'import numpy as np\n'), ((5509, 5550), 'os.path.join', 'os.path.join', (['self.model_dir', '"""vocab.txt"""'], {}), "(self.model_dir, 'vocab.txt')\n", (5521, 5550), False, 'import os\n'), ((6308, 6348), 'zmq.utils.jsonapi.dumps', 'jsonapi.dumps', (['[f.tokens for f in tmp_f]'], {}), '([f.tokens for f in tmp_f])\n', (6321, 6348), False, 'from zmq.utils import jsonapi\n')] |
# Plot CO coverage vs. temperature for a set of coordination numbers.
import os

# Make sure the shared parameter module is reachable from this directory.
if not os.path.exists('AdsorptionParameters.py'):
    os.symlink('../AdsorptionParameters.py', 'AdsorptionParameters.py')

import matplotlib.pyplot as plt
import asap3.nanoparticle_mc.langmuirExpression as le
import numpy as np

# One coverage vector (indexed by coordination number, length 16) per
# temperature, evaluated at 1 mbar (P = 1e2 Pa).
temperatures = np.linspace(0.1, 800, 100)
covs = np.array([le.getCoverages(T=t, P=1E2, species="AuCO")
                 for t in temperatures])

# Draw one coverage curve per coordination number of interest.
for cn, lbl in ((4, "CN 4"), (6, "CN 6"), (8, "CN 8"), (9, "CN 9"), (12, "CN 12")):
    plt.plot(temperatures, covs[:, cn], label=lbl)

plt.legend(loc=3)
plt.ylim([0, 1.1])
plt.xlabel("Temperature[K]", fontsize=20)
plt.ylabel("Coverage", fontsize=20)
plt.show()
| [
"os.path.exists",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.symlink",
"numpy.linspace",
"asap3.nanoparticle_mc.langmuirExpression.getCoverages",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((324, 350), 'numpy.linspace', 'np.linspace', (['(0.1)', '(800)', '(100)'], {}), '(0.1, 800, 100)\n', (335, 350), True, 'import numpy as np\n'), ((602, 633), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'CN_4'], {'label': '"""CN 4"""'}), "(x, CN_4, label='CN 4')\n", (610, 633), True, 'import matplotlib.pyplot as plt\n'), ((632, 663), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'CN_6'], {'label': '"""CN 6"""'}), "(x, CN_6, label='CN 6')\n", (640, 663), True, 'import matplotlib.pyplot as plt\n'), ((662, 693), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'CN_8'], {'label': '"""CN 8"""'}), "(x, CN_8, label='CN 8')\n", (670, 693), True, 'import matplotlib.pyplot as plt\n'), ((692, 723), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'CN_9'], {'label': '"""CN 9"""'}), "(x, CN_9, label='CN 9')\n", (700, 723), True, 'import matplotlib.pyplot as plt\n'), ((722, 755), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'CN_12'], {'label': '"""CN 12"""'}), "(x, CN_12, label='CN 12')\n", (730, 755), True, 'import matplotlib.pyplot as plt\n'), ((754, 771), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(3)'}), '(loc=3)\n', (764, 771), True, 'import matplotlib.pyplot as plt\n'), ((772, 790), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.1]'], {}), '([0, 1.1])\n', (780, 790), True, 'import matplotlib.pyplot as plt\n'), ((790, 831), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature[K]"""'], {'fontsize': '(20)'}), "('Temperature[K]', fontsize=20)\n", (800, 831), True, 'import matplotlib.pyplot as plt\n'), ((831, 866), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Coverage"""'], {'fontsize': '(20)'}), "('Coverage', fontsize=20)\n", (841, 866), True, 'import matplotlib.pyplot as plt\n'), ((866, 876), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (874, 876), True, 'import matplotlib.pyplot as plt\n'), ((17, 58), 'os.path.exists', 'os.path.exists', (['"""AdsorptionParameters.py"""'], {}), "('AdsorptionParameters.py')\n", (31, 58), False, 'import os\n'), 
((61, 128), 'os.symlink', 'os.symlink', (['"""../AdsorptionParameters.py"""', '"""AdsorptionParameters.py"""'], {}), "('../AdsorptionParameters.py', 'AdsorptionParameters.py')\n", (71, 128), False, 'import os\n'), ((413, 461), 'asap3.nanoparticle_mc.langmuirExpression.getCoverages', 'le.getCoverages', ([], {'T': 'x[i]', 'P': '(100.0)', 'species': '"""AuCO"""'}), "(T=x[i], P=100.0, species='AuCO')\n", (428, 461), True, 'import asap3.nanoparticle_mc.langmuirExpression as le\n')] |
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from queue import Queue
from moviepy.editor import VideoFileClip
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
# --- Camera calibration from chessboard images (9x6 inner corners) ---
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
rgb_imgshow = []
# Make a list of calibration images
images = glob.glob('./camera_cal/calibration*.jpg')
total_no_of_images = len(images)
print("total_no_of_images : ",total_no_of_images)
# Step through the list and search for chessboard corners
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    #cv2.imshow('Original image',fname)
    #cv2.waitkey(1000)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
        rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        #rgb_imgshow.append(rgb_img)
        #plt.imshow(rgb_img)
        #plt.title("Output Corners"+str(i+1))
        #plt.show()
        #axes.imshow(img)
    #cv2.imshow('img',img)
    #cv2.waitKey(500)
cv2.destroyAllWindows()
img = cv2.imread('./camera_cal/calibration1.jpg')
img_size = (img.shape[1], img.shape[0])
# Find the Camera calibration Once given object points, image points(derived
#above line 1-32) and the shape of the grayscale image:
ret, mtx, dist, rvesc, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
# Above function return distortion coffecient(dist), cameera matrix (mtx)
# and rotation and translation vector in real world and these information will be used
# in section bellow to correct the distortion of an image
# NOTE(review): this ndarray named `undistort` is immediately shadowed by
# the `def undistort(img)` function defined right below -- confirm intent.
undistort = cv2.undistort(img, mtx, dist, None, mtx)
img_test1 = cv2.imread("./test_images_ll/straight_lines1.jpg")
img_test2 = cv2.imread("./test_images_ll/test1.jpg")
img_test3 = cv2.imread("./test_images_ll/test2.jpg")
img_test4 = cv2.imread("./test_images_ll/test3.jpg")
img_test5 = cv2.imread("./test_images_ll/test4.jpg")
img_test6 = cv2.imread("./test_images_ll/test5.jpg")
img_test7 = cv2.imread("./test_images_ll/test6.jpg")
undistorted_imgs = []
test_images=[img_test1,img_test2,img_test3,img_test4,img_test5,img_test6,img_test7]
print("Distortion correction on test images")
def undistort(img):
    """Remove lens distortion from ``img``.

    Uses the module-level camera matrix ``mtx`` and distortion
    coefficients ``dist`` computed by ``cv2.calibrateCamera`` above.
    """
    return cv2.undistort(img, mtx, dist, None, mtx)
'''
warper - Transform the perspective of a given image using the source and destination points.
'''
def warper(img, src, dst):
    """Apply a perspective (bird's-eye) transform to ``img``.

    :param img: input image, H x W [x C]
    :param src: source quadrilateral corners (float32)
    :param dst: destination quadrilateral corners (float32)
    :return: (warped image, forward matrix M, inverse matrix Minv)
    """
    h, w = img.shape[0], img.shape[1]
    forward = cv2.getPerspectiveTransform(src, dst)
    inverse = cv2.getPerspectiveTransform(dst, src)
    # keep the warped output the same size as the input
    warped = cv2.warpPerspective(img, forward, (w, h), flags=cv2.INTER_NEAREST)
    return warped, forward, inverse
# --- Perspective-transform configuration shared by the pipeline below ---
Warped_images = []
# horizontal margin (px) of the bird's-eye destination rectangle
offset=200
height, width= img_test1.shape[0], img_test1.shape[1]
# hand-picked lane trapezoid in the camera image
src=np.float32([(593,450),(700,450),(1200,700),(200,700)]) #top_left, top_right, bottom_right,bottom_left
# matching rectangle in the top-down (bird's-eye) view
dst=np.float32([(offset,0),(width-offset,0),(width-offset,height),(offset,height)]) #top_left, top_right,bottom_right,bottom_left
def channel_threshold(img, thresh):
    """Binarize a single image channel against a (low, high] range.

    The channel is first rescaled so its maximum equals 255, then every
    pixel with low < value <= high is set to 1.

    :param img: single-channel image
    :param thresh: (low, high) pair applied to the rescaled channel
    :return: float mask of the same shape, 1 inside the range, 0 outside
    """
    lo, hi = thresh
    scaled = img * (255 / np.max(img))
    binary = np.zeros_like(scaled)
    binary[(scaled > lo) & (scaled <= hi)] = 1
    return binary
def Custom_channel(img):
    """Split an RGB image into the four channels used for lane masking.

    :param img: RGB image
    :return: (YCrCb Y, YCrCb Cr, HLS S, Lab L) single-channel images
    """
    ycrcb = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    imgY = ycrcb[:, :, 0]                                     # luma
    imgCr = ycrcb[:, :, 1]                                    # red-difference chroma
    imgHL_S = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)[:, :, 2]   # saturation
    imgLab_L = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)[:, :, 0]  # lightness
    return imgY, imgCr, imgHL_S, imgLab_L
def sobel_image(img, orient='x', thresh_min=0, thresh_max=255, convert=True):
    """Threshold the absolute Sobel gradient of an image.

    :param img: input image; converted RGB->gray when ``convert`` is True
    :param orient: 'x' for the horizontal gradient, anything else for vertical
    :param thresh_min: inclusive lower bound on the scaled 8-bit gradient
    :param thresh_max: inclusive upper bound on the scaled 8-bit gradient
    :param convert: set False when the input is already single-channel
    :return: binary mask, 1 where the scaled gradient lies in range
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) if convert else img
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    grad = cv2.Sobel(gray, cv2.CV_64F, dx, dy)
    magnitude = np.absolute(grad)
    # rescale to 0..255 and quantize to 8 bits before thresholding
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    binary = np.zeros_like(scaled)
    binary[(scaled >= thresh_min) & (scaled <= thresh_max)] = 1
    return binary
def locate_lines(binary_warped, nwindows=9, margin=100, minpix=50):
    """Find left/right lane-line pixels with a sliding-window search and
    fit a second-order polynomial x = f(y) to each line.

    Fixes vs. the original: ``np.int`` (removed in NumPy 1.24) replaced by
    builtin ``int``/floor division, and the dead debug visualization
    (``out_img`` + ``cv2.rectangle``, never returned or shown) removed.

    :param binary_warped: bird's-eye binary image (nonzero = lane candidate)
    :param nwindows: number of vertical search windows
    :param margin: half-width of each search window in pixels
    :param minpix: minimum pixel count needed to recenter the next window
    :return: (left_fit, right_fit, left_lane_inds, right_lane_inds,
              nonzerox, nonzeroy)
    """
    # Histogram of the bottom half: the two column peaks mark the lane bases.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    window_height = binary_warped.shape[0] // nwindows
    # Coordinates of every nonzero pixel in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    left_lane_inds = []
    right_lane_inds = []
    for window in range(nwindows):
        # Window bounds in y (scanning bottom-up) and x for both lines.
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Indices of nonzero pixels falling inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Re-center the next window on the mean x of the found pixels.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit x = a*y^2 + b*y + c for each line.
    # NOTE(review): as in the original, *_fit stays unbound (NameError) when
    # a line has no pixels at all; callers rely on is_lane_valid for sanity.
    if len(leftx) != 0:
        left_fit = np.polyfit(lefty, leftx, 2)
    if len(rightx) != 0:
        right_fit = np.polyfit(righty, rightx, 2)
    return left_fit, right_fit, left_lane_inds, right_lane_inds, nonzerox, nonzeroy
def locate_line_further(left_fit, right_fit, binary_warped):
    """Refine existing lane fits by searching a margin around them.

    Instead of a fresh sliding-window search, collect the nonzero pixels
    lying within +/-50 px of each previous polynomial and refit.

    :param left_fit, right_fit: previous coefficients [a, b, c]
    :param binary_warped: bird's-eye binary image
    :return: updated (left_fit, right_fit); an input fit is returned
        unchanged when no pixels are found near it
    """
    margin = 50
    nz = binary_warped.nonzero()
    ys = np.array(nz[0])
    xs = np.array(nz[1])

    def _near(fit):
        # mask of pixels within `margin` of the polynomial at their y
        center = fit[0] * ys ** 2 + fit[1] * ys + fit[2]
        return (xs > center - margin) & (xs < center + margin)

    left_mask = _near(left_fit)
    right_mask = _near(right_fit)
    if len(xs[left_mask]) != 0:
        left_fit = np.polyfit(ys[left_mask], xs[left_mask], 2)
    if len(xs[right_mask]) != 0:
        right_fit = np.polyfit(ys[right_mask], xs[right_mask], 2)
    return left_fit, right_fit
def visulizeLanes(left_fit, right_fit, left_lane_inds, right_lane_inds, binary_warped, nonzerox, nonzeroy, margin = 100):
    """Render the fitted lane lines and their search corridors with matplotlib.

    Debug/visualization helper only: draws on a copy of the warped binary
    image and displays via ``plt``; nothing is returned.

    :param left_fit, right_fit: polynomial coefficients [a, b, c] of x = f(y)
    :param left_lane_inds, right_lane_inds: indices into nonzerox/nonzeroy
        of the pixels assigned to each line
    :param binary_warped: bird's-eye binary image
    :param nonzerox, nonzeroy: coordinates of all nonzero pixels
    :param margin: half-width of the drawn search corridor in pixels
    """
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Create an image to draw on and an image to show the selection window
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    window_img = np.zeros_like(out_img)
    # Color in left and right line pixels
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    # Generate a polygon to illustrate the search window area
    # And recast the x and y points into usable format for cv2.fillPoly()
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
                                  ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
                                  ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    plt.imshow(result)
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    # y axis inverted so the image origin stays at the top-left
    plt.xlim(0, 1280)
    plt.ylim(720, 0)
def radius_curvature(binary_warped, left_fit, right_fit):
    """Compute lane curvature radii (meters) and a vehicle-offset message.

    :param binary_warped: bird's-eye binary image (only its height is used)
    :param left_fit, right_fit: pixel-space polynomial coefficients [a, b, c]
    :return: (left_curvature_m, right_curvature_m, offset_message)
    """
    # pixel -> meter conversion factors (US-highway lane assumptions)
    ym_per_pix = 30 / 720
    xm_per_pix = 3.7 / 700

    h = binary_warped.shape[0]
    ploty = np.linspace(0, h - 1, h)
    y_eval = np.max(ploty)

    def _curvature(fit):
        # Refit in metric space, then apply the curvature-radius formula
        # R = (1 + f'(y)^2)^1.5 / |f''(y)| at the bottom of the image.
        fitx = fit[0] * ploty ** 2 + fit[1] * ploty + fit[2]
        fit_cr = np.polyfit(ploty * ym_per_pix, fitx * xm_per_pix, 2)
        slope = 2 * fit_cr[0] * y_eval * ym_per_pix + fit_cr[1]
        return (1 + slope ** 2) ** 1.5 / np.absolute(2 * fit_cr[0])

    def _bottom_x(fit):
        # Lane x-position (px) at the bottom of the image.
        return fit[0] * y_eval ** 2 + fit[1] * y_eval + fit[2]

    left_curvature = _curvature(left_fit)
    right_curvature = _curvature(right_fit)

    # Offset of the lane center from the image center (assumed camera axis).
    lane_center = (_bottom_x(left_fit) + _bottom_x(right_fit)) / 2.
    center_image = 640
    center = (lane_center - center_image) * xm_per_pix
    position = "left" if center < 0 else "right"
    center = "Vehicle is {:.2f}m {}".format(center, position)
    return left_curvature, right_curvature, center
def draw_on_image(dist, warped_img, left_fit, right_fit, M, left_curvature, right_curvature, center, show_values = False):
    """Project the detected lane back onto the original frame and annotate it.

    :param dist: original (undistorted) camera frame to draw on
    :param warped_img: bird's-eye binary image (used only for its shape)
    :param left_fit, right_fit: pixel-space polynomial coefficients [a, b, c]
    :param M: forward perspective matrix; its inverse maps the overlay back
    :param left_curvature, right_curvature: curvature radii (m) to print
    :param center: vehicle-offset message string to print
    :param show_values: also display the annotated result with matplotlib
    :return: annotated BGR/RGB frame (same space as ``dist``)
    """
    ploty = np.linspace(0, warped_img.shape[0]-1, warped_img.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    y,x = warped_img.shape
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped_img).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    Minv = np.linalg.inv(M)
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (x,y))
    # Combine the result with the original image
    result = cv2.addWeighted(dist, 1, newwarp, 0.3, 0)
    cv2.putText(result, 'Left curvature: {:.0f} m'.format(left_curvature), (50, 50), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 2)
    cv2.putText(result, 'Right curvature: {:.0f} m'.format(right_curvature), (50, 100), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 2)
    cv2.putText(result, '{}'.format(center), (50, 150), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 2)
    if show_values == True:
        fig, ax = plt.subplots(figsize=(20, 10))
        ax.imshow(result)
    return result
# Load a sample frame (BGR -> RGB) for ad-hoc pipeline checks.
img = cv2.cvtColor(img_test1, cv2.COLOR_BGR2RGB)
def is_lane_valid(left_fit, right_fit):
    """Sanity-check a pair of lane-line polynomial fits.

    A pair is accepted when both fits exist, the lines are a plausible
    horizontal distance apart, and they are roughly parallel at
    mid image height.

    Fixes vs. the original: the lane-width check's result was previously
    overwritten unconditionally by the parallel-slope check (so width was
    never enforced), and the slope computation indexed into an empty fit.

    :param left_fit, right_fit: coefficients [a, b, c] of x = a*y^2+b*y+c
    :return: True when the pair passes all checks
    """
    # Both fits must exist (empty sequence => nothing was detected).
    if len(left_fit) == 0 or len(right_fit) == 0:
        return False

    # Check the horizontal separation between the two lines.
    ploty = np.linspace(0, 20, num=10)
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
    delta_lines = np.mean(right_fitx - left_fitx)
    if not (150 <= delta_lines <= 430):
        return False

    # Check the lines are roughly parallel at mid image height (y = 360).
    left_slope = 2 * left_fit[0] * 360 + left_fit[1]
    right_slope = 2 * right_fit[0] * 360 + right_fit[1]
    return bool(np.abs(left_slope - right_slope) <= 0.1)
# Define a class to receive the characteristics of each line detection
class Lane():
def __init__(self):
self.last_left = None
self.last_right = None
self.left_fit = None
self.right_fit = None
self.counter = 0
self.reset_counter = 0
# Module-level pipeline state shared by find_lanes() across frames.
lane = Lane()
def find_lanes(img):
    """Detect lane lines in one frame, tracking state across calls.

    Undistorts and warps the frame, builds a binary lane mask from several
    color/gradient channels, then either runs a full sliding-window search
    (first frame, or after repeated failures) or refines the previous fits.
    Mutates the module-level ``lane`` state object.

    :param img: RGB camera frame
    :return: (binary bird's-eye mask, left_fit, right_fit, perspective M)
    """
    #img = undistort(img, mtx, dist)
    #combined_binary = combined_s_gradient_thresholds(img)
    #warped_img, M = transform_image(combined_binary, nx, ny)
    undist_img = undistort(img)
    # Do a perspective Transform
    warp_img, M, Minv = warper(undist_img, src, dst)
    imgY, imgCr, imgHL_S, imgLab_L= Custom_channel(warp_img)
    Ybinary= channel_threshold(imgY,(215,255))
    Crbinary= channel_threshold(imgCr,(215,255))
    Lbinary= channel_threshold(imgLab_L,(215,255))
    Sbinary= channel_threshold(imgHL_S,(200,255))
    # combined1 is computed but not used below -- kept for experimentation
    combined1 = np.zeros_like(imgY)
    combined1[(Crbinary==1)|(Ybinary==1)|((Lbinary==1)&(Sbinary==1))]=1
    #axes.imshow(combined1,cmap='gray')
    #axes.set_title('Channels Combined')
    l_channel= cv2.cvtColor(warp_img, cv2.COLOR_RGB2HLS)[:,:,1]
    l_binary = np.zeros_like(l_channel)
    l_binary[(l_channel >= 120) & (l_channel <= 255)] = 1
    #axes[0,1].imshow(l_binary,cmap='gray')
    #axes[0,1].set_title('HLS-L Channel')
    image_S_channel= cv2.cvtColor(warp_img, cv2.COLOR_RGB2HLS)[:,:,2]
    convert=False
    sobelx_image= sobel_image(image_S_channel,'x', 15,60, convert)
    #axes[0,2].imshow(sobelx_image,cmap='gray')
    #axes[0,2].set_title('Sobel X')
    # Final mask: (saturation OR gradient) gated by sufficient lightness.
    combined_YCrSL = np.zeros_like(sobelx_image)
    #combined_YCrSL[((Crbinary==1)|(Ybinary==1)|((Lbinary==1)& (Sbinary == 1) )|(sobelx_image==1))& (l_binary == 1)]=1
    combined_YCrSL[((Sbinary == 1) | (sobelx_image == 1)) & (l_binary == 1) ] = 1
    # First frame: full search. Afterwards: refine around the last fits.
    if lane.counter == 0:
        lane.left_fit, lane.right_fit,left_lane_inds, right_lane_inds, nonzerox, nonzeroy = locate_lines(combined_YCrSL)
    else:
        lane.left_fit, lane.right_fit = locate_line_further(lane.left_fit, lane.right_fit, combined_YCrSL)
    #Sanity check
    status = is_lane_valid(lane.left_fit, lane.right_fit)
    if status == True:
        lane.last_left, lane.last_right = lane.left_fit, lane.right_fit
        lane.counter += 1
        lane.reset_counter = 0
    else:
        #Reset
        # After 5 consecutive bad frames, fall back to a full search;
        # otherwise reuse the last-known-good fits.
        if lane.reset_counter > 4:
            lane.left_fit, lane.right_fit,left_lane_inds, right_lane_inds, nonzerox, nonzeroy = locate_lines(combined_YCrSL)
            print(lane.left_fit)
            lane.reset_counter = 0
        else:
            lane.left_fit, lane.right_fit = lane.last_left, lane.last_right
            lane.reset_counter += 1
    return combined_YCrSL, lane.left_fit, lane.right_fit, M
def pipeline(img, show_values=False):
    """Full lane-finding pipeline for a single frame.

    Detects the lane lines, measures curvature and vehicle offset, then
    draws the annotated overlay on the original image and returns it.
    """
    warped, lfit, rfit, persp_m = find_lanes(img)
    l_curv, r_curv, offset = radius_curvature(warped, lfit, rfit)
    annotated = draw_on_image(img, warped, lfit, rfit, persp_m,
                              l_curv, r_curv, offset, show_values)
    return annotated
def process_image(img):
return pipeline(img) | [
"cv2.rectangle",
"numpy.hstack",
"numpy.polyfit",
"numpy.array",
"cv2.warpPerspective",
"cv2.destroyAllWindows",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"matplotlib.pyplot.imshow",
"numpy.mean",
"matplotlib.pyplot.plot",
"cv2.undistort",
"numpy.max",
"cv2.addWeighted",
"numpy... | [((262, 294), 'numpy.zeros', 'np.zeros', (['(6 * 9, 3)', 'np.float32'], {}), '((6 * 9, 3), np.float32)\n', (270, 294), True, 'import numpy as np\n'), ((574, 616), 'glob.glob', 'glob.glob', (['"""./camera_cal/calibration*.jpg"""'], {}), "('./camera_cal/calibration*.jpg')\n", (583, 616), False, 'import glob\n'), ((1576, 1599), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1597, 1599), False, 'import cv2\n'), ((1611, 1654), 'cv2.imread', 'cv2.imread', (['"""./camera_cal/calibration1.jpg"""'], {}), "('./camera_cal/calibration1.jpg')\n", (1621, 1654), False, 'import cv2\n'), ((1866, 1929), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'img_size', 'None', 'None'], {}), '(objpoints, imgpoints, img_size, None, None)\n', (1885, 1929), False, 'import cv2\n'), ((2168, 2208), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (2181, 2208), False, 'import cv2\n'), ((2222, 2272), 'cv2.imread', 'cv2.imread', (['"""./test_images_ll/straight_lines1.jpg"""'], {}), "('./test_images_ll/straight_lines1.jpg')\n", (2232, 2272), False, 'import cv2\n'), ((2286, 2326), 'cv2.imread', 'cv2.imread', (['"""./test_images_ll/test1.jpg"""'], {}), "('./test_images_ll/test1.jpg')\n", (2296, 2326), False, 'import cv2\n'), ((2340, 2380), 'cv2.imread', 'cv2.imread', (['"""./test_images_ll/test2.jpg"""'], {}), "('./test_images_ll/test2.jpg')\n", (2350, 2380), False, 'import cv2\n'), ((2394, 2434), 'cv2.imread', 'cv2.imread', (['"""./test_images_ll/test3.jpg"""'], {}), "('./test_images_ll/test3.jpg')\n", (2404, 2434), False, 'import cv2\n'), ((2448, 2488), 'cv2.imread', 'cv2.imread', (['"""./test_images_ll/test4.jpg"""'], {}), "('./test_images_ll/test4.jpg')\n", (2458, 2488), False, 'import cv2\n'), ((2502, 2542), 'cv2.imread', 'cv2.imread', (['"""./test_images_ll/test5.jpg"""'], {}), "('./test_images_ll/test5.jpg')\n", (2512, 2542), False, 'import cv2\n'), ((2556, 2596), 
'cv2.imread', 'cv2.imread', (['"""./test_images_ll/test6.jpg"""'], {}), "('./test_images_ll/test6.jpg')\n", (2566, 2596), False, 'import cv2\n'), ((3460, 3521), 'numpy.float32', 'np.float32', (['[(593, 450), (700, 450), (1200, 700), (200, 700)]'], {}), '([(593, 450), (700, 450), (1200, 700), (200, 700)])\n', (3470, 3521), True, 'import numpy as np\n'), ((3570, 3665), 'numpy.float32', 'np.float32', (['[(offset, 0), (width - offset, 0), (width - offset, height), (offset, height)]'], {}), '([(offset, 0), (width - offset, 0), (width - offset, height), (\n offset, height)])\n', (3580, 3665), True, 'import numpy as np\n'), ((14975, 15017), 'cv2.cvtColor', 'cv2.cvtColor', (['img_test1', 'cv2.COLOR_BGR2RGB'], {}), '(img_test1, cv2.COLOR_BGR2RGB)\n', (14987, 15017), False, 'import cv2\n'), ((796, 813), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (806, 813), False, 'import cv2\n'), ((832, 869), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (844, 869), False, 'import cv2\n'), ((989, 1034), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(9, 6)', 'None'], {}), '(gray, (9, 6), None)\n', (1014, 1034), False, 'import cv2\n'), ((2789, 2829), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (2802, 2829), False, 'import cv2\n'), ((3097, 3134), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (3124, 3134), False, 'import cv2\n'), ((3180, 3217), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst', 'src'], {}), '(dst, src)\n', (3207, 3217), False, 'import cv2\n'), ((3271, 3333), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', 'img_size'], {'flags': 'cv2.INTER_NEAREST'}), '(img, M, img_size, flags=cv2.INTER_NEAREST)\n', (3290, 3333), False, 'import cv2\n'), ((3841, 3859), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (3854, 3859), True, 
'import numpy as np\n'), ((4640, 4658), 'numpy.absolute', 'np.absolute', (['sobel'], {}), '(sobel)\n', (4651, 4658), True, 'import numpy as np\n'), ((4739, 4764), 'numpy.zeros_like', 'np.zeros_like', (['sobel_8bit'], {}), '(sobel_8bit)\n', (4752, 4764), True, 'import numpy as np\n'), ((5323, 5353), 'numpy.int', 'np.int', (['(histogram.shape[0] / 2)'], {}), '(histogram.shape[0] / 2)\n', (5329, 5353), True, 'import numpy as np\n'), ((5370, 5401), 'numpy.argmax', 'np.argmax', (['histogram[:midpoint]'], {}), '(histogram[:midpoint])\n', (5379, 5401), True, 'import numpy as np\n'), ((5520, 5561), 'numpy.int', 'np.int', (['(binary_warped.shape[0] / nwindows)'], {}), '(binary_warped.shape[0] / nwindows)\n', (5526, 5561), True, 'import numpy as np\n'), ((5688, 5708), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (5696, 5708), True, 'import numpy as np\n'), ((5725, 5745), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (5733, 5745), True, 'import numpy as np\n'), ((7841, 7871), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (7855, 7871), True, 'import numpy as np\n'), ((7895, 7926), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (7909, 7926), True, 'import numpy as np\n'), ((8550, 8570), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (8558, 8570), True, 'import numpy as np\n'), ((8587, 8607), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (8595, 8607), True, 'import numpy as np\n'), ((9729, 9795), 'numpy.linspace', 'np.linspace', (['(0)', '(binary_warped.shape[0] - 1)', 'binary_warped.shape[0]'], {}), '(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n', (9740, 9795), True, 'import numpy as np\n'), ((10119, 10141), 'numpy.zeros_like', 'np.zeros_like', (['out_img'], {}), '(out_img)\n', (10132, 10141), True, 'import numpy as np\n'), ((10729, 10778), 'numpy.hstack', 'np.hstack', (['(left_line_window1, 
left_line_window2)'], {}), '((left_line_window1, left_line_window2))\n', (10738, 10778), True, 'import numpy as np\n'), ((11028, 11079), 'numpy.hstack', 'np.hstack', (['(right_line_window1, right_line_window2)'], {}), '((right_line_window1, right_line_window2))\n', (11037, 11079), True, 'import numpy as np\n'), ((11282, 11329), 'cv2.addWeighted', 'cv2.addWeighted', (['out_img', '(1)', 'window_img', '(0.3)', '(0)'], {}), '(out_img, 1, window_img, 0.3, 0)\n', (11297, 11329), False, 'import cv2\n'), ((11335, 11353), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (11345, 11353), True, 'import matplotlib.pyplot as plt\n'), ((11359, 11401), 'matplotlib.pyplot.plot', 'plt.plot', (['left_fitx', 'ploty'], {'color': '"""yellow"""'}), "(left_fitx, ploty, color='yellow')\n", (11367, 11401), True, 'import matplotlib.pyplot as plt\n'), ((11407, 11450), 'matplotlib.pyplot.plot', 'plt.plot', (['right_fitx', 'ploty'], {'color': '"""yellow"""'}), "(right_fitx, ploty, color='yellow')\n", (11415, 11450), True, 'import matplotlib.pyplot as plt\n'), ((11456, 11473), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1280)'], {}), '(0, 1280)\n', (11464, 11473), True, 'import matplotlib.pyplot as plt\n'), ((11479, 11495), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(720)', '(0)'], {}), '(720, 0)\n', (11487, 11495), True, 'import matplotlib.pyplot as plt\n'), ((11582, 11648), 'numpy.linspace', 'np.linspace', (['(0)', '(binary_warped.shape[0] - 1)', 'binary_warped.shape[0]'], {}), '(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n', (11593, 11648), True, 'import numpy as np\n'), ((11994, 12007), 'numpy.max', 'np.max', (['ploty'], {}), '(ploty)\n', (12000, 12007), True, 'import numpy as np\n'), ((12082, 12139), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(left_fitx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)\n', (12092, 12139), True, 'import numpy as np\n'), ((12156, 12214), 'numpy.polyfit', 'np.polyfit', (['(ploty 
* ym_per_pix)', '(right_fitx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, right_fitx * xm_per_pix, 2)\n', (12166, 12214), True, 'import numpy as np\n'), ((13376, 13436), 'numpy.linspace', 'np.linspace', (['(0)', '(warped_img.shape[0] - 1)', 'warped_img.shape[0]'], {}), '(0, warped_img.shape[0] - 1, warped_img.shape[0])\n', (13387, 13436), True, 'import numpy as np\n'), ((13734, 13778), 'numpy.dstack', 'np.dstack', (['(warp_zero, warp_zero, warp_zero)'], {}), '((warp_zero, warp_zero, warp_zero))\n', (13743, 13778), True, 'import numpy as np\n'), ((14020, 14052), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (14029, 14052), True, 'import numpy as np\n'), ((14174, 14190), 'numpy.linalg.inv', 'np.linalg.inv', (['M'], {}), '(M)\n', (14187, 14190), True, 'import numpy as np\n'), ((14297, 14342), 'cv2.warpPerspective', 'cv2.warpPerspective', (['color_warp', 'Minv', '(x, y)'], {}), '(color_warp, Minv, (x, y))\n', (14316, 14342), False, 'import cv2\n'), ((14407, 14448), 'cv2.addWeighted', 'cv2.addWeighted', (['dist', '(1)', 'newwarp', '(0.3)', '(0)'], {}), '(dist, 1, newwarp, 0.3, 0)\n', (14422, 14448), False, 'import cv2\n'), ((16968, 16987), 'numpy.zeros_like', 'np.zeros_like', (['imgY'], {}), '(imgY)\n', (16981, 16987), True, 'import numpy as np\n'), ((17231, 17255), 'numpy.zeros_like', 'np.zeros_like', (['l_channel'], {}), '(l_channel)\n', (17244, 17255), True, 'import numpy as np\n'), ((17687, 17714), 'numpy.zeros_like', 'np.zeros_like', (['sobelx_image'], {}), '(sobelx_image)\n', (17700, 17714), True, 'import numpy as np\n'), ((1233, 1285), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['img', '(9, 6)', 'corners', 'ret'], {}), '(img, (9, 6), corners, ret)\n', (1258, 1285), False, 'import cv2\n'), ((1314, 1350), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1326, 1350), False, 'import cv2\n'), ((3994, 4032), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 
'cv2.COLOR_RGB2YCrCb'], {}), '(img, cv2.COLOR_RGB2YCrCb)\n', (4006, 4032), False, 'import cv2\n'), ((4062, 4100), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2YCrCb'], {}), '(img, cv2.COLOR_RGB2YCrCb)\n', (4074, 4100), False, 'import cv2\n'), ((4132, 4168), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (4144, 4168), False, 'import cv2\n'), ((4200, 4236), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2Lab'], {}), '(img, cv2.COLOR_RGB2Lab)\n', (4212, 4236), False, 'import cv2\n'), ((4427, 4464), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (4439, 4464), False, 'import cv2\n'), ((4525, 4558), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_64F', '(1)', '(0)'], {}), '(gray, cv2.CV_64F, 1, 0)\n', (4534, 4558), False, 'import cv2\n'), ((4585, 4618), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_64F', '(0)', '(1)'], {}), '(gray, cv2.CV_64F, 0, 1)\n', (4594, 4618), False, 'import cv2\n'), ((5421, 5452), 'numpy.argmax', 'np.argmax', (['histogram[midpoint:]'], {}), '(histogram[midpoint:])\n', (5430, 5452), True, 'import numpy as np\n'), ((6092, 6148), 'numpy.dstack', 'np.dstack', (['(binary_warped, binary_warped, binary_warped)'], {}), '((binary_warped, binary_warped, binary_warped))\n', (6101, 6148), True, 'import numpy as np\n'), ((6705, 6805), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xleft_low, win_y_low)', '(win_xleft_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xleft_low, win_y_low), (win_xleft_high,\n win_y_high), (0, 255, 0), 2)\n', (6718, 6805), False, 'import cv2\n'), ((6815, 6917), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xright_low, win_y_low)', '(win_xright_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xright_low, win_y_low), (win_xright_high,\n win_y_high), (0, 255, 0), 2)\n', (6828, 6917), False, 'import cv2\n'), ((8230, 8257), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', 
'(2)'], {}), '(lefty, leftx, 2)\n', (8240, 8257), True, 'import numpy as np\n'), ((8305, 8334), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (8315, 8334), True, 'import numpy as np\n'), ((9397, 9424), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (9407, 9424), True, 'import numpy as np\n'), ((9472, 9501), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (9482, 9501), True, 'import numpy as np\n'), ((10040, 10096), 'numpy.dstack', 'np.dstack', (['(binary_warped, binary_warped, binary_warped)'], {}), '((binary_warped, binary_warped, binary_warped))\n', (10049, 10096), True, 'import numpy as np\n'), ((11161, 11185), 'numpy.int_', 'np.int_', (['[left_line_pts]'], {}), '([left_line_pts])\n', (11168, 11185), True, 'import numpy as np\n'), ((11229, 11254), 'numpy.int_', 'np.int_', (['[right_line_pts]'], {}), '([right_line_pts])\n', (11236, 11254), True, 'import numpy as np\n'), ((12358, 12389), 'numpy.absolute', 'np.absolute', (['(2 * left_fit_cr[0])'], {}), '(2 * left_fit_cr[0])\n', (12369, 12389), True, 'import numpy as np\n'), ((12485, 12517), 'numpy.absolute', 'np.absolute', (['(2 * right_fit_cr[0])'], {}), '(2 * right_fit_cr[0])\n', (12496, 12517), True, 'import numpy as np\n'), ((14134, 14148), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (14141, 14148), True, 'import numpy as np\n'), ((14879, 14909), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (14891, 14909), True, 'import matplotlib.pyplot as plt\n'), ((15257, 15283), 'numpy.linspace', 'np.linspace', (['(0)', '(20)'], {'num': '(10)'}), '(0, 20, num=10)\n', (15268, 15283), True, 'import numpy as np\n'), ((15464, 15495), 'numpy.mean', 'np.mean', (['(right_fitx - left_fitx)'], {}), '(right_fitx - left_fitx)\n', (15471, 15495), True, 'import numpy as np\n'), ((15835, 15855), 'numpy.abs', 'np.abs', (['(left - right)'], {}), 
'(left - right)\n', (15841, 15855), True, 'import numpy as np\n'), ((17166, 17207), 'cv2.cvtColor', 'cv2.cvtColor', (['warp_img', 'cv2.COLOR_RGB2HLS'], {}), '(warp_img, cv2.COLOR_RGB2HLS)\n', (17178, 17207), False, 'import cv2\n'), ((17425, 17466), 'cv2.cvtColor', 'cv2.cvtColor', (['warp_img', 'cv2.COLOR_RGB2HLS'], {}), '(warp_img, cv2.COLOR_RGB2HLS)\n', (17437, 17466), False, 'import cv2\n'), ((3762, 3773), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (3768, 3773), True, 'import numpy as np\n'), ((4700, 4717), 'numpy.max', 'np.max', (['sobel_abs'], {}), '(sobel_abs)\n', (4706, 4717), True, 'import numpy as np\n'), ((13673, 13698), 'numpy.zeros_like', 'np.zeros_like', (['warped_img'], {}), '(warped_img)\n', (13686, 13698), True, 'import numpy as np\n'), ((7617, 7650), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (7624, 7650), True, 'import numpy as np\n'), ((7740, 7774), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (7747, 7774), True, 'import numpy as np\n'), ((10533, 10571), 'numpy.vstack', 'np.vstack', (['[left_fitx - margin, ploty]'], {}), '([left_fitx - margin, ploty])\n', (10542, 10571), True, 'import numpy as np\n'), ((10828, 10867), 'numpy.vstack', 'np.vstack', (['[right_fitx - margin, ploty]'], {}), '([right_fitx - margin, ploty])\n', (10837, 10867), True, 'import numpy as np\n'), ((13891, 13920), 'numpy.vstack', 'np.vstack', (['[left_fitx, ploty]'], {}), '([left_fitx, ploty])\n', (13900, 13920), True, 'import numpy as np\n'), ((10631, 10669), 'numpy.vstack', 'np.vstack', (['[left_fitx + margin, ploty]'], {}), '([left_fitx + margin, ploty])\n', (10640, 10669), True, 'import numpy as np\n'), ((10928, 10967), 'numpy.vstack', 'np.vstack', (['[right_fitx + margin, ploty]'], {}), '([right_fitx + margin, ploty])\n', (10937, 10967), True, 'import numpy as np\n'), ((13974, 14004), 'numpy.vstack', 'np.vstack', (['[right_fitx, ploty]'], {}), '([right_fitx, 
ploty])\n', (13983, 14004), True, 'import numpy as np\n')] |
import numpy as np
import cv2
# 16-point compass rose used to cast rays outward from a candidate centre.
# See Ellipse.calculating_coordinates for how each name maps to a pixel step
# (each occurrence of 'north'/'south'/'east'/'west' in the name moves one px).
circle_directions = ('north', 'north-northeast', 'northeast', 'east-northeast', 'east', 'east-southeast',
                     'southeast', 'south-southeast', 'south', 'south-southwest', 'southwest', 'west-southwest',
                     'west', 'west-northwest', 'northwest', 'north-northwest')
class Ellipse:
    """Locate a roughly circular blob in a binary image by ray casting.

    For each contour, rays are cast from the bounding-box centre in the 16
    compass directions; the distance at which each ray leaves the black
    background is collected, and the contour is accepted when enough of
    those distances fall inside the expected radius interval.
    """

    def __init__(self):
        # Acceptable (min, max) radius interval in pixels.
        self.radius_size = (25, 90)
        # Percentage of rays whose radius must fall inside the interval.
        self.radius_validate_threshold = 40
        self.min_area_size = 30*30
        self.rotate_semicircle = False
        self.angle_rotate = 30
        # Pixel values of the binary image.
        self.black_color = 0
        self.white_color = 255
        self.lin = self.col = 0
        self.image = None

    def search_ellipse(self, image, contours):
        """Return (center, radius) for the first contour that validates.

        If no contour validates, radius is 0 and center is the last
        candidate examined (or 0 when *contours* is empty).
        """
        self.image = image
        self.lin, self.col = image.shape
        center = rad = 0
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            center = (x + int(w / 2), y + int(h / 2))
            # NOTE(review): x (a column index) is compared with self.lin
            # (row count) and y with self.col -- these look swapped, but
            # are kept as-is to preserve behaviour; confirm with callers.
            if x < self.lin and y < self.col:
                radius = [self.calculate_radius(center=center, direction=d)
                          for d in circle_directions]
                if self.validate_radius(radius):
                    rad = int(np.array(radius).max())
                    break
        return center, rad

    def calculate_radius(self, center, direction):
        """Walk from *center* along *direction* until the ray crosses from
        black into non-black, skipping any white pixels directly under the
        start point.  Returns the measured radius, or 0 if the ray reaches
        the image border first."""
        x, y = center
        inside_white = self.image[y, x] == self.white_color
        while (1 < x < self.col - 2) and (1 < y < self.lin - 2):
            x, y = self.calculating_coordinates((x, y), direction)
            if inside_white:
                # Still leaving the initial white region under the centre.
                if self.image[y, x] != self.white_color:
                    inside_white = False
            elif self.image[y, x] != self.black_color:
                return self.calc_radius(center, (x, y))
        return 0

    def validate_radius(self, radius):
        """True when enough rays measured a radius inside self.radius_size."""
        acceptable = range(self.radius_size[1])[slice(*self.radius_size)]
        hits = sum(1 for r in radius if r in acceptable)
        required = int(len(circle_directions) * self.radius_validate_threshold / 100)
        return hits >= required

    @staticmethod
    def calc_radius(center, position):
        """Horizontal distance when the point lies to the right of the
        centre, otherwise vertical distance."""
        dx = position[0] - center[0]
        if dx > 0:
            return abs(dx)
        return abs(position[1] - center[1])

    @staticmethod
    def calculating_coordinates(coordinates, direction):
        """Step one pixel per occurrence of each compass word in *direction*
        (e.g. 'north-northeast' moves up twice and right once)."""
        x, y = coordinates
        name = str(direction)
        y += name.count('south') - name.count('north')
        x += name.count('east') - name.count('west')
        return x, y
| [
"numpy.sum",
"numpy.array",
"cv2.boundingRect"
] | [((904, 929), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (920, 929), False, 'import cv2\n'), ((2161, 2177), 'numpy.sum', 'np.sum', (['validate'], {}), '(validate)\n', (2167, 2177), True, 'import numpy as np\n'), ((1314, 1330), 'numpy.array', 'np.array', (['radius'], {}), '(radius)\n', (1322, 1330), True, 'import numpy as np\n')] |
from ..utils import constants, utils
import folium
from folium.plugins import HeatMap
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import shapely
from geojson import LineString
import geopandas as gpd
import json
import warnings
# warnings.warn stack level: report warnings at the caller of the plotting
# function rather than inside this module.
STACKLEVEL = 2
# COLOR = {
# 0: '#FF0000', # Red
# 1: '#008000', # Green
# 2: '#000080', # Navy
# 3: '#800000', # Maroon
# 4: '#FFD700', # Gold
# 5: '#00FF00', # Lime
# 6: '#800080', # Purple
# 7: '#00FFFF', # Aqua
# 8: '#DC143C', # Crimson
# 9: '#0000FF', # Blue
# 10: '#F08080', # LightCoral
# 11: '#FF00FF', # Fuchsia
# 12: '#FF8C00', # DarkOrange
# 13: '#6A5ACD', # SlateBlue
# 14: '#8B4513', # SaddleBrown
# 15: '#1E90FF', # DodgerBlue
# 16: '#FFFF00', # Yellow
# 17: '#808080', # Gray
# 18: '#008080', # Teal
# 19: '#9370DB', # MediumPurple
# 20: '#2F4F4F' # DarkSlateGray
# }
# PALETTE FOR COLOR BLINDNESS
# from <http://mkweb.bcgsc.ca/colorblind/palettes.mhtml>
# Index -> colour-blind-safe hex colour; get_color() cycles through this
# mapping (k % len(COLOR)) to colour clusters/users.
COLOR = {
    0: '#6A0213',
    1: '#008607',
    2: '#F60239',
    3: '#00E307',
    4: '#FFDC3D',
    5: '#003C86',
    6: '#9400E6',
    7: '#009FFA',
    8: '#FF71FD',
    9: '#7CFFFA',
    10: '#68023F',
    11: '#008169',
    12: '#EF0096',
    13: '#00DCB5',
    14: '#FFCFE2'
}
def get_color(k=-2, color_dict=COLOR):
    """Map an index to a hex colour string.

    k == -1 returns gray (reserved for noise/unclustered); k < -1 returns a
    random colour from *color_dict*; any other k picks deterministically,
    cycling through the palette.
    """
    if k == -1:
        return '#808080'  # Gray
    if k < -1:
        return np.random.choice(list(color_dict.values()))
    return color_dict[k % len(color_dict)]
def random_hex():
    """Return a random colour as an upper-case '#RRGGBB' hex string."""
    channels = tuple(np.random.randint(0, 255) for _ in range(3))
    return '#%02X%02X%02X' % channels
def traj_style_function(weight, color, opacity, dashArray):
    """Build a folium ``style_function`` closure with fixed line styling.

    The returned callable ignores its *feature* argument and always yields
    the same style dict (color, weight, opacity, dashArray).
    """
    def style(feature):
        return dict(color=color, weight=weight, opacity=opacity, dashArray=dashArray)
    return style
def plot_trajectory(tdf, map_f=None, max_users=None, max_points=1000, style_function=traj_style_function,
                    tiles='cartodbpositron', zoom=12, hex_color=None, weight=2, opacity=0.75, dashArray='0, 0',
                    start_end_markers=True, control_scale=True):
    """
    Plot the trajectories of a TrajDataFrame as polylines on a folium map.

    :param tdf: TrajDataFrame
        TrajDataFrame to be plotted.
    :param map_f: folium.Map
        `folium.Map` object where the trajectory will be plotted. If `None`, a new map will be created.
    :param max_users: int
        maximum number of users whose trajectories should be plotted.
    :param max_points: int
        maximum number of points per user to plot.
        If necessary, a user's trajectory will be down-sampled to have at most `max_points` points.
    :param style_function: lambda function
        function specifying the style (weight, color, opacity) of the GeoJson object.
    :param tiles: str
        folium's `tiles` parameter.
    :param zoom: int
        initial zoom.
    :param hex_color: str
        hex color of the trajectory line. If `None` a random color will be generated for each trajectory.
    :param weight: float
        thickness of the trajectory line.
    :param opacity: float
        opacity (alpha level) of the trajectory line.
    :param dashArray: str
        style of the trajectory line: '0, 0' for a solid trajectory line, '5, 5' for a dashed line
        (where dashArray='size of segment, size of spacing').
    :param start_end_markers: bool
        add markers on the start and end points of the trajectory.
    :param control_scale: bool
        if `True`, add scale information in the bottom left corner of the visualization. The default is `True`.
    Returns
    -------
    `folium.Map` object with the plotted trajectories.
    """
    if max_users is None:
        max_users = 10
        warnings.warn("Only the trajectories of the first 10 users will be plotted. Use the argument `max_users` to specify the desired number of users, or filter the TrajDataFrame.", stacklevel=STACKLEVEL)
    # group by user and keep only the first `max_users`
    nu = 0
    try:
        # column 'uid' is present in the TrajDataFrame
        groups = tdf.groupby(constants.UID)
    except KeyError:
        # column 'uid' is not present
        groups = [[None, tdf]]
    # warn about down-sampling only once across all users
    warned = False
    for user, df in groups:
        if nu >= max_users:
            break
        nu += 1

        # keep only (lon, lat) pairs for the polyline
        traj = df[[constants.LONGITUDE, constants.LATITUDE]]

        if max_points is None:
            di = 1
        else:
            if not warned:
                warnings.warn("If necessary, trajectories will be down-sampled to have at most `max_points` points. To avoid this, specify `max_points=None`.", stacklevel=STACKLEVEL)
                warned = True
            # down-sampling stride so at most `max_points` points remain
            di = max(1, len(traj) // max_points)
        traj = traj[::di]

        if nu == 1 and map_f is None:
            # initialise map centred on the median coordinate of the first user
            center = list(np.median(traj, axis=0)[::-1])
            map_f = folium.Map(location=center, zoom_start=zoom, tiles=tiles, control_scale=control_scale)

        trajlist = traj.values.tolist()
        # GeoJSON LineString takes (lon, lat) pairs
        line = LineString(trajlist)

        if hex_color is None:
            color = get_color(-2)
        else:
            color = hex_color

        tgeojson = folium.GeoJson(line,
                                  name='tgeojson',
                                  style_function=style_function(weight, color, opacity, dashArray)
                                  )
        tgeojson.add_to(map_f)

        if start_end_markers:

            # green marker + popup on the chronologically first point
            dtime, la, lo = df.loc[df['datetime'].idxmin()]\
                [[constants.DATETIME, constants.LATITUDE, constants.LONGITUDE]].values
            dtime = dtime.strftime('%Y/%m/%d %H:%M')
            mker = folium.Marker(trajlist[0][::-1], icon=folium.Icon(color='green'))
            popup = folium.Popup('<i>Start</i><BR>{}<BR>Coord: <a href="https://www.google.co.uk/maps/place/{},{}" target="_blank">{}, {}</a>'.\
                format(dtime, la, lo, np.round(la, 4), np.round(lo, 4)), max_width=300)
            mker = mker.add_child(popup)
            mker.add_to(map_f)

            # red marker + popup on the chronologically last point
            dtime, la, lo = df.loc[df['datetime'].idxmax()]\
                [[constants.DATETIME, constants.LATITUDE, constants.LONGITUDE]].values
            dtime = dtime.strftime('%Y/%m/%d %H:%M')
            mker = folium.Marker(trajlist[-1][::-1], icon=folium.Icon(color='red'))
            popup = folium.Popup('<i>End</i><BR>{}<BR>Coord: <a href="https://www.google.co.uk/maps/place/{},{}" target="_blank">{}, {}</a>'.\
                format(dtime, la, lo, np.round(la, 4), np.round(lo, 4)), max_width=300)
            mker = mker.add_child(popup)
            mker.add_to(map_f)

    return map_f
def plot_points_heatmap(tdf, map_f=None, max_points=1000,
                        tiles='cartodbpositron', zoom=2,
                        min_opacity=0.5, radius=25, blur=15,
                        gradient=None):
    """
    Plot the points of a TrajDataFrame as a heatmap on a folium map.

    Parameters
    ----------
    map_f : folium.Map, optional
        map to draw on; a new one is created (and centred on the median
        coordinate of the points) when `None`. The default is `None`.
    max_points : int, optional
        maximum number of points to feed the heat layer; the frame is
        down-sampled with a constant stride if it is larger. The default
        is `1000`.
    tiles : str, optional
        folium's `tiles` parameter. The default is 'cartodbpositron'.
    zoom : int, optional
        initial zoom of a newly created map. The default is `2`.
    min_opacity : float, optional
        minimum opacity (alpha level) the heat will start at. The default
        is `0.5`.
    radius : int, optional
        radius of each "point" of the heatmap. The default is `25`.
    blur : int, optional
        amount of blur. The default is `15`.
    gradient : dict, optional
        color gradient configuration, e.g. {0.4: 'blue', 0.65: 'lime', 1: 'red'}.
        The default is `None`.

    Returns
    -------
    folium.Map
        the map with the heat layer added.
    """
    # Down-sample so at most `max_points` rows feed the heat layer.
    stride = 1 if max_points is None else max(1, len(tdf) // max_points)
    pts = tdf[::stride][[constants.LATITUDE, constants.LONGITUDE]]

    if map_f is None:
        # Centre a fresh map on the median coordinate of the points.
        med = np.median(pts[[constants.LONGITUDE, constants.LATITUDE]], axis=0)
        map_f = folium.Map(zoom_start=zoom, tiles=tiles, control_scale=True,
                           location=list(med[::-1]))

    heat_layer = HeatMap(pts.values,
                         min_opacity=min_opacity, radius=radius,
                         blur=blur, gradient=gradient)
    heat_layer.add_to(map_f)
    return map_f
def plot_stops(stdf, map_f=None, max_users=None, tiles='cartodbpositron', zoom=12,
               hex_color=None, opacity=0.3, radius=12, number_of_sides=4, popup=True, control_scale=True):
    """
    Plot the stops of a TrajDataFrame as polygon markers on a folium map.

    :param stdf: TrajDataFrame
        Requires a TrajDataFrame with stops or clusters, output of `preprocessing.detection.stops`
        or `preprocessing.clustering.cluster`. The column `constants.LEAVING_DATETIME` must be present.
    :param map_f: folium.Map
        `folium.Map` object where the stops will be plotted. If `None`, a new map will be created.
    :param max_users: int
        maximum number of users whose stops should be plotted.
    :param tiles: str
        folium's `tiles` parameter.
    :param zoom: int
        initial zoom.
    :param hex_color: str
        hex color of the stop markers. If `None` a random color will be generated for each user.
    :param opacity: float
        opacity (alpha level) of the stop makers.
    :param radius: float
        size of the markers.
    :param number_of_sides: int
        number of sides of the markers.
    :param popup: bool
        if `True`, when clicking on a marker a popup window displaying information on the stop will appear.
    :param control_scale: bool
        if `True`, add scale information in the bottom left corner of the visualization. The default is `True`.
    Returns
    -------
    `folium.Map` object with the plotted stops.
    """
    if max_users is None:
        max_users = 10
        warnings.warn("Only the stops of the first 10 users will be plotted. Use the argument `max_users` to specify the desired number of users, or filter the TrajDataFrame.", stacklevel=STACKLEVEL)

    if map_f is None:
        # initialise map centred on the median coordinate of all stops
        lo_la = stdf[['lng', 'lat']].values
        center = list(np.median(lo_la, axis=0)[::-1])
        map_f = folium.Map(location=center, zoom_start=zoom, tiles=tiles, control_scale=control_scale)

    # group by user and keep only the first `max_users`
    nu = 0
    try:
        # column 'uid' is present in the TrajDataFrame
        groups = stdf.groupby(constants.UID)
    except KeyError:
        # column 'uid' is not present
        groups = [[None, stdf]]

    for user, df in groups:
        if nu >= max_users:
            break
        nu += 1

        # one colour per user (unless a fixed hex_color was requested)
        if hex_color is None:
            color = get_color(-2)
        else:
            color = hex_color

        for idx, row in df.iterrows():

            la = row[constants.LATITUDE]
            lo = row[constants.LONGITUDE]
            t0 = row[constants.DATETIME]
            try:
                t1 = row[constants.LEAVING_DATETIME]
                _number_of_sides = number_of_sides
                marker_radius = radius
            except KeyError:
                # no leaving time: treat as a point event and draw it smaller
                t1 = t0
                _number_of_sides = number_of_sides
                marker_radius = radius // 2

            u = user
            try:
                # cluster column present: colour the marker by cluster id
                ncluster = row[constants.CLUSTER]
                cl = '<BR>Cluster: {}'.format(ncluster)
                color = get_color(ncluster)
            except (KeyError, NameError):
                cl = ''

            fpoly = folium.RegularPolygonMarker([la, lo],
                                        radius=marker_radius,
                                        color=color,
                                        fill_color=color,
                                        fill_opacity=opacity,
                                        number_of_sides=_number_of_sides
                                        )
            # NOTE(review): `popup` (the boolean parameter) is rebound to a
            # folium.Popup object on the first marker; subsequent iterations
            # still enter this branch because the object is truthy.
            if popup:
                popup = folium.Popup('User: {}<BR>Coord: <a href="https://www.google.co.uk/maps/place/{},{}" target="_blank">{}, {}</a><BR>Arr: {}<BR>Dep: {}{}' \
                    .format(u, la, lo, np.round(la, 4), np.round(lo, 4),
                            t0.strftime('%Y/%m/%d %H:%M'),
                            t1.strftime('%Y/%m/%d %H:%M'), cl), max_width=300)
                fpoly = fpoly.add_child(popup)

            fpoly.add_to(map_f)

    return map_f
def plot_diary(cstdf, user, start_datetime=None, end_datetime=None, ax=None, legend=False):
    """
    Plot a mobility diary of an individual in a TrajDataFrame. It requires a TrajDataFrame with clusters, output of `preprocessing.clustering.cluster`. The column `constants.CLUSTER` must be present.
    Parameters
    ----------
    user : str or int
        user identifier whose diary should be plotted. If `None`, the whole TrajDataFrame is used.
    start_datetime : datetime.datetime, optional
        only stops made after this date will be plotted. If `None` the datetime of the oldest stop will be selected. The default is `None`.
    end_datetime : datetime.datetime, optional
        only stops made before this date will be plotted. If `None` the datetime of the newest stop will be selected. The default is `None`.
    ax : matplotlib.axes, optional
        axes where the diary will be plotted. If `None` a new ax is created. The default is `None`.
    legend : bool, optional
        If `True`, legend with cluster IDs is shown. The default is `False`.
    Returns
    -------
    matplotlib.axes
        the `matplotlib.axes` object of the plotted diary.
    Raises
    ------
    KeyError
        if `user` is not `None` and does not appear in `cstdf`.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(20, 2))
    if user is None:
        df = cstdf
    else:
        df = cstdf[cstdf[constants.UID] == user]
        if len(df) == 0:
            raise KeyError("""User id is not in the input TrajDataFrame.""")
    # TODO: add warning if days between start_datetime and end_datetime do not overlap with cstdf
    # String datetimes are accepted and parsed; `None` falls back to the data range.
    if start_datetime is None:
        start_datetime = df[constants.DATETIME].min()
    elif type(start_datetime) is str:
        start_datetime = pd.to_datetime(start_datetime)
    if end_datetime is None:
        end_datetime = df[constants.LEAVING_DATETIME].max()
    elif type(end_datetime) is str:
        end_datetime = pd.to_datetime(end_datetime)
    # Draw one coloured vertical span per stop (arrival -> departure), coloured by cluster.
    # A cluster is labelled only the first time it appears so the legend has one entry per cluster.
    current_labels = []
    for idx, row in df.iterrows():
        t0 = row[constants.DATETIME]
        t1 = row[constants.LEAVING_DATETIME]
        cl = row[constants.CLUSTER]
        color = get_color(cl)
        if start_datetime <= t0 <= end_datetime:
            if cl in current_labels:
                ax.axvspan(t0.to_pydatetime(), t1.to_pydatetime(), lw=0.0, alpha=0.75, color=color)
            else:
                current_labels += [cl]
                ax.axvspan(t0.to_pydatetime(), t1.to_pydatetime(), lw=0.0, alpha=0.75, color=color, label=cl)
    plt.xlim(start_datetime, end_datetime)
    if legend:
        handles, labels_str = ax.get_legend_handles_labels()
        labels = list(map(int, labels_str))
        # sort them by labels
        import operator
        hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
        handles2, labels2 = zip(*hl)
        ax.legend(handles2, labels2, ncol=15, bbox_to_anchor=(1., -0.2), frameon=0)
    ax.set_title('user %s' % user)
    return ax
def flow_style_function(weight, color, opacity, weight_factor, flow_exp):
    """Build a GeoJson style callable for a single flow edge.

    The returned callable ignores its `feature` argument and always produces
    the same style dict; the edge thickness grows as
    `weight_factor * weight ** flow_exp`.
    """
    def _style(feature):
        return dict(color=color, weight=weight_factor * weight ** flow_exp, opacity=opacity)
    return _style
def plot_flows(fdf, map_f=None, min_flow=0, tiles='cartodbpositron', zoom=6, flow_color='red', opacity=0.5,
               flow_weight=5, flow_exp=0.5, style_function=flow_style_function,
               flow_popup=False, num_od_popup=5, tile_popup=True, radius_origin_point=5,
               color_origin_point='#3186cc', control_scale=True):
    """
    :param fdf: FlowDataFrame
        `FlowDataFrame` to visualize.
    :param map_f: folium.Map
        `folium.Map` object where the flows will be plotted. If `None`, a new map will be created.
    :param min_flow: float
        only flows larger than `min_flow` will be plotted.
    :param tiles: str
        folium's `tiles` parameter.
    :param zoom: int
        initial zoom.
    :param flow_color: str
        color of the flow edges
    :param opacity: float
        opacity (alpha level) of the flow edges.
    :param flow_weight: float
        weight factor used in the function to compute the thickness of the flow edges.
    :param flow_exp: float
        weight exponent used in the function to compute the thickness of the flow edges.
    :param style_function: lambda function
        GeoJson style function.
    :param flow_popup: bool
        if `True`, when clicking on a flow edge a popup window displaying information on the flow will appear.
    :param num_od_popup: int
        number of origin-destination pairs to show in the popup window of each origin location.
    :param tile_popup: bool
        if `True`, when clicking on a location marker a popup window displaying information on the flows
        departing from that location will appear.
    :param radius_origin_point: float
        size of the location markers.
    :param color_origin_point: str
        color of the location markers.
    :param control_scale: bool
        if `True`, add scale information in the bottom left corner of the visualization. The default is `True`.
    Returns
    -------
    `folium.Map` object with the plotted flows.
    """
    if map_f is None:
        # initialise map centred on the mean of the tessellation centroids
        lon, lat = np.mean(np.array(list(fdf.tessellation.geometry.apply(utils.get_geom_centroid).values)), axis=0)
        map_f = folium.Map(location=[lat,lon], tiles=tiles, zoom_start=zoom, control_scale=control_scale)
    mean_flows = fdf[constants.FLOW].mean()
    # First pass: draw one line per origin-destination pair, styled by flow magnitude.
    O_groups = fdf.groupby(by=constants.ORIGIN)
    for O, OD in O_groups:
        geom = fdf.get_geometry(O)
        lonO, latO = utils.get_geom_centroid(geom)
        for D, T in OD[[constants.DESTINATION, constants.FLOW]].values:
            # skip self-flows and flows below the display threshold
            if O == D:
                continue
            if T < min_flow:
                continue
            geom = fdf.get_geometry(D)
            lonD, latD = utils.get_geom_centroid(geom)
            gjc = LineString([(lonO,latO), (lonD,latD)])
            # edge thickness is scaled relative to the mean flow of the whole FlowDataFrame
            fgeojson = folium.GeoJson(gjc,
                                      name='geojson',
                                      style_function = style_function(T / mean_flows, flow_color, opacity,
                                                                      flow_weight, flow_exp)
                                      )
            if flow_popup:
                popup = folium.Popup('flow from %s to %s: %s'%(O, D, int(T)), max_width=300)
                fgeojson = fgeojson.add_child(popup)
            fgeojson.add_to(map_f)
    # Second pass: draw one circle marker per origin, optionally with a popup
    # listing the top `num_od_popup` outgoing flows.
    if radius_origin_point > 0:
        for O, OD in O_groups:
            # single quotes are replaced to keep the generated HTML/JS valid
            name = 'origin: %s' % O.replace('\'', '_')
            T_D = [[T, D] for D, T in OD[[constants.DESTINATION, constants.FLOW]].values]
            trips_info = '<br/>'.join(["flow to %s: %s" %
                                       (dd.replace('\'', '_'), int(tt)) \
                                       for tt, dd in sorted(T_D, reverse=True)[:num_od_popup]])
            geom = fdf.get_geometry(O)
            lonO, latO = utils.get_geom_centroid(geom)
            fmarker = folium.CircleMarker([latO, lonO],
                                          radius=radius_origin_point,
                                          weight=2,
                                          color=color_origin_point,
                                          fill=True, fill_color=color_origin_point
                                          )
            if tile_popup:
                popup = folium.Popup(name+'<br/>'+trips_info, max_width=300)
                fmarker = fmarker.add_child(popup)
            fmarker.add_to(map_f)
    return map_f
# Fallback style values used when a key is missing from `style_func_args`.
default_style_func_args = {
    'weight': 1,
    'color': 'random',
    'opacity': 0.5,
    'fillColor': 'random',
    'fillOpacity': 0.25,
    'radius': 5,
}


def geojson_style_function(weight, color, opacity, fillColor, fillOpacity):
    """Build a folium GeoJson style callable.

    The callable ignores its `feature` argument and always returns the same
    style dict built from the captured arguments.
    """
    def _style(feature):
        return dict(weight=weight, color=color, opacity=opacity,
                    fillColor=fillColor, fillOpacity=fillOpacity)
    return _style
def manage_colors(color, fillColor):
    """Resolve 'random' colour placeholders into concrete colours.

    When both values are 'random', the fill colour is tied to the same
    randomly generated edge colour; otherwise each 'random' entry is
    resolved independently. Non-'random' values pass through unchanged.
    """
    if color == 'random' and fillColor == 'random':
        color = random_hex()
        fillColor = color
    elif color == 'random':
        color = random_hex()
    elif fillColor == 'random':
        fillColor = random_hex()
    return color, fillColor
def add_to_map(gway, g, map_f, style_func_args, popup_features=[]):
styles = []
for k in ['weight', 'color', 'opacity', 'fillColor', 'fillOpacity', 'radius']:
if k in style_func_args:
if callable(style_func_args[k]):
styles += [style_func_args[k](g)]
else:
styles += [style_func_args[k]]
else:
styles += [default_style_func_args[k]]
weight, color, opacity, fillColor, fillOpacity, radius = styles
color, fillColor = manage_colors(color, fillColor)
if type(gway) == shapely.geometry.multipolygon.MultiPolygon:
# Multipolygon
vertices = [list(zip(*p.exterior.xy)) for p in gway]
gj = folium.GeoJson({"type": "MultiPolygon", "coordinates": [vertices]},
style_function=geojson_style_function(weight=weight, color=color, opacity=opacity,
fillColor=fillColor, fillOpacity=fillOpacity))
elif type(gway) == shapely.geometry.polygon.Polygon:
# Polygon
vertices = list(zip(*gway.exterior.xy))
gj = folium.GeoJson({"type": "Polygon", "coordinates": [vertices]},
style_function=geojson_style_function(weight=weight, color=color, opacity=opacity,
fillColor=fillColor, fillOpacity=fillOpacity))
elif type(gway) == shapely.geometry.multilinestring.MultiLineString:
# MultiLine
vertices = [list(zip(*l.xy)) for l in gway]
gj = folium.GeoJson({"type": "MultiLineString", "coordinates": vertices},
style_function=geojson_style_function(weight=weight, color=color, opacity=opacity,
fillColor=fillColor, fillOpacity=fillOpacity))
elif type(gway) == shapely.geometry.linestring.LineString:
# LineString
vertices = list(zip(*gway.xy))
gj = folium.GeoJson({"type": "LineString", "coordinates": vertices},
style_function=geojson_style_function(weight=weight, color=color, opacity=opacity,
fillColor=fillColor, fillOpacity=fillOpacity))
else:
# Point
point = list(zip(*gway.xy))[0]
# gj = folium.CircleMarker(
gj = folium.Circle(
location=point[::-1],
radius=radius,
color=color, # '#3186cc',
fill=True,
fill_color=fillColor
)
popup = []
for pf in popup_features:
try:
popup += ['%s: %s' % (pf, g[pf])]
except KeyError:
pass
try:
popup = '<br>'.join(popup)
popup += json.dumps(g.tags)
popup = popup.replace("""'""", """_""")
except AttributeError:
pass
if len(popup) > 0:
gj.add_child(folium.Popup(popup, max_width=300))
gj.add_to(map_f)
return map_f
def plot_gdf(gdf, map_f=None, maxitems=-1, style_func_args=None, popup_features=None,
             tiles='cartodbpositron', zoom=6, geom_col='geometry', control_scale=True):
    """
    :param gdf: GeoDataFrame
        GeoDataFrame to visualize.
    :param map_f: folium.Map
        `folium.Map` object where the GeoDataFrame `gdf` will be plotted. If `None`, a new map will be created.
    :param maxitems: int
        maximum number of tiles to plot. If `-1`, all tiles will be plotted.
    :param style_func_args: dict
        dictionary to pass the following style parameters (keys) to the GeoJson style function of the polygons:
        'weight', 'color', 'opacity', 'fillColor', 'fillOpacity', 'radius'
    :param popup_features: list
        when clicking on a tile polygon, a popup window displaying the information in the
        columns of `gdf` listed in `popup_features` will appear.
    :param tiles: str
        folium's `tiles` parameter.
    :param zoom: int
        initial zoom.
    :param geom_col: str
        name of the geometry column of `gdf`.
    :param control_scale: bool
        if `True`, add scale information in the bottom left corner of the visualization. The default is `True`.
    Returns
    -------
    `folium.Map` object with the plotted GeoDataFrame.
    """
    # Use None sentinels instead of mutable default arguments ({} and []),
    # which would be shared across calls.
    if style_func_args is None:
        style_func_args = {}
    if popup_features is None:
        popup_features = []
    if map_f is None:
        # initialise map centred on the mean of the geometry centroids
        lon, lat = np.mean(np.array(list(gdf[geom_col].apply(utils.get_geom_centroid).values)), axis=0)
        map_f = folium.Map(location=[lat, lon], tiles=tiles, zoom_start=zoom, control_scale=control_scale)
    count = 0
    for k in gdf.index:
        g = gdf.loc[k]
        if isinstance(g[geom_col], gpd.geoseries.GeoSeries):
            # a row may hold several geometries: plot each one
            for i in range(len(g[geom_col])):
                map_f = add_to_map(g[geom_col].iloc[i], g.iloc[i], map_f,
                                   popup_features=popup_features,
                                   style_func_args=style_func_args)
        else:
            map_f = add_to_map(g[geom_col], g, map_f,
                               popup_features=popup_features,
                               style_func_args=style_func_args)
        count += 1
        # stop once `maxitems` rows have been drawn (-1 means "no limit")
        if count == maxitems:
            break
    return map_f
| [
"geojson.LineString",
"numpy.median",
"folium.Icon",
"numpy.round",
"folium.Circle",
"json.dumps",
"folium.RegularPolygonMarker",
"operator.itemgetter",
"folium.Map",
"numpy.random.randint",
"folium.CircleMarker",
"folium.Popup",
"warnings.warn",
"folium.plugins.HeatMap",
"matplotlib.pyp... | [((16118, 16156), 'matplotlib.pyplot.xlim', 'plt.xlim', (['start_datetime', 'end_datetime'], {}), '(start_datetime, end_datetime)\n', (16126, 16156), True, 'import matplotlib.pyplot as plt\n'), ((1695, 1720), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (1712, 1720), True, 'import numpy as np\n'), ((3795, 4003), 'warnings.warn', 'warnings.warn', (['"""Only the trajectories of the first 10 users will be plotted. Use the argument `max_users` to specify the desired number of users, or filter the TrajDataFrame."""'], {'stacklevel': 'STACKLEVEL'}), "(\n 'Only the trajectories of the first 10 users will be plotted. Use the argument `max_users` to specify the desired number of users, or filter the TrajDataFrame.'\n , stacklevel=STACKLEVEL)\n", (3808, 4003), False, 'import warnings\n'), ((5103, 5123), 'geojson.LineString', 'LineString', (['trajlist'], {}), '(trajlist)\n', (5113, 5123), False, 'from geojson import LineString\n'), ((9342, 9419), 'folium.Map', 'folium.Map', ([], {'zoom_start': 'zoom', 'tiles': 'tiles', 'control_scale': '(True)', 'location': 'center'}), '(zoom_start=zoom, tiles=tiles, control_scale=True, location=center)\n', (9352, 9419), False, 'import folium\n'), ((11080, 11281), 'warnings.warn', 'warnings.warn', (['"""Only the stops of the first 10 users will be plotted. Use the argument `max_users` to specify the desired number of users, or filter the TrajDataFrame."""'], {'stacklevel': 'STACKLEVEL'}), "(\n 'Only the stops of the first 10 users will be plotted. 
Use the argument `max_users` to specify the desired number of users, or filter the TrajDataFrame.'\n , stacklevel=STACKLEVEL)\n", (11093, 11281), False, 'import warnings\n'), ((11434, 11525), 'folium.Map', 'folium.Map', ([], {'location': 'center', 'zoom_start': 'zoom', 'tiles': 'tiles', 'control_scale': 'control_scale'}), '(location=center, zoom_start=zoom, tiles=tiles, control_scale=\n control_scale)\n', (11444, 11525), False, 'import folium\n'), ((14869, 14898), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 2)'}), '(figsize=(20, 2))\n', (14881, 14898), True, 'import matplotlib.pyplot as plt\n'), ((18978, 19073), 'folium.Map', 'folium.Map', ([], {'location': '[lat, lon]', 'tiles': 'tiles', 'zoom_start': 'zoom', 'control_scale': 'control_scale'}), '(location=[lat, lon], tiles=tiles, zoom_start=zoom, control_scale\n =control_scale)\n', (18988, 19073), False, 'import folium\n'), ((24780, 24798), 'json.dumps', 'json.dumps', (['g.tags'], {}), '(g.tags)\n', (24790, 24798), False, 'import json\n'), ((26484, 26579), 'folium.Map', 'folium.Map', ([], {'location': '[lat, lon]', 'tiles': 'tiles', 'zoom_start': 'zoom', 'control_scale': 'control_scale'}), '(location=[lat, lon], tiles=tiles, zoom_start=zoom, control_scale\n =control_scale)\n', (26494, 26579), False, 'import folium\n'), ((4960, 5051), 'folium.Map', 'folium.Map', ([], {'location': 'center', 'zoom_start': 'zoom', 'tiles': 'tiles', 'control_scale': 'control_scale'}), '(location=center, zoom_start=zoom, tiles=tiles, control_scale=\n control_scale)\n', (4970, 5051), False, 'import folium\n'), ((9424, 9518), 'folium.plugins.HeatMap', 'HeatMap', (['traj.values'], {'min_opacity': 'min_opacity', 'radius': 'radius', 'blur': 'blur', 'gradient': 'gradient'}), '(traj.values, min_opacity=min_opacity, radius=radius, blur=blur,\n gradient=gradient)\n', (9431, 9518), False, 'from folium.plugins import HeatMap\n'), ((12738, 12888), 'folium.RegularPolygonMarker', 'folium.RegularPolygonMarker', (['[la, lo]'], 
{'radius': 'marker_radius', 'color': 'color', 'fill_color': 'color', 'fill_opacity': 'opacity', 'number_of_sides': '_number_of_sides'}), '([la, lo], radius=marker_radius, color=color,\n fill_color=color, fill_opacity=opacity, number_of_sides=_number_of_sides)\n', (12765, 12888), False, 'import folium\n'), ((15341, 15371), 'pandas.to_datetime', 'pd.to_datetime', (['start_datetime'], {}), '(start_datetime)\n', (15355, 15371), True, 'import pandas as pd\n'), ((15520, 15548), 'pandas.to_datetime', 'pd.to_datetime', (['end_datetime'], {}), '(end_datetime)\n', (15534, 15548), True, 'import pandas as pd\n'), ((19565, 19605), 'geojson.LineString', 'LineString', (['[(lonO, latO), (lonD, latD)]'], {}), '([(lonO, latO), (lonD, latD)])\n', (19575, 19605), False, 'from geojson import LineString\n'), ((20706, 20849), 'folium.CircleMarker', 'folium.CircleMarker', (['[latO, lonO]'], {'radius': 'radius_origin_point', 'weight': '(2)', 'color': 'color_origin_point', 'fill': '(True)', 'fill_color': 'color_origin_point'}), '([latO, lonO], radius=radius_origin_point, weight=2,\n color=color_origin_point, fill=True, fill_color=color_origin_point)\n', (20725, 20849), False, 'import folium\n'), ((24931, 24965), 'folium.Popup', 'folium.Popup', (['popup'], {'max_width': '(300)'}), '(popup, max_width=300)\n', (24943, 24965), False, 'import folium\n'), ((4543, 4719), 'warnings.warn', 'warnings.warn', (['"""If necessary, trajectories will be down-sampled to have at most `max_points` points. To avoid this, specify `max_points=None`."""'], {'stacklevel': 'STACKLEVEL'}), "(\n 'If necessary, trajectories will be down-sampled to have at most `max_points` points. 
To avoid this, specify `max_points=None`.'\n , stacklevel=STACKLEVEL)\n", (4556, 4719), False, 'import warnings\n'), ((9252, 9318), 'numpy.median', 'np.median', (['traj[[constants.LONGITUDE, constants.LATITUDE]]'], {'axis': '(0)'}), '(traj[[constants.LONGITUDE, constants.LATITUDE]], axis=0)\n', (9261, 9318), True, 'import numpy as np\n'), ((11386, 11410), 'numpy.median', 'np.median', (['lo_la'], {'axis': '(0)'}), '(lo_la, axis=0)\n', (11395, 11410), True, 'import numpy as np\n'), ((16378, 16400), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (16397, 16400), False, 'import operator\n'), ((21108, 21164), 'folium.Popup', 'folium.Popup', (["(name + '<br/>' + trips_info)"], {'max_width': '(300)'}), "(name + '<br/>' + trips_info, max_width=300)\n", (21120, 21164), False, 'import folium\n'), ((4909, 4932), 'numpy.median', 'np.median', (['traj'], {'axis': '(0)'}), '(traj, axis=0)\n', (4918, 4932), True, 'import numpy as np\n'), ((5781, 5807), 'folium.Icon', 'folium.Icon', ([], {'color': '"""green"""'}), "(color='green')\n", (5792, 5807), False, 'import folium\n'), ((6002, 6017), 'numpy.round', 'np.round', (['la', '(4)'], {}), '(la, 4)\n', (6010, 6017), True, 'import numpy as np\n'), ((6019, 6034), 'numpy.round', 'np.round', (['lo', '(4)'], {}), '(lo, 4)\n', (6027, 6034), True, 'import numpy as np\n'), ((6384, 6408), 'folium.Icon', 'folium.Icon', ([], {'color': '"""red"""'}), "(color='red')\n", (6395, 6408), False, 'import folium\n'), ((6601, 6616), 'numpy.round', 'np.round', (['la', '(4)'], {}), '(la, 4)\n', (6609, 6616), True, 'import numpy as np\n'), ((6618, 6633), 'numpy.round', 'np.round', (['lo', '(4)'], {}), '(lo, 4)\n', (6626, 6633), True, 'import numpy as np\n'), ((24390, 24490), 'folium.Circle', 'folium.Circle', ([], {'location': 'point[::-1]', 'radius': 'radius', 'color': 'color', 'fill': '(True)', 'fill_color': 'fillColor'}), '(location=point[::-1], radius=radius, color=color, fill=True,\n fill_color=fillColor)\n', (24403, 24490), False, 
'import folium\n'), ((13350, 13365), 'numpy.round', 'np.round', (['la', '(4)'], {}), '(la, 4)\n', (13358, 13365), True, 'import numpy as np\n'), ((13367, 13382), 'numpy.round', 'np.round', (['lo', '(4)'], {}), '(lo, 4)\n', (13375, 13382), True, 'import numpy as np\n')] |
import src as ai
import numpy as np
import wandb
# Track the run with Weights & Biases.
wandb.init(project='lstm-timeseries', config={'version': 'new-a'})
# Single-feature LSTM predicting the next value of the series.
model = ai.lstm(inshape=1, outshape=1, outactivation=ai.identity(), learningrate=0.01)
# Load the air-passenger series and min-max normalise it to [0, 1].
Data = np.genfromtxt(r"data/timeseries/airpassenger.csv", dtype=int)
Data = (Data - min(Data)) / (max(Data) - min(Data)).astype(float)
# First 100 points for training, the remainder for testing; add a feature axis.
TrainingData = Data[0 : 100, np.newaxis]
TestData = Data[100 :, np.newaxis]
NumOfTrainGen = 15000
NumOfTestGen = len(TestData) - 5
# Training: sample a random 5-step window; the target is the window shifted by one step.
for Generation in range(NumOfTrainGen):
    start = np.random.randint(0, len(TrainingData) - 5)
    window = TrainingData[start : start + 5]  # renamed from `input`, which shadowed the builtin
    Out = model.forward(window)
    OutError = TrainingData[start + 1 : start + 6] - Out
    inError = model.backward(OutError)
# Evaluation: slide a 5-step window over the test set and accumulate the error.
error = 0
for Generation in range(NumOfTestGen):
    window = TestData[Generation : Generation + 5]
    Out = model.forward(window)
    OutError = TestData[Generation + 1 : Generation + 6] - Out
    # NOTE(review): signed errors can cancel out, and calling backward() here
    # keeps training on test data — confirm both are intended.
    error += np.sum(OutError)
    inError = model.backward(OutError)
print(error / NumOfTestGen)
wandb.log({"test accuracy" : error / NumOfTestGen}) | [
"wandb.log",
"src.identity",
"wandb.init",
"numpy.sum",
"numpy.genfromtxt"
] | [((50, 116), 'wandb.init', 'wandb.init', ([], {'project': '"""lstm-timeseries"""', 'config': "{'version': 'new-a'}"}), "(project='lstm-timeseries', config={'version': 'new-a'})\n", (60, 116), False, 'import wandb\n'), ((214, 274), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/timeseries/airpassenger.csv"""'], {'dtype': 'int'}), "('data/timeseries/airpassenger.csv', dtype=int)\n", (227, 274), True, 'import numpy as np\n'), ((1023, 1073), 'wandb.log', 'wandb.log', (["{'test accuracy': error / NumOfTestGen}"], {}), "({'test accuracy': error / NumOfTestGen})\n", (1032, 1073), False, 'import wandb\n'), ((940, 956), 'numpy.sum', 'np.sum', (['OutError'], {}), '(OutError)\n', (946, 956), True, 'import numpy as np\n'), ((172, 185), 'src.identity', 'ai.identity', ([], {}), '()\n', (183, 185), True, 'import src as ai\n')] |
from numpy.testing import assert_
from alns.Statistics import Statistics
def test_empty_new_statistics():
    """
    A freshly constructed Statistics object should hold no objective values.
    """
    stats = Statistics()
    assert_(len(stats.objectives) == 0)
def test_collect_objectives():
    """
    Objective values collected one by one should be stored in order, with the
    most recent value last.
    """
    stats = Statistics()
    for value in range(1, 100):
        stats.collect_objective(value)
        # after collecting `value` items, that many are stored and the last is `value`
        assert_(len(stats.objectives) == value)
        assert_(stats.objectives[-1] == value)
| [
"alns.Statistics.Statistics",
"numpy.testing.assert_"
] | [((193, 205), 'alns.Statistics.Statistics', 'Statistics', ([], {}), '()\n', (203, 205), False, 'from alns.Statistics import Statistics\n'), ((386, 398), 'alns.Statistics.Statistics', 'Statistics', ([], {}), '()\n', (396, 398), False, 'from alns.Statistics import Statistics\n'), ((550, 597), 'numpy.testing.assert_', 'assert_', (['(statistics.objectives[-1] == objective)'], {}), '(statistics.objectives[-1] == objective)\n', (557, 597), False, 'from numpy.testing import assert_\n')] |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
# www.biota.com
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import unittest
import tempfile
import pandas as pd
from biom import Table, load_table
from qiime2 import Artifact
from qiime2 import Metadata
from numpy.testing import assert_allclose
from qiime2.plugins.sourcetracker2.actions import gibbs
from sourcetracker._q2._visualizer import (barplot,
assignment_barplot)
class Test_QIIME2_gibbs(unittest.TestCase):
    """End-to-end tests for the QIIME 2 `gibbs` action on the tiny test data.

    Each example in `self.examples` is one parameter set; results are compared
    against precomputed expected outputs from the CLI tests.
    """
    def setUp(self):
        # different cli perams to test
        # all used in the example section
        # example2 uses alternative column names; example3 enables leave-one-out;
        # example4 uses a longer burn-in; example5 uses asymmetric rarefaction depths.
        self.examples = {'example1': {'mapping': 'map.txt',
                                      'restarts': 2,
                                      'draws_per_restart': 3,
                                      'burnin': 10,
                                      'delay': 2,
                                      'loo': False,
                                      'source_sink_column': 'SourceSink',
                                      'source_column_value': 'source',
                                      'sink_column_value': 'sink',
                                      'source_category_column': 'Env',
                                      'sink_rarefaction_depth': 1000,
                                      'source_rarefaction_depth': 1000},
                         'example2': {'mapping': 'alt-map.txt',
                                      'restarts': 2,
                                      'draws_per_restart': 3,
                                      'burnin': 10,
                                      'delay': 2,
                                      'loo': False,
                                      'source_sink_column': 'source-or-sink',
                                      'source_column_value': 'src',
                                      'sink_column_value': 'snk',
                                      'source_category_column': 'sample-type',
                                      'sink_rarefaction_depth': 1000,
                                      'source_rarefaction_depth': 1000},
                         'example3': {'mapping': 'map.txt',
                                      'restarts': 2,
                                      'draws_per_restart': 3,
                                      'burnin': 10,
                                      'delay': 2,
                                      'loo': True,
                                      'source_sink_column': 'SourceSink',
                                      'source_column_value': 'source',
                                      'sink_column_value': 'sink',
                                      'source_category_column': 'Env',
                                      'sink_rarefaction_depth': 1000,
                                      'source_rarefaction_depth': 1000},
                         'example4': {'mapping': 'map.txt',
                                      'restarts': 2,
                                      'draws_per_restart': 3,
                                      'burnin': 25,
                                      'delay': 2,
                                      'loo': False,
                                      'source_sink_column': 'SourceSink',
                                      'source_column_value': 'source',
                                      'sink_column_value': 'sink',
                                      'source_category_column': 'Env',
                                      'sink_rarefaction_depth': 1000,
                                      'source_rarefaction_depth': 1000},
                         'example5': {'mapping': 'map.txt',
                                      'restarts': 2,
                                      'draws_per_restart': 3,
                                      'burnin': 10,
                                      'delay': 2,
                                      'loo': False,
                                      'source_sink_column': 'SourceSink',
                                      'source_column_value': 'source',
                                      'sink_column_value': 'sink',
                                      'source_category_column': 'Env',
                                      'sink_rarefaction_depth': 1700,
                                      'source_rarefaction_depth': 1500}}
    def test_q2_gibbs(self):
        """Tests that the Q2 and standalone gibbs results match.
        Also validates against ground truth "expected" results.
        """
        # paths relative to this test file: the repo test data lives three levels up
        crnt_dir = os.path.dirname(os.path.abspath(__file__))
        tst_pth = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               os.pardir, os.pardir, os.pardir)
        # test the cli for each example peram set
        for exmp_i, perams in self.examples.items():
            # get the tables input pth and out pth
            tbl_pth = os.path.join(tst_pth, 'data/tiny-test/otu_table.biom')
            tax_pth = os.path.join(tst_pth, 'data/tiny-test/taxonomy.qza')
            mta_pth = os.path.join(
                tst_pth, 'data/tiny-test', perams['mapping'])
            # import tables
            q2table = Artifact.import_data("FeatureTable[Frequency]",
                                            load_table(tbl_pth))
            q2tax = Artifact.load(tax_pth)
            q2meta = Metadata(pd.read_csv(mta_pth,
                                         sep='\t',
                                         index_col=0))
            # Run gemelli through QIIME 2 (specifically, the Artifact API)
            # save a few as var to avoid long lines
            rs_ = perams['source_rarefaction_depth']
            rss_ = perams['sink_rarefaction_depth']
            scv_ = perams['source_column_value']
            scc_ = perams['source_category_column']
            draw_ = perams['draws_per_restart']
            ssc_ = perams['source_sink_column']
            sincv_ = perams['sink_column_value']
            mp, mpstd, fas, fasmf = gibbs(q2table,
                                          q2meta,
                                          loo=perams['loo'],
                                          source_rarefaction_depth=rs_,
                                          sink_rarefaction_depth=rss_,
                                          restarts=perams['restarts'],
                                          draws_per_restart=draw_,
                                          burnin=perams['burnin'],
                                          delay=perams['delay'],
                                          source_sink_column=ssc_,
                                          source_column_value=scv_,
                                          sink_column_value=sincv_,
                                          source_category_column=scc_)
            # run prop barplot
            # visualizers only need to produce an index.html to count as success
            with tempfile.TemporaryDirectory() as output_dir:
                barplot(output_dir,
                        mp.view(pd.DataFrame),
                        q2meta,
                        scc_)
                index_fp = os.path.join(output_dir, 'index.html')
                self.assertTrue(os.path.exists(index_fp))
            # run a per-sink prop
            if perams['loo']:
                per_ = 'drainwater'
            else:
                per_ = 's0'
            with tempfile.TemporaryDirectory() as output_dir:
                assignment_barplot(output_dir,
                                   fas.view(pd.DataFrame),
                                   q2tax.view(pd.DataFrame),
                                   fasmf.view(pd.DataFrame),
                                   per_)
                index_fp = os.path.join(output_dir, 'index.html')
                self.assertTrue(os.path.exists(index_fp))
            # Get the underlying data from these artifacts
            res_mp = mp.view(Table).to_dataframe().T
            # check mixing proportions from cli
            exp_pth = os.path.join(crnt_dir,
                                   os.pardir,
                                   os.pardir,
                                   '_cli',
                                   'tests',
                                   'data',
                                   'exp_' + exmp_i,
                                   'mixing_proportions.txt')
            exp_mp = pd.read_csv(exp_pth, sep='\t', index_col=0).T
            # compare the results
            # loose tolerance: gibbs sampling is stochastic, so only require
            # agreement within 0.5
            assert_allclose(exp_mp,
                            res_mp.loc[exp_mp.index,
                                       exp_mp.columns],
                            atol=.50)
# Allow running this test module directly (e.g. `python <this file>.py`).
if __name__ == "__main__":
    unittest.main()
| [
"biom.load_table",
"tempfile.TemporaryDirectory",
"qiime2.Artifact.load",
"os.path.exists",
"pandas.read_csv",
"numpy.testing.assert_allclose",
"os.path.join",
"unittest.main",
"os.path.abspath",
"qiime2.plugins.sourcetracker2.actions.gibbs"
] | [((8970, 8985), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8983, 8985), False, 'import unittest\n'), ((4888, 4913), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4903, 4913), False, 'import os\n'), ((5230, 5284), 'os.path.join', 'os.path.join', (['tst_pth', '"""data/tiny-test/otu_table.biom"""'], {}), "(tst_pth, 'data/tiny-test/otu_table.biom')\n", (5242, 5284), False, 'import os\n'), ((5307, 5359), 'os.path.join', 'os.path.join', (['tst_pth', '"""data/tiny-test/taxonomy.qza"""'], {}), "(tst_pth, 'data/tiny-test/taxonomy.qza')\n", (5319, 5359), False, 'import os\n'), ((5382, 5440), 'os.path.join', 'os.path.join', (['tst_pth', '"""data/tiny-test"""', "perams['mapping']"], {}), "(tst_pth, 'data/tiny-test', perams['mapping'])\n", (5394, 5440), False, 'import os\n'), ((5640, 5662), 'qiime2.Artifact.load', 'Artifact.load', (['tax_pth'], {}), '(tax_pth)\n', (5653, 5662), False, 'from qiime2 import Artifact\n'), ((6336, 6661), 'qiime2.plugins.sourcetracker2.actions.gibbs', 'gibbs', (['q2table', 'q2meta'], {'loo': "perams['loo']", 'source_rarefaction_depth': 'rs_', 'sink_rarefaction_depth': 'rss_', 'restarts': "perams['restarts']", 'draws_per_restart': 'draw_', 'burnin': "perams['burnin']", 'delay': "perams['delay']", 'source_sink_column': 'ssc_', 'source_column_value': 'scv_', 'sink_column_value': 'sincv_', 'source_category_column': 'scc_'}), "(q2table, q2meta, loo=perams['loo'], source_rarefaction_depth=rs_,\n sink_rarefaction_depth=rss_, restarts=perams['restarts'],\n draws_per_restart=draw_, burnin=perams['burnin'], delay=perams['delay'],\n source_sink_column=ssc_, source_column_value=scv_, sink_column_value=\n sincv_, source_category_column=scc_)\n", (6341, 6661), False, 'from qiime2.plugins.sourcetracker2.actions import gibbs\n'), ((8295, 8412), 'os.path.join', 'os.path.join', (['crnt_dir', 'os.pardir', 'os.pardir', '"""_cli"""', '"""tests"""', '"""data"""', "('exp_' + exmp_i)", '"""mixing_proportions.txt"""'], {}), 
"(crnt_dir, os.pardir, os.pardir, '_cli', 'tests', 'data', \n 'exp_' + exmp_i, 'mixing_proportions.txt')\n", (8307, 8412), False, 'import os\n'), ((8766, 8841), 'numpy.testing.assert_allclose', 'assert_allclose', (['exp_mp', 'res_mp.loc[exp_mp.index, exp_mp.columns]'], {'atol': '(0.5)'}), '(exp_mp, res_mp.loc[exp_mp.index, exp_mp.columns], atol=0.5)\n', (8781, 8841), False, 'from numpy.testing import assert_allclose\n'), ((4962, 4987), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4977, 4987), False, 'import os\n'), ((5599, 5618), 'biom.load_table', 'load_table', (['tbl_pth'], {}), '(tbl_pth)\n', (5609, 5618), False, 'from biom import Table, load_table\n'), ((5693, 5736), 'pandas.read_csv', 'pd.read_csv', (['mta_pth'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(mta_pth, sep='\\t', index_col=0)\n", (5704, 5736), True, 'import pandas as pd\n'), ((7197, 7226), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7224, 7226), False, 'import tempfile\n'), ((7414, 7452), 'os.path.join', 'os.path.join', (['output_dir', '"""index.html"""'], {}), "(output_dir, 'index.html')\n", (7426, 7452), False, 'import os\n'), ((7674, 7703), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7701, 7703), False, 'import tempfile\n'), ((8015, 8053), 'os.path.join', 'os.path.join', (['output_dir', '"""index.html"""'], {}), "(output_dir, 'index.html')\n", (8027, 8053), False, 'import os\n'), ((8674, 8717), 'pandas.read_csv', 'pd.read_csv', (['exp_pth'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(exp_pth, sep='\\t', index_col=0)\n", (8685, 8717), True, 'import pandas as pd\n'), ((7485, 7509), 'os.path.exists', 'os.path.exists', (['index_fp'], {}), '(index_fp)\n', (7499, 7509), False, 'import os\n'), ((8086, 8110), 'os.path.exists', 'os.path.exists', (['index_fp'], {}), '(index_fp)\n', (8100, 8110), False, 'import os\n')] |
from __future__ import division, print_function
from action_detector_diagnosis import ActionDetectorDiagnosis
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import pandas as pd
import os
from collections import OrderedDict
from utils import interpolated_prec_rec
from matplotlib import gridspec, rc
import matplotlib as mpl
# Non-interactive backend: render to files only, no display required.
# Must be set before `matplotlib.pyplot` is imported below.
mpl.use('Agg')
# Global rc settings: serif/Times fonts via LaTeX and thicker tick marks.
params = {'font.family': 'serif','font.serif': 'Times',
          'text.usetex': True,
          'xtick.major.size': 8,
          'ytick.major.size': 8,
          'xtick.major.width': 3,
          'ytick.major.width': 3,
          'mathtext.fontset': 'custom',
         }
mpl.rcParams.update(params)
import matplotlib.pyplot as plt
def compute_mAP_N(result, this_cls_pred, this_cls_gt):
    """Compute the average-mAP_N of one class across all tIoU thresholds.

    Parameters
    ----------
    result : object with `tiou_thresholds`, `matched_gt_id_cols`, and
        `average_num_instance_per_class` attributes (an
        ActionDetectorDiagnosis-style result).
    this_cls_pred : pandas.DataFrame
        predictions of one class, sorted by descending score.
    this_cls_gt : pandas.DataFrame
        ground-truth instances of the same class (must have a 'gt-id' column).

    Returns
    -------
    float
        mean over tIoU thresholds of the normalized average precision.
    """
    ap = np.zeros(len(result.tiou_thresholds))
    tp = np.zeros((len(result.tiou_thresholds), len(this_cls_pred)))
    fp = np.zeros((len(result.tiou_thresholds), len(this_cls_pred)))
    # A prediction is a true positive at a tIoU iff its matched GT id column
    # is non-null; otherwise it is a false positive.
    for tidx, tiou in enumerate(result.tiou_thresholds):
        fp[tidx, pd.isnull(this_cls_pred[result.matched_gt_id_cols[tidx]]).values] = 1
        tp[tidx, ~(pd.isnull(this_cls_pred[result.matched_gt_id_cols[tidx]]).values)] = 1
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement.
    tp_cumsum = np.cumsum(tp, axis=1).astype(float)
    fp_cumsum = np.cumsum(fp, axis=1).astype(float)
    recall_cumsum = tp_cumsum / len(np.unique(this_cls_gt['gt-id']))
    # precision normalized by the average number of instances per class (mAP_N)
    precision_cumsum = recall_cumsum * result.average_num_instance_per_class / (recall_cumsum * result.average_num_instance_per_class + fp_cumsum)
    for tidx in range(len(result.tiou_thresholds)):
        ap[tidx] = interpolated_prec_rec(precision_cumsum[tidx, :], recall_cumsum[tidx, :])
    return ap.mean()
# Break the average-mAP_N down by a ground-truth characteristic (one value per bucket).
def compute_average_mAP_N_for_characteristic(sensitivity_analysis, characteristic_name):
    """Average-mAP_N per bucket of one ground-truth characteristic.

    Args:
        sensitivity_analysis: evaluated ActionDetectorDiagnosis instance.
        characteristic_name (str): ground-truth column to bucket by.

    Returns:
        OrderedDict: characteristic bucket value -> average-mAP_N over classes.
    """
    gt_by_characteristic = sensitivity_analysis.ground_truth.groupby(characteristic_name)
    average_mAP_n_by_characteristic_value = OrderedDict()
    for characteristic_value, this_characteristic_gt in gt_by_characteristic:
        ap = np.nan*np.zeros(len(sensitivity_analysis.activity_index))
        gt_by_cls = this_characteristic_gt.groupby('label')
        pred_by_cls = sensitivity_analysis.prediction.groupby('label')
        for cls in sensitivity_analysis.activity_index.values():
            this_cls_pred = pred_by_cls.get_group(cls).sort_values(by='score',ascending=False)
            # Bug fix: narrowed the bare `except:` — get_group raises KeyError
            # when the class has no ground truth in this bucket; its AP stays
            # NaN and is ignored by the nanmean below.
            try:
                this_cls_gt = gt_by_cls.get_group(cls)
            except KeyError:
                continue
            # keep only predictions matched to this bucket's instances
            # (NaN keeps the unmatched predictions as false positives)
            gt_id_to_keep = np.append(this_cls_gt['gt-id'].values, [np.nan])
            for tidx, tiou in enumerate(sensitivity_analysis.tiou_thresholds):
                this_cls_pred = this_cls_pred[this_cls_pred[sensitivity_analysis.matched_gt_id_cols[tidx]].isin(gt_id_to_keep)]
            ap[cls] = compute_mAP_N(sensitivity_analysis,this_cls_pred,this_cls_gt)
        average_mAP_n_by_characteristic_value[characteristic_value] = np.nanmean(ap)
    return average_mAP_n_by_characteristic_value
def plot_sensitivity_analysis(sensitivity_analysis, save_filename,
                              colors=['#7fc97f','#beaed4','#fdc086','#386cb0','#f0027f','#bf5b17'],
                              characteristic_names=['context-size', 'context-distance', 'agreement','coverage', 'length', 'num-instances'],
                              characteristic_names_in_text=['Context Size', 'Context Distance', 'Agreement', 'Coverage', 'Length', '\# Instances'],
                              characteristic_names_delta_positions=[1.1,-1.4,0.25,0.5,1,-0.2],
                              buckets_order=['0','1','2','3','4','5','6','XW', 'W', 'XS','S', 'N', 'M', 'F', 'Inf', 'L', 'XL', 'H', 'XH'],
                              figsize=(25,6), fontsize=28, num_grids=4):
    """Render the sensitivity analysis as a two-panel figure and save it.

    Left panel: one bar group per characteristic, one bar per bucket, showing
    average-mAP_N (%), with the overall average-mAP drawn as a dashed baseline.
    Right panel: the min/max relative change of each characteristic w.r.t.
    that baseline.

    Args:
        sensitivity_analysis: evaluated ActionDetectorDiagnosis instance.
        save_filename (str): output path for the saved figure.
        colors (list): one bar color per characteristic.
        characteristic_names (list): ground-truth columns to analyze.
        characteristic_names_in_text (list): LaTeX-safe panel labels.
        characteristic_names_delta_positions (list): x-offsets of the labels.
        buckets_order (list): display order of bucket names.
        figsize (tuple): figure size; fontsize (int): base font size;
        num_grids (int): grid columns (last one holds the right panel).
    """
    # collect average-mAP_N per bucket for every characteristic + baseline
    average_mAP_N_by_characteristic = OrderedDict()
    average_mAP_N_by_characteristic['base'] = sensitivity_analysis.average_mAP
    for characteristic_name in characteristic_names:
        average_mAP_N_by_characteristic[characteristic_name] = compute_average_mAP_N_for_characteristic(sensitivity_analysis,
                                                                                                         characteristic_name)
    # flatten the nested dict into parallel lists for a DataFrame
    characteristic_name_lst,bucket_lst = ['base'],['base']
    ratio_value_lst = [average_mAP_N_by_characteristic['base']]
    for characteristic_name in characteristic_names:
        characteristic_name_lst += len(average_mAP_N_by_characteristic[characteristic_name])*[characteristic_name]
        bucket_lst += average_mAP_N_by_characteristic[characteristic_name].keys()
        ratio_value_lst += average_mAP_N_by_characteristic[characteristic_name].values()
    # characteristic-name,bucket,ratio-value
    sensitivity_analysis_df = pd.DataFrame({'characteristic-name': characteristic_name_lst,
                                            'bucket': bucket_lst,
                                            'ratio-value': ratio_value_lst,
                                            })
    # order buckets for display rather than lexicographically
    sensitivity_analysis_df['order'] = pd.Categorical(sensitivity_analysis_df['bucket'],
                                                      categories=buckets_order,ordered=True)
    sensitivity_analysis_df.sort_values(by='order', inplace=True)
    sensitivity_analysis_df_by_characteristic_name = sensitivity_analysis_df.groupby('characteristic-name')
    base_average_mAP_N = sensitivity_analysis_df_by_characteristic_name.get_group('base')['ratio-value'].values[0]*100
    fig = plt.figure(figsize=figsize)
    grid = plt.GridSpec(1, num_grids)
    # ------- left panel: per-bucket average-mAP_N bars -------
    ax1=fig.add_subplot(grid[:-1])
    current_x_value = 0
    xticks_lst,xvalues_lst = [],[]
    for char_idx, characteristic_name in enumerate(characteristic_names):
        this_sensitivity_analysis = sensitivity_analysis_df_by_characteristic_name.get_group(characteristic_name)
        x_values = range(current_x_value, current_x_value + len(this_sensitivity_analysis))
        y_values = this_sensitivity_analysis['ratio-value'].values*100
        mybars=ax1.bar(x_values, y_values, color=colors[char_idx])
        # annotate each bar with its value
        for bari in mybars:
            height = bari.get_height()
            ax1.text(bari.get_x() + bari.get_width()/2, bari.get_height()+0.025*100, '%.1f' % height,
                     ha='center', color='black', fontsize=fontsize/1.15)
        ax1.annotate(characteristic_names_in_text[char_idx],
                     xy=(current_x_value+characteristic_names_delta_positions[char_idx],100),
                     fontsize=fontsize)
        # dotted separator between characteristic groups
        if char_idx < len(characteristic_names) - 1:
            ax1.axvline(max(x_values)+1, linewidth=1.5, color="gray", linestyle='dotted')
        current_x_value = max(x_values) + 2
        xticks_lst.extend(this_sensitivity_analysis['bucket'].values.tolist())
        xvalues_lst.extend(x_values)
    # dashed baseline = overall average-mAP
    ax1.plot([xvalues_lst[0]- 1, xvalues_lst[-1] + 1],[base_average_mAP_N, base_average_mAP_N], '--', color='k')
    ax1.annotate('%.2f' % base_average_mAP_N,xy=(xvalues_lst[-1]-0.5,base_average_mAP_N+0.025*100), fontsize=fontsize/1.15)
    ax1.spines['top'].set_visible(False)
    ax1.spines['right'].set_visible(False)
    ax1.yaxis.grid(True, linestyle='dotted')
    ax1.set_axisbelow(True)
    ax1.xaxis.set_tick_params(width=0)
    ax1.yaxis.set_tick_params(size=10, direction='in', width=2)
    for axis in ['bottom','left']:
        ax1.spines[axis].set_linewidth(2.5)
    plt.xticks(xvalues_lst, xticks_lst, fontsize=fontsize/1.1)
    plt.yticks(fontsize=fontsize)
    ax1.set_ylabel('Average-mAP$_{N}$ $(\%)$', fontsize=fontsize)
    ax1.set_ylim(0,100)
    ax1.set_xlim(-1.5,current_x_value-1)
    # ------- right panel: min/max relative change per characteristic -------
    ax2=fig.add_subplot(grid[-1:])
    current_x_value = 0
    xticks_lst,xvalues_lst = [],[]
    min_y_value,max_y_value=np.infty,-np.infty
    for char_idx, characteristic_name in enumerate(characteristic_names):
        this_sensitivity_analysis = sensitivity_analysis_df_by_characteristic_name.get_group(characteristic_name)
        x_values = [current_x_value,current_x_value]
        y_values = this_sensitivity_analysis['ratio-value'].values*100
        # express bucket extremes relative to the baseline
        y_values = [min(y_values)/base_average_mAP_N,max(y_values)/base_average_mAP_N]
        this_min_y_value,this_max_y_value=min(y_values),max(y_values)
        min_y_value,max_y_value=min(min_y_value,this_min_y_value),max(max_y_value,this_max_y_value)
        ax2.plot([current_x_value,current_x_value],
                 [this_min_y_value,this_max_y_value], linestyle='-', marker='_', mew=5, markersize=25,lw=8,color=colors[char_idx])
        for i,j in zip(x_values,y_values):
            ax2.annotate('%.1f' % j,xy=(i+0.1,j+0.05), fontsize=fontsize/1.1)
        current_x_value += 1
        xticks_lst += [characteristic_names_in_text[char_idx]]
        xvalues_lst += [x_values[0]]
    # baseline in relative terms is 1.0 by construction
    ax2.plot([xvalues_lst[0]- 1, xvalues_lst[-1] + 1],[base_average_mAP_N/base_average_mAP_N, base_average_mAP_N/base_average_mAP_N], '--', color='k',zorder=0)
    ax2.annotate('%.2f' % base_average_mAP_N,xy=(xvalues_lst[-1]+0.2,base_average_mAP_N/base_average_mAP_N+0.05), fontsize=fontsize/1.1)
    ax2.yaxis.grid(color='gray', linestyle=':',lw=1)
    ax2.xaxis.grid(color='gray', linestyle=':',lw=1)
    ax2.set_axisbelow(True)
    ax2.xaxis.set_tick_params(width=0)
    ax2.yaxis.set_tick_params(size=10, direction='in', width=2)
    for axis in ['bottom','left']:
        ax2.spines[axis].set_linewidth(2.5)
    plt.xticks(xvalues_lst, xticks_lst, fontsize=fontsize/1.5, rotation=90)
    plt.yticks(fontsize=fontsize)
    ax2.set_ylabel('Average-mAP$_{N}$\nRelative Change', fontsize=fontsize)
    ax2.set_ylim(min_y_value*0.8,max_y_value*1.2)
    plt.tight_layout()
    fig.savefig(save_filename,bbox_inches='tight')
    print('[Done] Output analysis is saved in %s' % save_filename)
def main(ground_truth_filename, subset, prediction_filename, output_folder, is_thumos14):
    """Run the full sensitivity analysis and save the resulting figure.

    Selects the dataset-specific characteristic bins, colors and layout
    (ActivityNet testing/validation or THUMOS14), evaluates the diagnosis,
    and writes `sensitivity_analysis.pdf` to `output_folder`.

    Args:
        ground_truth_filename (str): JSON file with ground-truth annotations.
        subset (str): dataset subset ('testing'/'validation'); ignored for THUMOS14.
        prediction_filename (str): JSON file with the method's predictions.
        output_folder (str): directory where the figure is saved.
        is_thumos14 (bool): whether the dataset is THUMOS14 instead of ActivityNet.

    Raises:
        RuntimeError: if `subset` is not a valid ActivityNet subset.
    """
    if not is_thumos14:
        if subset == 'testing':
            # ActivityNet testing
            # each entry: characteristic -> (bin edges, bucket labels)
            characteristic_names_to_bins = {'context-size': (range(-1,7), ['0','1','2','3','4','5','6']),
                                            'context-distance': (range(-1,4), ['Inf','N','M','F']),
                                            'agreement': (np.linspace(0,1.0,6), ['XW','W','M','H','XH']),
                                            'coverage': (np.linspace(0,1.0,6), ['XS','S','M','L','XL']),
                                            'length': (np.array([0,30,60,120,180,np.inf]), ['XS','S','M','L','XL']),
                                            'num-instances': (np.array([-1,1,4,8,np.inf]), ['XS','S','M','L'])}
            colors = ['#7fc97f','#beaed4','#fdc086','#386cb0','#f0027f','#bf5b17']
            characteristic_names = ['context-size', 'context-distance', 'agreement','coverage', 'length', 'num-instances']
            characteristic_names_in_text = ['Context Size', 'Context Distance', 'Agreement', 'Coverage', 'Length', '\# Instances']
            characteristic_names_delta_positions = [1.1,-1.4,0.25,0.5,1,-0.2]
            figsize = (25,6)
            num_grids = 4
        elif subset == 'validation':
            # ActivityNet validation
            # validation lacks the context/agreement annotations
            characteristic_names_to_bins = {'coverage': (np.linspace(0,1.0,6), ['XS','S','M','L','XL']),
                                            'length': (np.array([0,30,60,120,180,np.inf]), ['XS','S','M','L','XL']),
                                            'num-instances': (np.array([-1,1,4,8,np.inf]), ['XS','S','M','L'])}
            colors = ['#386cb0','#f0027f','#bf5b17']
            characteristic_names = ['coverage', 'length', 'num-instances']
            characteristic_names_in_text = ['Coverage', 'Length', '\# Instances']
            characteristic_names_delta_positions = [0.5,1,-0.2]
            figsize = (17.5,6)
            num_grids = 3
        else:
            raise RuntimeError('%s is not a valid subset' % subset)
        tiou_thresholds = np.linspace(0.5, 0.95, 10)
    else:
        # THUMOS14
        characteristic_names_to_bins = {'coverage': (np.array([0,0.02,0.04,0.06,0.08,1]), ['XS','S','M','L','XL']),
                                        'length': (np.array([0,3,6,12,18,np.inf]), ['XS','S','M','L','XL']),
                                        'num-instances': (np.array([-1,1,40,80,np.inf]), ['XS','S','M','L'])}
        colors = ['#386cb0','#f0027f','#bf5b17']
        characteristic_names = ['coverage', 'length', 'num-instances']
        characteristic_names_in_text = ['Coverage', 'Length', '\# Instances']
        characteristic_names_delta_positions = [0.5,1,-0.2]
        figsize = (17.5,6)
        num_grids = 3
        tiou_thresholds = [0.5]
    # build and evaluate the diagnosis, then render the figure
    sensitivity_analysis = ActionDetectorDiagnosis(ground_truth_filename=ground_truth_filename,
                                                    prediction_filename=prediction_filename,
                                                    tiou_thresholds=tiou_thresholds,
                                                    limit_factor=None,
                                                    min_tiou_thr=0.1,
                                                    subset=subset,
                                                    verbose=True,
                                                    check_status=True,
                                                    load_extra_annotations=True,
                                                    characteristic_names_to_bins=characteristic_names_to_bins,
                                                    normalize_ap=True,
                                                    minimum_normalized_precision_threshold_for_detection=0.0
                                                )
    sensitivity_analysis.evaluate()
    plot_sensitivity_analysis(sensitivity_analysis=sensitivity_analysis,
                              save_filename=os.path.join(output_folder, 'sensitivity_analysis.pdf'),
                              colors=colors,
                              characteristic_names=characteristic_names,
                              characteristic_names_in_text=characteristic_names_in_text,
                              characteristic_names_delta_positions=characteristic_names_delta_positions,
                              figsize=figsize,
                              num_grids=num_grids)
if __name__ == '__main__':
    # Command-line entry point: collect the run options and hand them to main().
    cli = ArgumentParser(description='Run the sensitivity analysis.',
                         formatter_class=ArgumentDefaultsHelpFormatter)
    cli.add_argument('--ground_truth_filename', type=str, required=True,
                     help='The path to the JSON file containing the ground truth annotations')
    cli.add_argument('--subset', type=str, default='validation',
                     help='The dataset subset to use for the analysis')
    cli.add_argument('--prediction_filename', type=str, required=True,
                     help="The path to the JSON file containing the method's predictions")
    cli.add_argument('--output_folder', type=str, required=True,
                     help='The path to the folder in which the results will be saved')
    cli.add_argument('--is_thumos14', action='store_true', default=False,
                     help='Pass this argument if the dataset used is THUMOS14 and not ActivityNet')
    opts = cli.parse_args()
    main(opts.ground_truth_filename, opts.subset, opts.prediction_filename,
         opts.output_folder, opts.is_thumos14)
| [
"action_detector_diagnosis.ActionDetectorDiagnosis",
"matplotlib.pyplot.GridSpec",
"numpy.nanmean",
"numpy.array",
"argparse.ArgumentParser",
"pandas.Categorical",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"pandas.DataFrame",
"collections.OrderedDict",
"matplotlib.rcParams.update",
"matplo... | [((364, 378), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (371, 378), True, 'import matplotlib as mpl\n'), ((662, 689), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['params'], {}), '(params)\n', (681, 689), True, 'import matplotlib as mpl\n'), ((1970, 1983), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1981, 1983), False, 'from collections import OrderedDict\n'), ((3874, 3887), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3885, 3887), False, 'from collections import OrderedDict\n'), ((4804, 4924), 'pandas.DataFrame', 'pd.DataFrame', (["{'characteristic-name': characteristic_name_lst, 'bucket': bucket_lst,\n 'ratio-value': ratio_value_lst}"], {}), "({'characteristic-name': characteristic_name_lst, 'bucket':\n bucket_lst, 'ratio-value': ratio_value_lst})\n", (4816, 4924), True, 'import pandas as pd\n'), ((5103, 5196), 'pandas.Categorical', 'pd.Categorical', (["sensitivity_analysis_df['bucket']"], {'categories': 'buckets_order', 'ordered': '(True)'}), "(sensitivity_analysis_df['bucket'], categories=buckets_order,\n ordered=True)\n", (5117, 5196), True, 'import pandas as pd\n'), ((5564, 5591), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (5574, 5591), True, 'import matplotlib.pyplot as plt\n'), ((5603, 5629), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(1)', 'num_grids'], {}), '(1, num_grids)\n', (5615, 5629), True, 'import matplotlib.pyplot as plt\n'), ((7474, 7534), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xvalues_lst', 'xticks_lst'], {'fontsize': '(fontsize / 1.1)'}), '(xvalues_lst, xticks_lst, fontsize=fontsize / 1.1)\n', (7484, 7534), True, 'import matplotlib.pyplot as plt\n'), ((7537, 7566), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fontsize'}), '(fontsize=fontsize)\n', (7547, 7566), True, 'import matplotlib.pyplot as plt\n'), ((9463, 9536), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xvalues_lst', 'xticks_lst'], 
{'fontsize': '(fontsize / 1.5)', 'rotation': '(90)'}), '(xvalues_lst, xticks_lst, fontsize=fontsize / 1.5, rotation=90)\n', (9473, 9536), True, 'import matplotlib.pyplot as plt\n'), ((9539, 9568), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fontsize'}), '(fontsize=fontsize)\n', (9549, 9568), True, 'import matplotlib.pyplot as plt\n'), ((9700, 9718), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9716, 9718), True, 'import matplotlib.pyplot as plt\n'), ((12752, 13166), 'action_detector_diagnosis.ActionDetectorDiagnosis', 'ActionDetectorDiagnosis', ([], {'ground_truth_filename': 'ground_truth_filename', 'prediction_filename': 'prediction_filename', 'tiou_thresholds': 'tiou_thresholds', 'limit_factor': 'None', 'min_tiou_thr': '(0.1)', 'subset': 'subset', 'verbose': '(True)', 'check_status': '(True)', 'load_extra_annotations': '(True)', 'characteristic_names_to_bins': 'characteristic_names_to_bins', 'normalize_ap': '(True)', 'minimum_normalized_precision_threshold_for_detection': '(0.0)'}), '(ground_truth_filename=ground_truth_filename,\n prediction_filename=prediction_filename, tiou_thresholds=\n tiou_thresholds, limit_factor=None, min_tiou_thr=0.1, subset=subset,\n verbose=True, check_status=True, load_extra_annotations=True,\n characteristic_names_to_bins=characteristic_names_to_bins, normalize_ap\n =True, minimum_normalized_precision_threshold_for_detection=0.0)\n', (12775, 13166), False, 'from action_detector_diagnosis import ActionDetectorDiagnosis\n'), ((14383, 14494), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Run the sensitivity analysis."""', 'formatter_class': 'ArgumentDefaultsHelpFormatter'}), "(description='Run the sensitivity analysis.', formatter_class\n =ArgumentDefaultsHelpFormatter)\n", (14397, 14494), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((1594, 1666), 'utils.interpolated_prec_rec', 'interpolated_prec_rec', (['precision_cumsum[tidx, :]', 
'recall_cumsum[tidx, :]'], {}), '(precision_cumsum[tidx, :], recall_cumsum[tidx, :])\n', (1615, 1666), False, 'from utils import interpolated_prec_rec\n'), ((3007, 3021), 'numpy.nanmean', 'np.nanmean', (['ap'], {}), '(ap)\n', (3017, 3021), True, 'import numpy as np\n'), ((11993, 12019), 'numpy.linspace', 'np.linspace', (['(0.5)', '(0.95)', '(10)'], {}), '(0.5, 0.95, 10)\n', (12004, 12019), True, 'import numpy as np\n'), ((1212, 1233), 'numpy.cumsum', 'np.cumsum', (['tp'], {'axis': '(1)'}), '(tp, axis=1)\n', (1221, 1233), True, 'import numpy as np\n'), ((1267, 1288), 'numpy.cumsum', 'np.cumsum', (['fp'], {'axis': '(1)'}), '(fp, axis=1)\n', (1276, 1288), True, 'import numpy as np\n'), ((1342, 1373), 'numpy.unique', 'np.unique', (["this_cls_gt['gt-id']"], {}), "(this_cls_gt['gt-id'])\n", (1351, 1373), True, 'import numpy as np\n'), ((2574, 2622), 'numpy.append', 'np.append', (["this_cls_gt['gt-id'].values", '[np.nan]'], {}), "(this_cls_gt['gt-id'].values, [np.nan])\n", (2583, 2622), True, 'import numpy as np\n'), ((13875, 13930), 'os.path.join', 'os.path.join', (['output_folder', '"""sensitivity_analysis.pdf"""'], {}), "(output_folder, 'sensitivity_analysis.pdf')\n", (13887, 13930), False, 'import os\n'), ((12103, 12143), 'numpy.array', 'np.array', (['[0, 0.02, 0.04, 0.06, 0.08, 1]'], {}), '([0, 0.02, 0.04, 0.06, 0.08, 1])\n', (12111, 12143), True, 'import numpy as np\n'), ((12217, 12252), 'numpy.array', 'np.array', (['[0, 3, 6, 12, 18, np.inf]'], {}), '([0, 3, 6, 12, 18, np.inf])\n', (12225, 12252), True, 'import numpy as np\n'), ((12333, 12366), 'numpy.array', 'np.array', (['[-1, 1, 40, 80, np.inf]'], {}), '([-1, 1, 40, 80, np.inf])\n', (12341, 12366), True, 'import numpy as np\n'), ((1036, 1093), 'pandas.isnull', 'pd.isnull', (['this_cls_pred[result.matched_gt_id_cols[tidx]]'], {}), '(this_cls_pred[result.matched_gt_id_cols[tidx]])\n', (1045, 1093), True, 'import pandas as pd\n'), ((10282, 10304), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(6)'], {}), '(0, 
1.0, 6)\n', (10293, 10304), True, 'import numpy as np\n'), ((10387, 10409), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(6)'], {}), '(0, 1.0, 6)\n', (10398, 10409), True, 'import numpy as np\n'), ((10490, 10529), 'numpy.array', 'np.array', (['[0, 30, 60, 120, 180, np.inf]'], {}), '([0, 30, 60, 120, 180, np.inf])\n', (10498, 10529), True, 'import numpy as np\n'), ((10614, 10645), 'numpy.array', 'np.array', (['[-1, 1, 4, 8, np.inf]'], {}), '([-1, 1, 4, 8, np.inf])\n', (10622, 10645), True, 'import numpy as np\n'), ((1124, 1181), 'pandas.isnull', 'pd.isnull', (['this_cls_pred[result.matched_gt_id_cols[tidx]]'], {}), '(this_cls_pred[result.matched_gt_id_cols[tidx]])\n', (1133, 1181), True, 'import pandas as pd\n'), ((11266, 11288), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(6)'], {}), '(0, 1.0, 6)\n', (11277, 11288), True, 'import numpy as np\n'), ((11369, 11408), 'numpy.array', 'np.array', (['[0, 30, 60, 120, 180, np.inf]'], {}), '([0, 30, 60, 120, 180, np.inf])\n', (11377, 11408), True, 'import numpy as np\n'), ((11493, 11524), 'numpy.array', 'np.array', (['[-1, 1, 4, 8, np.inf]'], {}), '([-1, 1, 4, 8, np.inf])\n', (11501, 11524), True, 'import numpy as np\n')] |
"""
@FileName: lsb.py
@Description: Implement lsb
@Author: Ryuk
@CreateDate: 2021/06/27
@LastEditTime: 2021/06/27
@LastEditors: Please set LastEditors
@Version: v0.1
"""
import scipy.io.wavfile as wav
import numpy as np
# Fixed seed so the 128-bit stop mark is identical for embedder and extractor.
np.random.seed(2020)
stop_mark = np.random.randint(0, 2, 128)
class LSBEmbedder:
    """Hide a bit string in the least-significant bits of WAV samples.

    Embedding positions are drawn from a seeded uniform roulette, so an
    LSBExtractor constructed with the same ``seed`` and ``rate`` can locate
    them.  The module-level 128-bit stop mark is appended to every message so
    the extractor knows where the payload ends.
    """

    def __init__(self, seed, rate=0.9, mode='single'):
        self.rate = rate            # fraction of sample positions used for embedding
        self.seed = seed            # RNG seed shared with the extractor
        self.mode = mode            # 'single' file or 'batch' of files
        self.stop_mark = stop_mark  # module-level 128-bit terminator
        self.channels = None
        self.fs = None
        self.left_signal = None
        self.right_signal = None
        self.wavsignal = None
        self.embed_length = None

    def _waveReader(self, path):
        """Read a wave file and record its sample rate and channel layout.

        :param path: path to the cover wav file
        """
        fs, wavsignal = wav.read(path)
        self.fs = fs
        # Bug fix: scipy returns an (n_samples, n_channels) array for stereo
        # files, so the original `len(wavsignal) == 2` compared the *frame*
        # count with 2 and stereo input was effectively never detected.
        if getattr(wavsignal, "ndim", 1) == 2:
            self.channels = 2
            self.left_signal = wavsignal[:, 0]
            self.right_signal = wavsignal[:, 1]
            # keep the full signal too so capacity checks can use its length
            self.wavsignal = wavsignal
        else:
            self.channels = 1
            self.wavsignal = wavsignal

    def _LSBReplace(self, wavsignal, secret_message):
        """Return a stego copy of ``wavsignal`` carrying ``secret_message``.

        Samples whose roulette draw falls at or below ``self.rate`` receive
        one message bit in their least-significant bit; sample signs are
        preserved by embedding into the magnitude.

        :param wavsignal: 1-D sequence of 16-bit samples
        :param secret_message: iterable of 0/1 bits
        :return: np.int16 array with the message embedded
        """
        # seeded so the extractor reproduces the same roulette
        np.random.seed(self.seed)
        roulette = np.random.rand(len(wavsignal))
        stego = np.array(wavsignal)
        k = 0
        for i in range(len(wavsignal)):
            if roulette[i] > self.rate:
                continue  # position not selected for embedding
            value = wavsignal[i]
            negative = value < 0
            if negative:
                value = -value
            if k < len(secret_message):
                # embed the next secret bit into the magnitude's LSB
                if int(secret_message[k]) == 0:
                    value = value & 0b1111111111111110
                else:
                    value = value | 0b0000000000000001
                k += 1
            stego[i] = -value if negative else value
        return stego.astype(np.int16)

    def _saveWave(self, stego, cover_path, stego_path, inplace=False):
        """Write the stego signal, over the cover or to ``stego_path``.

        :param stego: stego signal array
        :param cover_path: original cover path (target when ``inplace``)
        :param stego_path: destination path (required when not ``inplace``)
        :param inplace: overwrite the cover file instead of writing anew
        """
        if inplace:
            wav.write(cover_path, self.fs, stego)
        else:
            assert stego_path is not None
            wav.write(stego_path, self.fs, stego)

    def _embed_one(self, cover_path, stego_path, secret_message, inplace):
        """Embed ``secret_message`` (stop mark included) into one cover file."""
        self._waveReader(cover_path)
        assert self.channels in [1, 2]
        # capacity check: require ~10% headroom over the message length
        assert self.channels * len(self.wavsignal) * self.rate >= 1.1 * len(secret_message)
        if self.channels == 1:
            stego = self._LSBReplace(self.wavsignal, secret_message)
        else:
            left_stego = self._LSBReplace(self.left_signal, secret_message)
            right_stego = self._LSBReplace(self.right_signal, secret_message)
            # Bug fix: wav.write expects an (n_samples, 2) array; the original
            # passed [left, right], i.e. a (2, n_samples) layout.
            stego = np.stack([left_stego, right_stego], axis=1)
        self._saveWave(stego, cover_path, stego_path, inplace)

    def embed(self, cover_path, stego_path, secret_message, inplace=False):
        """Embed ``secret_message`` into one cover file or a batch of them.

        :param cover_path: cover path ('single') or list of paths ('batch')
        :param stego_path: output path or list of paths
        :param secret_message: iterable of 0/1 bits
        :param inplace: overwrite the cover file(s)
        """
        # append the stop mark so the extractor can detect the message end
        secret_message = np.concatenate([secret_message, self.stop_mark], axis=0)
        if self.mode == 'single':
            self._embed_one(cover_path, stego_path, secret_message, inplace)
        elif self.mode == 'batch':
            # Bug fix: the original batch loops re-embedded the first cover
            # repeatedly and passed the whole path list to the writer; now each
            # cover is read, embedded and saved individually.
            out_paths = cover_path if inplace else stego_path
            for cover, out in zip(cover_path, out_paths):
                self._embed_one(cover, out, secret_message, inplace)
class LSBExtractor:
    # Must be constructed with the same seed/rate as the LSBEmbedder so the
    # roulette reproduces the embedding positions bit-exactly.
    def __init__(self, seed, rate=0.9):
        self.seed = seed            # RNG seed shared with the embedder
        self.stop_mark = stop_mark  # 128-bit terminator marking the message end
        self.rate = rate            # fraction of sample positions carrying data
        self.fs = None
        self.channels= None
        self.wavsignal = None
        self.left_signal = None
        self.right_signal = None

    def _waveReader(self, path):
        """
        Read a wave file and record its sample rate and channel layout.
        :param path: path to the stego wav file
        :return:
        """
        fs, wavsignal = wav.read(path)
        self.fs = fs
        # NOTE(review): scipy returns an (n_samples, 2) array for stereo, so
        # this compares the frame count with 2 -- stereo input is unlikely to
        # be detected here; confirm against a real stereo file.
        if len(wavsignal) == 2:
            self.channels = 2
            self.left_signal = wavsignal[0]
            self.right_signal = wavsignal[1]
        else:
            self.channels = 1
            self.wavsignal = wavsignal

    def _checkHeader(self, header):
        """
        Check the validness of the extracted (44-bit) header.
        Currently a placeholder that accepts everything.
        :param header: header bits
        :return: True/False
        """
        return True

    def _checkStop(self, message):
        """
        Check whether the tail of ``message`` equals the shared stop mark.
        :param message: secret message bits extracted so far
        :return: True/False
        """
        message_stop = message[-len(self.stop_mark):]
        count = 0
        for i in range(len(self.stop_mark)):
            if message_stop[i] == self.stop_mark[i]:
                count += 1
        # only a perfect match of all stop-mark bits terminates extraction
        if count == len(self.stop_mark):
            return True
        else:
            return False

    def _LSBExtract(self, roulette, wavsignal):
        """
        Extract the LSBs of the roulette-selected samples until the stop
        mark is seen (or the signal ends).
        :param roulette: seeded uniform draws, one per sample position
        :param wavsignal: stego signal
        :return: secret message bits (stop mark included)
        """
        message = []
        for i in range(len(wavsignal)):
            if roulette[i] <= self.rate:
                value = wavsignal[i]
                # read the sample's least-significant bit via its binary form
                value = '{:016b}'.format(value)
                message.append(int(value[-1]))
                # check the validness of header
                if len(message) == 44:
                    assert self._checkHeader(message) is True
                # check stop mark
                if len(message) >= len(self.stop_mark) and self._checkStop(message):
                    return message
        return message

    def extract(self, wave_path, message_path):
        """
        Extract the message hidden in a wave file and write it to disk.
        :param wave_path: stego wave path
        :param message_path: output text file for the extracted bit string
        :return: extracted message bits
        """
        # choose random embedding location with roulette
        self._waveReader(wave_path)
        np.random.seed(self.seed)
        # NOTE(review): for stereo input self.wavsignal is never assigned by
        # _waveReader, so this len() call would fail -- verify stereo support.
        roulette = np.random.rand(len(self.wavsignal))
        if self.channels == 1:
            message = self._LSBExtract(roulette, self.wavsignal)
        elif self.channels == 2:
            message_left = self._LSBExtract(roulette, self.left_signal)
            message_right = self._LSBExtract(roulette, self.right_signal)
            message = np.hstack((message_left, message_right))
        with open(message_path, "w", encoding='utf-8') as f:
            f.write(''.join(str(i) for i in message))
        return message
| [
"numpy.hstack",
"numpy.array",
"numpy.random.randint",
"scipy.io.wavfile.read",
"numpy.random.seed",
"numpy.concatenate",
"scipy.io.wavfile.write"
] | [((222, 242), 'numpy.random.seed', 'np.random.seed', (['(2020)'], {}), '(2020)\n', (236, 242), True, 'import numpy as np\n'), ((255, 283), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(128)'], {}), '(0, 2, 128)\n', (272, 283), True, 'import numpy as np\n'), ((822, 836), 'scipy.io.wavfile.read', 'wav.read', (['path'], {}), '(path)\n', (830, 836), True, 'import scipy.io.wavfile as wav\n'), ((1341, 1366), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (1355, 1366), True, 'import numpy as np\n'), ((1434, 1453), 'numpy.array', 'np.array', (['wavsignal'], {}), '(wavsignal)\n', (1442, 1453), True, 'import numpy as np\n'), ((3147, 3203), 'numpy.concatenate', 'np.concatenate', (['[secret_message, self.stop_mark]'], {'axis': '(0)'}), '([secret_message, self.stop_mark], axis=0)\n', (3161, 3203), True, 'import numpy as np\n'), ((5166, 5180), 'scipy.io.wavfile.read', 'wav.read', (['path'], {}), '(path)\n', (5174, 5180), True, 'import scipy.io.wavfile as wav\n'), ((7124, 7149), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (7138, 7149), True, 'import numpy as np\n'), ((2635, 2672), 'scipy.io.wavfile.write', 'wav.write', (['cover_path', 'self.fs', 'stego'], {}), '(cover_path, self.fs, stego)\n', (2644, 2672), True, 'import scipy.io.wavfile as wav\n'), ((2741, 2778), 'scipy.io.wavfile.write', 'wav.write', (['stego_path', 'self.fs', 'stego'], {}), '(stego_path, self.fs, stego)\n', (2750, 2778), True, 'import scipy.io.wavfile as wav\n'), ((2244, 2259), 'numpy.array', 'np.array', (['stego'], {}), '(stego)\n', (2252, 2259), True, 'import numpy as np\n'), ((7504, 7544), 'numpy.hstack', 'np.hstack', (['(message_left, message_right)'], {}), '((message_left, message_right))\n', (7513, 7544), True, 'import numpy as np\n')] |
import os
import random
import pickle as pk
import logging
from collections import defaultdict
from chemreader.readers.readmol2 import Mol2
import h5py
import numpy as np
from scipy import sparse
from tqdm import tqdm
from slgnn.config import PAD_ATOM, PAD_BOND
# Fixed seed so random sampling of molecules is reproducible across runs.
SEED = 1458907
random.seed(SEED)
class ZincToHdf5:
    """Collect ZINC Mol2 blocks and serialize their graphs into an HDF5 file."""

    def __init__(self, mol2blocks: list, n_mols=None):
        """
        Args:
            mol2blocks (list): Mol2Block objects to serialize.
            n_mols (int): number of molecules; defaults to len(mol2blocks).
        """
        self._mol2s = mol2blocks
        if n_mols is None:
            self._n_mols = len(mol2blocks)
        else:
            self._n_mols = n_mols

    @classmethod
    def from_files(cls, fpaths: list, shuffle=False):
        """Build an instance from a list of .mol2/.mol2.gz file paths.

        Args:
            fpaths (list): paths to the mol2 files.
            shuffle (bool): randomize the order of the collected blocks.

        Raises:
            ValueError: if a path does not end in .mol2 or .gz.
        """
        for f in fpaths:
            if f.split(".")[-1] not in ["mol2", "gz"]:
                raise ValueError("{} is not a valid path.".format(f))
        mol2blocks = list()
        n_mols = 0
        for path in fpaths:
            mol2 = Mol2(path)
            blocks = mol2.mol2_blocks
            mol2blocks.extend(blocks)
            n_mols += mol2.n_mols
        if shuffle:
            # Bug fix: the boolean parameter shadowed random.shuffle, so the
            # original `shuffle(mol2blocks)` called a bool and raised TypeError.
            random.shuffle(mol2blocks)
        return cls(mol2blocks, n_mols)

    @classmethod
    def _get_index(cls, dir_path, verbose):
        """Load the directory's sample index, building it first if missing."""
        if not os.path.exists(os.path.join(dir_path, "index")):
            logging.info("Generating index...")
            index = cls.indexing(dir_path, verbose=verbose)
        else:
            with open(os.path.join(dir_path, "index"), "rb") as f:
                index = pk.load(f)
        return index

    @classmethod
    def _sample_all(cls, dir_path, mol2blocks, verbose):
        """Append every block found under `dir_path` to `mol2blocks`, shuffled.

        Returns:
            int: total number of molecules collected.
        """
        files = os.scandir(dir_path)
        n_mols = 0
        for f in (tqdm(list(files)) if verbose else files):
            if f.name.split(".")[-1] not in ["gz", "mol2"]:
                continue
            mol2 = Mol2(f.path)
            mol2blocks.extend(mol2.mol2_blocks)
            n_mols += mol2.n_mols
        random.shuffle(mol2blocks)
        return n_mols

    @classmethod
    def _group_samples(cls, samples, index):
        """Group sampled global indices by their source file.

        Args:
            samples: iterable of global sample indices.
            index: mapping global index -> (file name, block index).

        Returns:
            defaultdict: file name -> list of block indices within that file.
        """
        groups = defaultdict(list)
        for s in samples:
            groups[index[s][0]].append(index[s][1])
        return groups

    @classmethod
    def random_sample(cls, n_samples, dir_path=None, verbose=True):
        """Randomly sample `n_samples` blocks from an indexed directory.

        Falls back to using every available block (with a warning) when the
        directory holds fewer than `n_samples` molecules.
        """
        if dir_path is None:
            dir_path = "data"
        index = cls._get_index(dir_path, verbose=verbose)
        # Get Mol2Blocks randomly
        mol2blocks = list()
        # n_samples is larger than the number of samples in the dir_path
        if n_samples > index["total"]:
            logging.warning(
                "{} does not have enough samples. Got {}/{}".format(
                    dir_path, index["total"], n_samples))
            n_mols = cls._sample_all(dir_path, mol2blocks, verbose=verbose)
            return cls(mol2blocks, n_mols)
        # n_samples is smaller than the number of samples in the dir_path;
        # group the sampled indices by file so each file is read only once
        samples = random.sample(list(range(index["total"])), n_samples)
        groups = cls._group_samples(samples, index)
        it = tqdm(list(groups.items())) if verbose else groups.items()
        for fname, block_indices in it:
            file_path = os.path.join(dir_path, fname)
            mol2 = Mol2(file_path)
            for id in block_indices:
                mol2blocks.append(mol2.mol2_blocks[id])
        return cls(mol2blocks, n_samples)

    @classmethod
    def _get_mol2files(cls, path):
        """Recursively collect .mol2/.gz DirEntry objects under `path`."""
        files = list()
        for item in os.scandir(path):
            if item.is_file() and item.name.split(".")[-1] in ["gz", "mol2"]:
                files.append(item)
            elif item.is_dir():
                files.extend(cls._get_mol2files(item.path))
        return files

    @classmethod
    def random_sample_without_index(cls, n_samples, dir_path, verbose=True):
        """Sample `n_samples` files (first block of each) without an index."""
        # Get Mol2Blocks randomly
        mol2blocks = list()
        files = cls._get_mol2files(dir_path)
        if len(files) < n_samples:
            logging.warning(
                "{} does not have enough samples. Got {}/{}".format(
                    dir_path, len(files), n_samples))
            samples = files
        else:
            samples = random.sample(files, n_samples)
        it = tqdm(samples) if verbose else samples
        counter = 0
        for sample in it:
            try:
                mol2blocks.append(Mol2(sample.path).mol2_blocks[0])
            except IndexError:
                # empty files contribute nothing and are only logged
                logging.warning("{} is an empty file.".format(sample.path))
                continue
            counter += 1
        return cls(mol2blocks, counter)

    @property
    def n_mols(self):
        """int: number of molecules held by this instance."""
        return self._n_mols

    def save_hdf5(self, out_path):
        """Write SMILES, features and adjacency matrices to `out_path`.

        Adjacency matrices are stored CSR-decomposed (data/indptr/indices
        datasets plus a shape attribute) in the 'adjacency_matrices' group;
        the molecule count goes into the file's 'total' attribute.
        """
        h5f = h5py.File(out_path, "w")
        dt = h5py.string_dtype(encoding="utf-8")
        smiles = list()
        a_matrices = list()
        atom_feats = list()
        bond_feats = list()
        for block in self._mol2s:
            smiles.append(block.to_smiles(isomeric=True))
            graph = block.to_graph(
                sparse=True, pad_atom=PAD_ATOM, pad_bond=PAD_BOND)
            a_matrices.append(graph["adjacency"])
            atom_feats.append(graph["atom_features"])
            bond_feats.append(graph["bond_features"])
        h5f.create_dataset("smiles",
                           data=np.array(smiles).astype(bytes),
                           dtype=dt,
                           chunks=True)
        h5f.create_dataset("atom_features",
                           data=np.array(atom_feats),
                           dtype=np.float16,
                           chunks=True)
        h5f.create_dataset("bond_features",
                           data=np.array(bond_feats),
                           dtype=np.int8,
                           chunks=True)
        # Save sparse adjacency matrices
        adj = h5f.create_group("adjacency_matrices")
        for i, mat in enumerate(a_matrices):
            adj.create_dataset("data_{}".format(i), data=mat.data)
            adj.create_dataset("indptr_{}".format(i), data=mat.indptr)
            adj.create_dataset("indices_{}".format(i), data=mat.indices)
            adj.attrs["shape_{}".format(i)] = mat.shape
        h5f.attrs["total"] = self.n_mols
        h5f.close()

    @staticmethod
    def indexing(dir_path, verbose=True):
        """ Indexing the files. Speed up hdf5 file creation.

        Args:
            dir_path (str): path to the directory storing all the .mol2 files.
            verbose (bool): if to show the progress bar.

        Return:
            dict: the generated index dict mapping a global molecule index to
                a (file name, block index) tuple, plus a "total" count.

        Output:
            A binary index file under the dir_path.
        """
        files = list(os.scandir(dir_path))
        index = dict()
        indexer = 0
        for f in (tqdm(files) if verbose else files):
            if not f.name.split(".")[-1] in ["gz", "mol2"]:
                continue
            mol2 = Mol2(f.path)
            n_mols = mol2.n_mols
            for i in range(n_mols):
                index[indexer] = tuple([f.name, i])
                indexer += 1
        index["total"] = indexer
        with open(os.path.join(dir_path, "index"), "wb") as index_f:
            pk.dump(index, index_f)
        return index
class Hdf5Loader:
    """Read molecule graphs back out of an HDF5 file written by ZincToHdf5."""

    def __init__(self, path):
        self.path = path

    def load_adjacency_matrices(self, n=None):
        """Rebuild the first `n` sparse CSR adjacency matrices (all by default)."""
        if n is None:
            n = self.total
        matrices = list()
        with h5py.File(self.path, "r") as h5f:
            grp = h5f["adjacency_matrices"]
            for idx in range(n):
                parts = (grp["data_{}".format(idx)][:],
                         grp["indices_{}".format(idx)][:],
                         grp["indptr_{}".format(idx)][:])
                shape = grp.attrs["shape_{}".format(idx)]
                matrices.append(sparse.csr_matrix(parts, shape))
        return matrices

    def load_atom_features(self, n=None):
        """Return the atom features of the first `n` molecules (all by default)."""
        limit = self.total if n is None else n
        with h5py.File(self.path, "r") as h5f:
            return h5f["atom_features"][:limit]

    def load_bond_features(self, n=None):
        """Return the bond features of the first `n` molecules (all by default)."""
        limit = self.total if n is None else n
        with h5py.File(self.path, "r") as h5f:
            return h5f["bond_features"][:limit]

    def load_smiles(self, n=None):
        """Return the SMILES strings of the first `n` molecules (all by default)."""
        limit = self.total if n is None else n
        with h5py.File(self.path, "r") as h5f:
            return h5f["smiles"][:limit]

    @property
    def total(self):
        """Number of molecules stored in the file ('total' attribute)."""
        with h5py.File(self.path, "r") as h5f:
            return h5f.attrs["total"]
| [
"random.sample",
"pickle.dump",
"random.shuffle",
"os.scandir",
"chemreader.readers.readmol2.Mol2",
"os.path.join",
"random.seed",
"h5py.File",
"tqdm.tqdm",
"pickle.load",
"numpy.array",
"collections.defaultdict",
"h5py.string_dtype",
"logging.info"
] | [((280, 297), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (291, 297), False, 'import random\n'), ((1515, 1535), 'os.scandir', 'os.scandir', (['dir_path'], {}), '(dir_path)\n', (1525, 1535), False, 'import os\n'), ((1822, 1848), 'random.shuffle', 'random.shuffle', (['mol2blocks'], {}), '(mol2blocks)\n', (1836, 1848), False, 'import random\n'), ((1951, 1968), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1962, 1968), False, 'from collections import defaultdict\n'), ((3350, 3366), 'os.scandir', 'os.scandir', (['path'], {}), '(path)\n', (3360, 3366), False, 'import os\n'), ((4573, 4597), 'h5py.File', 'h5py.File', (['out_path', '"""w"""'], {}), "(out_path, 'w')\n", (4582, 4597), False, 'import h5py\n'), ((4611, 4646), 'h5py.string_dtype', 'h5py.string_dtype', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (4628, 4646), False, 'import h5py\n'), ((7234, 7259), 'h5py.File', 'h5py.File', (['self.path', '"""r"""'], {}), "(self.path, 'r')\n", (7243, 7259), False, 'import h5py\n'), ((841, 851), 'chemreader.readers.readmol2.Mol2', 'Mol2', (['path'], {}), '(path)\n', (845, 851), False, 'from chemreader.readers.readmol2 import Mol2\n'), ((1191, 1226), 'logging.info', 'logging.info', (['"""Generating index..."""'], {}), "('Generating index...')\n", (1203, 1226), False, 'import logging\n'), ((1719, 1731), 'chemreader.readers.readmol2.Mol2', 'Mol2', (['f.path'], {}), '(f.path)\n', (1723, 1731), False, 'from chemreader.readers.readmol2 import Mol2\n'), ((3054, 3083), 'os.path.join', 'os.path.join', (['dir_path', 'fname'], {}), '(dir_path, fname)\n', (3066, 3083), False, 'import os\n'), ((3103, 3118), 'chemreader.readers.readmol2.Mol2', 'Mol2', (['file_path'], {}), '(file_path)\n', (3107, 3118), False, 'from chemreader.readers.readmol2 import Mol2\n'), ((4047, 4078), 'random.sample', 'random.sample', (['files', 'n_samples'], {}), '(files, n_samples)\n', (4060, 4078), False, 'import random\n'), ((4092, 4105), 'tqdm.tqdm', 'tqdm', 
(['samples'], {}), '(samples)\n', (4096, 4105), False, 'from tqdm import tqdm\n'), ((6551, 6571), 'os.scandir', 'os.scandir', (['dir_path'], {}), '(dir_path)\n', (6561, 6571), False, 'import os\n'), ((6634, 6645), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (6638, 6645), False, 'from tqdm import tqdm\n'), ((6774, 6786), 'chemreader.readers.readmol2.Mol2', 'Mol2', (['f.path'], {}), '(f.path)\n', (6778, 6786), False, 'from chemreader.readers.readmol2 import Mol2\n'), ((7051, 7074), 'pickle.dump', 'pk.dump', (['index', 'index_f'], {}), '(index, index_f)\n', (7058, 7074), True, 'import pickle as pk\n'), ((7814, 7839), 'h5py.File', 'h5py.File', (['self.path', '"""r"""'], {}), "(self.path, 'r')\n", (7823, 7839), False, 'import h5py\n'), ((8035, 8060), 'h5py.File', 'h5py.File', (['self.path', '"""r"""'], {}), "(self.path, 'r')\n", (8044, 8060), False, 'import h5py\n'), ((8249, 8274), 'h5py.File', 'h5py.File', (['self.path', '"""r"""'], {}), "(self.path, 'r')\n", (8258, 8274), False, 'import h5py\n'), ((8393, 8418), 'h5py.File', 'h5py.File', (['self.path', '"""r"""'], {}), "(self.path, 'r')\n", (8402, 8418), False, 'import h5py\n'), ((1145, 1176), 'os.path.join', 'os.path.join', (['dir_path', '"""index"""'], {}), "(dir_path, 'index')\n", (1157, 1176), False, 'import os\n'), ((1392, 1402), 'pickle.load', 'pk.load', (['f'], {}), '(f)\n', (1399, 1402), True, 'import pickle as pk\n'), ((5362, 5382), 'numpy.array', 'np.array', (['atom_feats'], {}), '(atom_feats)\n', (5370, 5382), True, 'import numpy as np\n'), ((5545, 5565), 'numpy.array', 'np.array', (['bond_feats'], {}), '(bond_feats)\n', (5553, 5565), True, 'import numpy as np\n'), ((6988, 7019), 'os.path.join', 'os.path.join', (['dir_path', '"""index"""'], {}), "(dir_path, 'index')\n", (7000, 7019), False, 'import os\n'), ((1323, 1354), 'os.path.join', 'os.path.join', (['dir_path', '"""index"""'], {}), "(dir_path, 'index')\n", (1335, 1354), False, 'import os\n'), ((5177, 5193), 'numpy.array', 'np.array', (['smiles'], 
{}), '(smiles)\n', (5185, 5193), True, 'import numpy as np\n'), ((4227, 4244), 'chemreader.readers.readmol2.Mol2', 'Mol2', (['sample.path'], {}), '(sample.path)\n', (4231, 4244), False, 'from chemreader.readers.readmol2 import Mol2\n')] |
import networkx as nx
import numpy as np
import math
from tqdm import tqdm
import numba
from numba.experimental import jitclass
from numba import jit
# numba @jitclass attribute specification for `steady_state`: every instance
# attribute must be declared with an explicit numba type before compilation.
# NOTE(review): 'background_field' is declared here but never assigned in
# steady_state.__init__ below — possibly unused; verify before removing.
steadyspec = [
    ('adj_matrix',numba.float64[:,:]),
    ('graph_size',numba.int32),
    ('background_field',numba.float64[:]),
    ('fixed_point_iter',numba.int32),
    ('fp_tol_fac',numba.float64),
    ]
@jitclass(steadyspec)
class steady_state(object):
    """
    Computes the steady-state (self-consistent) magnetisation of a
    mean-field Ising system via fixed-point iteration with Aitken
    acceleration.
    ...
    Attributes
    ----------
    adj_matrix : numpy.array
        Adjacency matrix of the graph
    fixed_point_iter : int, optional
        Max number of iterations used in self-consistency equations (default 10000)
    fp_tol_fac : float, optional
        Tolerance factor in stopping condition for consistency equations (default 1e-6)
    """
    def __init__(self,adj_matrix,fixed_point_iter=10000,fp_tol_fac=1e-6):
        self.adj_matrix = adj_matrix
        self.graph_size = self.adj_matrix.shape[0]
        self.fixed_point_iter=fixed_point_iter
        self.fp_tol_fac=fp_tol_fac
    def single_mag(self,i,m,beta,field):
        """
        Calculates magnetisation for a single node. Subfunction of magnetisation function.

        Parameters
        ------------
        i : int
            Index of the node in question.
        m : numpy.array
            Magnetisation array for all nodes.
        beta: float
            Interaction strength
        field: numpy.array
            Array of agent's control field for each node
        """
        # gamma is a damping/mixing factor; fixed at 1.0, i.e. no damping:
        # the node takes the full mean-field update tanh(beta*(h_i + f_i)).
        gamma=1.0
        spin_field = self.adj_matrix[i].dot(m)
        term = math.tanh(beta*(spin_field+field[i]))
        return (1.0-gamma)*m[i] + gamma*term
    def magnetisation(self,mag,beta,field):
        """
        Calculates one synchronous mean-field update for the whole system.

        Parameters
        ------------
        mag : numpy.array
            Magnetisation array for all nodes.
        beta: float
            Interaction strength
        field: numpy.array
            Array of agent's control field for each node
        """
        m_old = mag
        m_new = np.zeros(len(m_old))
        for i in range(self.graph_size):
            m_new[i]=self.single_mag(i,m_old,beta,field)
        return m_new
    def aitken_method(self,mag0,beta,field):
        """
        Solves self-consistency equation by following Aitken method* for accelerating convergence.
        * Numerical Analysis Richard L.Burden 9th Edition, p. 105

        Parameters
        ------------
        mag0 : numpy.array
            Initial guess of magnetisation for all nodes.
        beta: float
            Interaction strength
        field: numpy.array
            Array of agent's control field for each node
        """
        mag1=self.magnetisation(mag0,beta,field)
        for i in range(self.fixed_point_iter):
            mag2=self.magnetisation(mag1,beta,field)
            # Elementwise Aitken delta-squared extrapolation; only applied
            # when every denominator is non-zero, otherwise fall back to
            # the plain fixed-point iterate.
            if ((mag0+mag2-2*mag1)!=0).all():
                mag_d = mag0 - (mag1-mag0)**2/(mag0+mag2-2*mag1)
            else:
                mag_d = mag1
            # Convergence test on the difference of total magnetisations.
            if abs(np.sum(mag0)-np.sum(mag_d))<self.fp_tol_fac:
                break
            mag0=mag1
            mag1=mag2
        # If the loop exhausted its budget, return the last plain iterate
        # rather than a possibly unstable extrapolation.
        if i+1==self.fixed_point_iter:
            mag_d = mag1
        return mag_d
@jit(nopython=True)
def isclose(a,b):
    """Approximate float equality with a mixed tolerance: |a-b| must fall
    within max(1e-9 * max(|a|, |b|), 1e-5)."""
    rel_tol = 1e-9 * max(abs(a), abs(b))
    return abs(a - b) <= max(rel_tol, 1e-5)
@jit(nopython=True)
def susc_grad(beta,mag,adj_matrix):
    """
    Mean-field susceptibility gradient.

    Parameters
    ------------
    beta: float
        Interaction strength.
    mag : numpy.array
        Magnetisation array for all nodes.
    adj_matrix : numpy.array
        Adjacency matrix of the network.
    """
    n = mag.shape[0]
    # Diagonal matrix D_ii = 1 - m_i^2 (derivative of tanh at the fixed point).
    D = np.identity(n) * np.array([(1 - m ** 2) for m in mag])
    # Susceptibility chi = beta * (I - beta*D*A)^(-1) * D
    system = np.identity(n) - beta * D.dot(adj_matrix)
    susc_matrix = beta * np.linalg.inv(system).dot(D)
    # Row sums give the response of each node to a uniform field change.
    return np.sum(susc_matrix, axis=1).flatten()
def mag_grad(beta,mag,adj_matrix):
    """
    Gradient of the magnetisation with respect to a change in the external
    control field (the mean-field susceptibility).

    Returns a zero gradient when the system is fully saturated (all
    magnetisations approximately 1), since no field change can help then.

    Parameters
    ------------
    beta : float
        Interaction strength.
    mag : numpy.array
        Magnetisation array for all nodes.
    adj_matrix : numpy.array
        Adjacency matrix of the network.
    """
    saturated = np.all(
        [isclose(m, one) for m, one in zip(mag, np.ones(mag.shape[0]))]
    )
    if saturated:
        return np.zeros(len(mag))
    return susc_grad(beta, mag, adj_matrix)
@jit(nopython=True)
def projection_simplex_sort(v, z):
    """
    Projects the (absolute) control-field allocation onto the simplex of
    radius ``z``, i.e. enforces the agent's magnetic field budget.
    ...
    Parameters
    ----------
    v : numpy.array
        Control field allocation of the agent.
    z : float
        Magnetic field budget.
    """
    dim = v.shape[0]
    v = np.abs(v)
    # Sort descending, then find the largest rho with a positive threshold gap.
    u_desc = np.sort(v)[::-1]
    running = np.cumsum(u_desc) - z
    positions = np.arange(dim) + 1
    feasible = u_desc - running / positions > 0
    rho = positions[feasible][-1]
    theta = running[feasible][-1] / float(rho)
    # Shift by theta and clip at zero.
    return np.maximum(v - theta, 0)
@jit(nopython=True)
def lr_1(x,iim_iter=5000):
    """Exponentially decaying learning-rate schedule (used for the positive agent)."""
    decay_scale = 0.1 * iim_iter
    return np.exp(-x / decay_scale)
@jit(nopython=True)
def lr_2(x,iim_iter=5000):
    """Exponentially decaying learning-rate schedule (used for the negative agent)."""
    decay_scale = 0.1 * iim_iter
    return np.exp(-x / decay_scale)
@jit(nopython=True)
def adam(grad,it,typ,ms,vs,iim_iter,beta1=0.9,beta2=0.999,eps=0.1):
    """
    Adam optimiser step.

    Parameters
    ------------
    grad : numpy.array
        Gradient array for all nodes.
    it : int
        Iteration index.
    typ : string
        'pos' for the positive agent; 'neg' for the negative agent
        (selects the learning-rate schedule).
    ms : numpy.array
        First-moment (mean of gradients) accumulator.
    vs : numpy.array
        Second-moment (mean of squared gradients) accumulator.
    iim_iter : int
        Max number of iterations in the optimisation algorithm.
    beta1 : float
        First-moment decay rate. Default 0.9
    beta2 : float
        Second-moment decay rate. Default 0.999
    eps : float
        Numerical-stability constant. Default 0.1

    Returns
    ------------
    (change, ms, vs) : the parameter update and the updated moment buffers.
    """
    if typ=='pos':
        lr = lr_1(it)
    elif typ=='neg':
        lr = lr_2(it)
    # Exponential moving averages of the gradient and its square.
    ms = beta1 * ms + (1.0 - beta1) * grad
    vs = beta2 * vs + (1.0 - beta2) * grad ** 2
    # Bias-corrected estimates (standard Adam correction).
    m_hat = ms / (1.0 - beta1 ** (it + 1))
    v_hat = vs / (1.0 - beta2 ** (it + 1))
    change = lr * m_hat / (np.sqrt(v_hat) + eps)
    return change, ms, vs
class mf_ising_system():
    """
    Mean-field Ising Influence Maximisation game between two competing
    agents (one positive, one negative) on a fixed network.
    ...
    Attributes
    ----------
    graph : networkx graph
        undirected graph
    background_field : numpy.array
        Background field applied to the system
    iim_iter: int, optional
        Number of gradient ascent iterations (default 10000)
    iim_tol_fac: float, optional
        Tolerance factor in stopping condition for gradient ascent algorithm (default 1e-3)
    """
def __init__(self,graph,background_field,iim_iter=10000,iim_tol_fac=1e-3):
self.graph=graph
self.adj_matrix = nx.to_numpy_matrix(graph).astype(np.float64)
self.graph_size = self.adj_matrix.shape[0]
self.background_field = background_field.astype(np.float64)
self.iim_iter=iim_iter
self.iim_tol_fac = iim_tol_fac
def positive_agent(self,mag_i,it,pos_budget,beta,neg_change):
"""
Single move by the positive agent.
...
Parameters
----------
mag_i : numpy.array
Magnetisation array for all nodes.
it : int
Iteration of the algorithm.
pos_budget : float
Magnetic field budget for the positive agent.
beta: float
Interaction strength
neg_change : numpy.array
Change in allocation of the negative agent.
"""
mag_i_grad = mag_grad(beta,mag_i,self.adj_matrix)
control_field = self.control_field_history_pos[-1]
change,ms,vs = adam(mag_i_grad,it,'pos',self.ms_pos,self.vs_pos,self.iim_iter)
self.ms_pos=ms
self.vs_pos=vs
control_field_update = control_field + (change+neg_change)
control_field_new = projection_simplex_sort(control_field_update.T,pos_budget)
self.control_field_history_pos.append(control_field_new)
return control_field_new,mag_i_grad
def negative_agent(self,mag_i,it,neg_budget,beta):
"""
Single move by the negative agent.
...
Parameters
----------
mag_i : numpy.array
Magnetisation array for all nodes.
it : int
Iteration of the algorithm.
neg_budget : float
Magnetic field budget for the negative agent.
beta: float
Interaction strength
"""
mag_i = -1.0*mag_i
mag_i_grad = -mag_grad(beta,mag_i,self.adj_matrix)
control_field = self.control_field_history_neg[-1]
change,ms,vs=adam(mag_i_grad,it,'neg',self.ms_neg,self.vs_neg,self.iim_iter)
self.ms_neg=ms
self.vs_neg=vs
control_field_update = control_field - change
control_field_new = projection_simplex_sort(control_field_update.T,neg_budget)
self.control_field_history_neg.append(control_field_new)
return control_field_new,mag_i_grad,change
    def second_partial_dffs(self,state,mag_ii,tot_field,beta,a=1e-5):
        """
        Calculates 2nd gradients of magnetisation with respect to each agents' control field.
        Calculated using central difference formula.
        ...
        Parameters
        ----------
        state : class instance
            steady_state class instance
        mag_ii : numpy.array
            Magnetisation array for all nodes.
        tot_field : numpy.array
            Total net magnetic field experienced by each node.
        beta: float
            Interaction strength
        a : float, optional
            Used in central difference formula. Specifies magnitude of change of control field.
        """
        update = a*np.ones(self.graph_size)
        # Perturb the total field upwards; the leading minus signs mirror
        # the negative agent's convention used in negative_agent().
        upper_change=tot_field+update
        mag_plus= -state.aitken_method(mag_ii,beta,upper_change)
        grad_plus = -mag_grad(beta,mag_plus,self.adj_matrix)
        # Perturb the total field downwards.
        lower_change = tot_field-update
        mag_minus= -state.aitken_method(mag_ii,beta,lower_change)
        grad_minus = -mag_grad(beta,mag_minus,self.adj_matrix)
        second_total_grad = (grad_plus - grad_minus)/(2*update) # central difference formula
        curv_player_neg = - second_total_grad # minus because product rule : H_pos = H_pos - H_neg
        # By symmetry both players see the same curvature.
        curv_player_pos = curv_player_neg
        return np.array([curv_player_pos,curv_player_neg])
def init_lists(self):
"""
Initialises lists for storing variables.
"""
self.control_field_history_pos =[]
self.control_field_history_neg = []
self.mag_history = []
self.pos_gradient_history=np.zeros((self.iim_iter,self.graph_size))
self.neg_gradient_history=np.zeros((self.iim_iter,self.graph_size))
self.ms_pos = np.zeros(self.graph_size,dtype=np.float64)
self.vs_pos = np.zeros(self.graph_size,dtype=np.float64)
self.ms_neg = np.zeros(self.graph_size,dtype=np.float64)
self.vs_neg = np.zeros(self.graph_size,dtype=np.float64)
def MF_IIM(self,pos_budget,neg_budget,beta,init_alloc='random',progress=True):
"""
Calculates competitive MF-IIM by following stochastic gradient ascent optimisation with
Adam optimiser.
Parameters
------------
pos_budget : float
Maximum magnetic field budget to be spent by the positive agent.
neg_budget : float
Maximum magnetic field budget to be spent by the negative agent.
beta : float
Interaction strength
init_alloc : string or numpy.array, optional
Either 'uniform' which corresponds to uniform spread of financial budget equaly among nodes.
'random' corresponds to random initialisations. Alternatively, provide custom numpy.array
allocation of your own choice. Default 'random'.
progress : boolean
If True shows progress bar; False otherwise.
Outputs
-----------
control_field_pos : numpy.array
Positive agent's control field allocation that results in the equilibrium.
control_field_neg : numpy.array
Negative agent's control field allocation that results in the equilibrium.
final_mag : numpy.array
Final magnetisation of the system.
"""
if isinstance(init_alloc,(np.ndarray, np.generic)):
control_field_pos = init_alloc[0,:]
control_field_neg = init_alloc[1,:]
elif isinstance(init_alloc,str):
if init_alloc=='aligned':
control_field_pos =( pos_budget /self.graph_size)*np.ones(self.graph_size)
control_field_neg = ( neg_budget /self.graph_size)*np.ones(self.graph_size)
elif init_alloc=='random':
control_field_pos = np.random.dirichlet(np.ones(self.graph_size))*pos_budget
control_field_neg = np.random.dirichlet(np.ones(self.graph_size))*neg_budget
init_mag = np.array([np.random.choice([-1,1]) for i in range(self.graph_size)]).astype(np.float64)
self.init_lists()
self.control_field_history_pos.append(control_field_pos)
self.control_field_history_neg.append(control_field_neg)
state = steady_state(self.adj_matrix)
self.state=state
tot_field = control_field_pos-control_field_neg
mag_i = state.aitken_method(init_mag,beta,tot_field)
self.mag_history.append(mag_i)
for it in tqdm(range(self.iim_iter)) if progress else range(self.iim_iter):
gradients=[]
if neg_budget!=0:
control_neg,neg_gradient,neg_change = self.negative_agent(mag_i,it,neg_budget,beta)
tot_field-=control_neg
self.neg_gradient_history[it]=neg_gradient
gradients.append(neg_gradient)
if pos_budget!=0:
control_pos,pos_gradient = self.positive_agent(mag_i,it,pos_budget,beta,neg_change)
tot_field +=control_pos
self.pos_gradient_history[it]=pos_gradient
gradients.append(pos_gradient)
mag_ii= state.aitken_method(mag_i,beta,tot_field)
self.mag_history.append(mag_ii)
if np.all([(abs(gradient)<self.iim_tol_fac).all() for gradient in gradients]):
second_dffs=self.second_partial_dffs(state,mag_ii,tot_field,beta)
if (second_dffs[0]<0).all() and (second_dffs[1]<0).all():
break
mag_i=mag_ii
tot_field=0.0
if it==self.iim_iter-1:
print('Failed to converge after {} iterations'.format(self.iim_iter))
final_mag = mag_ii
elif it < self.iim_iter-1:
final_mag = mag_ii
self.control_field_history_pos = np.array(self.control_field_history_pos)
self.control_field_history_neg = np.array(self.control_field_history_neg)
self.mag_history = np.array(self.mag_history)
return self.control_field_history_pos[-1],self.control_field_history_neg[-1],final_mag | [
"numpy.identity",
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.arange",
"numpy.random.choice",
"numpy.sort",
"numba.experimental.jitclass",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numba.jit",
"numpy.sum",
"numpy.cumsum",
"numpy.maximum",
"networkx.to_numpy_matrix",
"math.tanh"... | [((361, 381), 'numba.experimental.jitclass', 'jitclass', (['steadyspec'], {}), '(steadyspec)\n', (369, 381), False, 'from numba.experimental import jitclass\n'), ((3349, 3367), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3352, 3367), False, 'from numba import jit\n'), ((3451, 3469), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3454, 3469), False, 'from numba import jit\n'), ((4602, 4620), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (4605, 4620), False, 'from numba import jit\n'), ((5165, 5183), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5168, 5183), False, 'from numba import jit\n'), ((5251, 5269), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5254, 5269), False, 'from numba import jit\n'), ((5337, 5355), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5340, 5355), False, 'from numba import jit\n'), ((4923, 4932), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (4929, 4932), True, 'import numpy as np\n'), ((5124, 5148), 'numpy.maximum', 'np.maximum', (['(v - theta)', '(0)'], {}), '(v - theta, 0)\n', (5134, 5148), True, 'import numpy as np\n'), ((5223, 5252), 'numpy.exp', 'np.exp', (['(-x / (0.1 * iim_iter))'], {}), '(-x / (0.1 * iim_iter))\n', (5229, 5252), True, 'import numpy as np\n'), ((5309, 5338), 'numpy.exp', 'np.exp', (['(-x / (0.1 * iim_iter))'], {}), '(-x / (0.1 * iim_iter))\n', (5315, 5338), True, 'import numpy as np\n'), ((1651, 1692), 'math.tanh', 'math.tanh', (['(beta * (spin_field + field[i]))'], {}), '(beta * (spin_field + field[i]))\n', (1660, 1692), False, 'import math\n'), ((3787, 3812), 'numpy.identity', 'np.identity', (['mag.shape[0]'], {}), '(mag.shape[0])\n', (3798, 3812), True, 'import numpy as np\n'), ((3813, 3850), 'numpy.array', 'np.array', (['[(1 - i ** 2) for i in mag]'], {}), '([(1 - i ** 2) for i in mag])\n', (3821, 3850), True, 'import numpy as np\n'), ((4941, 
4951), 'numpy.sort', 'np.sort', (['v'], {}), '(v)\n', (4948, 4951), True, 'import numpy as np\n'), ((4969, 4981), 'numpy.cumsum', 'np.cumsum', (['u'], {}), '(u)\n', (4978, 4981), True, 'import numpy as np\n'), ((4996, 5017), 'numpy.arange', 'np.arange', (['n_features'], {}), '(n_features)\n', (5005, 5017), True, 'import numpy as np\n'), ((10794, 10838), 'numpy.array', 'np.array', (['[curv_player_pos, curv_player_neg]'], {}), '([curv_player_pos, curv_player_neg])\n', (10802, 10838), True, 'import numpy as np\n'), ((11095, 11137), 'numpy.zeros', 'np.zeros', (['(self.iim_iter, self.graph_size)'], {}), '((self.iim_iter, self.graph_size))\n', (11103, 11137), True, 'import numpy as np\n'), ((11171, 11213), 'numpy.zeros', 'np.zeros', (['(self.iim_iter, self.graph_size)'], {}), '((self.iim_iter, self.graph_size))\n', (11179, 11213), True, 'import numpy as np\n'), ((11244, 11287), 'numpy.zeros', 'np.zeros', (['self.graph_size'], {'dtype': 'np.float64'}), '(self.graph_size, dtype=np.float64)\n', (11252, 11287), True, 'import numpy as np\n'), ((11309, 11352), 'numpy.zeros', 'np.zeros', (['self.graph_size'], {'dtype': 'np.float64'}), '(self.graph_size, dtype=np.float64)\n', (11317, 11352), True, 'import numpy as np\n'), ((11374, 11417), 'numpy.zeros', 'np.zeros', (['self.graph_size'], {'dtype': 'np.float64'}), '(self.graph_size, dtype=np.float64)\n', (11382, 11417), True, 'import numpy as np\n'), ((11439, 11482), 'numpy.zeros', 'np.zeros', (['self.graph_size'], {'dtype': 'np.float64'}), '(self.graph_size, dtype=np.float64)\n', (11447, 11482), True, 'import numpy as np\n'), ((15400, 15440), 'numpy.array', 'np.array', (['self.control_field_history_pos'], {}), '(self.control_field_history_pos)\n', (15408, 15440), True, 'import numpy as np\n'), ((15482, 15522), 'numpy.array', 'np.array', (['self.control_field_history_neg'], {}), '(self.control_field_history_neg)\n', (15490, 15522), True, 'import numpy as np\n'), ((15550, 15576), 'numpy.array', 'np.array', (['self.mag_history'], 
{}), '(self.mag_history)\n', (15558, 15576), True, 'import numpy as np\n'), ((3872, 3897), 'numpy.identity', 'np.identity', (['mag.shape[0]'], {}), '(mag.shape[0])\n', (3883, 3897), True, 'import numpy as np\n'), ((3971, 3998), 'numpy.sum', 'np.sum', (['susc_matrix'], {'axis': '(1)'}), '(susc_matrix, axis=1)\n', (3977, 3998), True, 'import numpy as np\n'), ((6428, 6441), 'numpy.sqrt', 'np.sqrt', (['vhat'], {}), '(vhat)\n', (6435, 6441), True, 'import numpy as np\n'), ((10185, 10209), 'numpy.ones', 'np.ones', (['self.graph_size'], {}), '(self.graph_size)\n', (10192, 10209), True, 'import numpy as np\n'), ((7130, 7155), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['graph'], {}), '(graph)\n', (7148, 7155), True, 'import networkx as nx\n'), ((4482, 4503), 'numpy.ones', 'np.ones', (['mag.shape[0]'], {}), '(mag.shape[0])\n', (4489, 4503), True, 'import numpy as np\n'), ((3141, 3153), 'numpy.sum', 'np.sum', (['mag0'], {}), '(mag0)\n', (3147, 3153), True, 'import numpy as np\n'), ((3154, 3167), 'numpy.sum', 'np.sum', (['mag_d'], {}), '(mag_d)\n', (3160, 3167), True, 'import numpy as np\n'), ((13110, 13134), 'numpy.ones', 'np.ones', (['self.graph_size'], {}), '(self.graph_size)\n', (13117, 13134), True, 'import numpy as np\n'), ((13202, 13226), 'numpy.ones', 'np.ones', (['self.graph_size'], {}), '(self.graph_size)\n', (13209, 13226), True, 'import numpy as np\n'), ((13486, 13511), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (13502, 13511), True, 'import numpy as np\n'), ((13323, 13347), 'numpy.ones', 'np.ones', (['self.graph_size'], {}), '(self.graph_size)\n', (13330, 13347), True, 'import numpy as np\n'), ((13417, 13441), 'numpy.ones', 'np.ones', (['self.graph_size'], {}), '(self.graph_size)\n', (13424, 13441), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: <NAME>
# Creation date: 2021-09-11 (year-month-day)
"""
Implements SGLD to sample from the input data distribution, and various buffers to
store the MCMC samples.
"""
import torch
import torch.nn as nn
from torch.distributions.bernoulli import Bernoulli as TorchBernoulli
from torch.distributions.uniform import Uniform as TorchUnif
import numpy as np
from sgld_nrg.networks import EnergyModel
def make_batch(iterable, n=1):
    """Yield successive slices of *iterable*, each of length at most *n*.

    The final slice may be shorter when len(iterable) is not a multiple
    of *n*. Raises AssertionError (on first consumption) when n <= 0.
    """
    assert n > 0
    total = len(iterable)
    start = 0
    while start < total:
        yield iterable[start:start + n]
        start += n
class SgldLogitEnergy(object):
    """Runs SGLD (stochastic gradient Langevin dynamics) on the input space
    of an energy model, using a replay buffer to persist the MCMC chains.

    Energy is defined as -logsumexp(logits), so ascending probability means
    descending energy.
    """
    def __init__(
        self, net: EnergyModel, replay_buffer, sgld_lr=1.0, sgld_step=20, noise=0.01
    ):
        # sgld_lr: step size of the gradient term; sgld_step: SGLD steps per
        # call; noise: scale of the Gaussian perturbation (must not exceed
        # the learning rate, enforced below).
        assert isinstance(replay_buffer, RingReplayBuffer)
        assert isinstance(sgld_lr, float) and sgld_lr > 0.0
        assert isinstance(sgld_step, int) and sgld_step > 0
        assert isinstance(noise, float) and noise > 0.0
        assert noise <= sgld_lr
        self.net = net
        self.sgld_lr = sgld_lr
        self.sgld_step = sgld_step
        self.noise = noise
        self.replay = replay_buffer
    def get_energy(self, x_hat_):
        """Energy of a sample: negative logsumexp of the network's logits."""
        return -1.0 * self.net.logsumexp_logits(x_hat_)
    def get_grad(self, x_hat_):
        """Per-sample gradient of the energy w.r.t. the input batch."""
        # https://discuss.pytorch.org/t/newbie-getting-the-gradient-with-respect-to-the-input/12709/7
        net_mode = self.net.training
        self.net.eval()
        # Split the batch so each sample gets its own scalar energy output.
        x_hat_split = torch.tensor_split(x_hat_, x_hat_.size(0))
        # NOTE(review): nrg_split is a generator; torch.autograd.grad is
        # documented to take a sequence of tensors — confirm the pinned
        # torch version accepts a generator here.
        nrg_split = (self.get_energy(x) for x in x_hat_split)
        grad_j = torch.autograd.grad(nrg_split, x_hat_split, create_graph=False)
        x_hat_grad = torch.stack(grad_j, 0).squeeze(1)
        # Restore the caller's train/eval mode flag.
        self.net.training = net_mode
        return x_hat_grad
    def step(self, x_hat_):
        """One SGLD update: gradient step plus Gaussian noise, clamped to
        the replay buffer's data range."""
        x_hat_grad = self.get_grad(x_hat_)
        gradient_step = self.sgld_lr * x_hat_grad
        noise = self.noise * torch.randn(x_hat_.shape)
        # this is gradient ASCENT because we want the samples to have high probability
        x_hat_ = x_hat_ + gradient_step + noise
        x_hat_[x_hat_ < min(self.replay.range)] = min(self.replay.range)
        x_hat_[x_hat_ > max(self.replay.range)] = max(self.replay.range)
        return x_hat_
    def __call__(self, batch_size):
        """Draw a batch from the replay buffer, evolve it for sgld_step SGLD
        steps, write it back to the buffer, and return it (detached)."""
        x_hat_ = self.replay.sample(batch_size)
        x_hat_.requires_grad = True
        for sgld_step_num in range(self.sgld_step):
            x_hat_ = self.step(x_hat_)
        x_hat_ = x_hat_.detach()
        self.replay.append(x_hat_)
        return x_hat_
    @torch.no_grad()
    def summarize(self, k=36, batch_size=100):
        """
        Finds the best samples in the MCMC buffer and returns them.

        NOTE(review): torch.argsort sorts ascending, so ndx_sort[:k] picks
        the k SMALLEST logsumexp scores; that conflicts with the stated
        "largest logsumexp(logit)" — best/worst may be swapped. Confirm.

        :param k: positive int - selects the k samples with the largest logsumexp(logit)
        :param batch_size: positive int - how many samples to push through the network at a time
        :return: torch.Tensor
        """
        assert k > 0 and isinstance(k, int)
        assert batch_size > 0 and isinstance(batch_size, int)
        scores = torch.zeros(self.replay.maxlen)
        net_mode = self.net.training
        self.net.eval()
        for ndx in make_batch(range(self.replay.maxlen), batch_size):
            batch = self.replay.buffer[ndx, ...]
            scores[ndx] = self.net.logsumexp_logits(batch)
        ndx_sort = torch.argsort(scores)
        best = self.replay.buffer[ndx_sort[:k], ...]
        worst = self.replay.buffer[ndx_sort[-k:], ...]
        self.net.training = net_mode
        return best, worst, scores
class RingReplayBuffer(object):
    """Fixed-capacity ring buffer of MCMC samples.

    New batches overwrite the oldest entries in circular order; sampling
    draws uniformly at random (with replacement), and each drawn chain is
    re-seeded with fresh uniform noise with a small probability.
    """

    def __init__(self, data_shape, data_range, maxlen=10000, prob_reinitialize=0.05):
        """
        :param data_shape: the shape of a sample (tuple of positive ints)
        :param data_range: (low, high) bounds of the uniform init noise
        :param maxlen: number of samples to maintain
        :param prob_reinitialize: probability that a sampled chain is
            re-initialized with random noise
        """
        # TODO -- set this up to store & retrieve a label alongside the generated images
        assert len(data_range) == 2
        assert data_range[0] != data_range[1]
        assert all(isinstance(s, int) for s in data_shape)
        assert all(s > 0 for s in data_shape)
        assert isinstance(maxlen, int) and maxlen > 0
        assert 0.0 <= prob_reinitialize < 1.0
        self.range = data_range
        self.shape = data_shape
        self.prob_reinit = prob_reinitialize
        self.maxlen = maxlen
        self.pointer = 0
        self.buffer = self.initialize(maxlen)

    def append(self, new):
        """Write a batch at the current write position, wrapping around."""
        count = new.size(0)
        slots = (torch.arange(0, count, dtype=torch.long) + self.pointer) % self.maxlen
        self.buffer[slots, ...] = new
        self.pointer = (self.pointer + count) % self.maxlen

    def sample(self, batch_size):
        """Draw ``batch_size`` entries uniformly at random (with replacement)."""
        assert isinstance(batch_size, int)
        assert batch_size < self.maxlen
        picks = torch.randint(self.maxlen, size=(batch_size,))
        batch = self.maybe_reinitialize(x=self.buffer[picks, ...])
        return batch

    def maybe_reinitialize(self, x):
        """Independently re-seed each row of ``x`` with probability prob_reinit."""
        reset = TorchBernoulli(probs=self.prob_reinit).sample((x.size(0),))
        x[reset > 0.5, ...] = self.initialize(int(reset.sum()))
        return x

    def initialize(self, n):
        """Fresh uniform noise of shape (n, *data_shape)."""
        low, high = min(self.range), max(self.range)
        return TorchUnif(low, high).sample((n, *self.shape))
class IndependentReplayBuffer(RingReplayBuffer):
    """Replay buffer whose chains evolve independently.

    The ring-buffer parent refreshes elements on a circular cadence; this
    buffer reads a batch from randomly chosen slots and writes it back to
    exactly those slots, so each chain keeps its identity.
    """

    def __init__(self, data_shape, data_range, maxlen=10000, prob_reinitialize=0.05):
        super(IndependentReplayBuffer, self).__init__(
            data_shape=data_shape,
            data_range=data_range,
            maxlen=maxlen,
            prob_reinitialize=prob_reinitialize,
        )
        # Slots handed out by the most recent sample(); append() writes back here.
        self.latest_ndx = None

    def append(self, new):
        """Write ``new`` back into the slots returned by the last sample()."""
        if new.size(0) != self.latest_ndx.numel():
            raise ValueError(
                f"cannot append `new` with shape {new.size} using latest_ndx with shape {self.latest_ndx}"
            )
        self.buffer[self.latest_ndx, ...] = new

    def sample(self, batch_size):
        """Draw a random batch and remember which slots it came from."""
        assert isinstance(batch_size, int)
        assert batch_size < self.maxlen
        self.latest_ndx = self.rand_index(batch_size)
        batch = self.buffer[self.latest_ndx, ...]
        return self.maybe_reinitialize(batch)

    def rand_index(self, batch_size):
        """Uniform random slot indices (with replacement)."""
        return torch.randint(self.maxlen, size=(batch_size,))
class EpochIndependentReplayBuffer(IndependentReplayBuffer):
    """Independent replay buffer that samples without replacement per epoch.

    Indices are drawn from a shrinking pool and the pool is only refilled
    once fewer than ``batch_size`` indices remain, so every chain is
    visited before any repeats. A side-effect is that the samples grow
    "stale" as the epoch proceeds, creating a sawtooth pattern in the
    energy of the MCMC data. It's not clear that this is truly a downside,
    though.
    """

    def __init__(self, data_shape, data_range, maxlen=10000, prob_reinitialize=0.05):
        # Deliberately skips IndependentReplayBuffer.__init__ and calls the
        # ring-buffer base directly (same effect; latest_ndx set below).
        super(IndependentReplayBuffer, self).__init__(
            data_shape=data_shape,
            data_range=data_range,
            maxlen=maxlen,
            prob_reinitialize=prob_reinitialize,
        )
        self.remaining_ndx = np.arange(maxlen)
        self.latest_ndx = None

    def sample(self, batch_size):
        """Draw ``batch_size`` slots without replacement within the epoch."""
        assert isinstance(batch_size, int)
        assert batch_size < self.maxlen
        if self.remaining_ndx.size < batch_size:
            # Pool exhausted for this epoch: refill with every index.
            self.remaining_ndx = np.arange(self.maxlen)
        self.latest_ndx = self.rand_index(batch_size)
        return self.maybe_reinitialize(self.buffer[self.latest_ndx, ...])

    def rand_index(self, batch_size):
        """Pick indices from the remaining pool and remove them from it."""
        chosen = np.random.choice(self.remaining_ndx, size=batch_size, replace=False)
        self.remaining_ndx = np.setdiff1d(self.remaining_ndx, chosen, assume_unique=True)
        return torch.LongTensor(chosen)
| [
"numpy.random.choice",
"torch.LongTensor",
"torch.stack",
"torch.distributions.bernoulli.Bernoulli",
"torch.randint",
"torch.argsort",
"torch.autograd.grad",
"numpy.setdiff1d",
"torch.no_grad",
"torch.zeros",
"numpy.arange",
"torch.randn"
] | [((2576, 2591), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2589, 2591), False, 'import torch\n'), ((1606, 1669), 'torch.autograd.grad', 'torch.autograd.grad', (['nrg_split', 'x_hat_split'], {'create_graph': '(False)'}), '(nrg_split, x_hat_split, create_graph=False)\n', (1625, 1669), False, 'import torch\n'), ((3070, 3101), 'torch.zeros', 'torch.zeros', (['self.replay.maxlen'], {}), '(self.replay.maxlen)\n', (3081, 3101), False, 'import torch\n'), ((3360, 3381), 'torch.argsort', 'torch.argsort', (['scores'], {}), '(scores)\n', (3373, 3381), False, 'import torch\n'), ((4997, 5043), 'torch.randint', 'torch.randint', (['self.maxlen'], {'size': '(batch_size,)'}), '(self.maxlen, size=(batch_size,))\n', (5010, 5043), False, 'import torch\n'), ((6711, 6757), 'torch.randint', 'torch.randint', (['self.maxlen'], {'size': '(batch_size,)'}), '(self.maxlen, size=(batch_size,))\n', (6724, 6757), False, 'import torch\n'), ((7677, 7694), 'numpy.arange', 'np.arange', (['maxlen'], {}), '(maxlen)\n', (7686, 7694), True, 'import numpy as np\n'), ((8166, 8234), 'numpy.random.choice', 'np.random.choice', (['self.remaining_ndx'], {'size': 'batch_size', 'replace': '(False)'}), '(self.remaining_ndx, size=batch_size, replace=False)\n', (8182, 8234), True, 'import numpy as np\n'), ((8264, 8321), 'numpy.setdiff1d', 'np.setdiff1d', (['self.remaining_ndx', 'ndx'], {'assume_unique': '(True)'}), '(self.remaining_ndx, ndx, assume_unique=True)\n', (8276, 8321), True, 'import numpy as np\n'), ((8337, 8358), 'torch.LongTensor', 'torch.LongTensor', (['ndx'], {}), '(ndx)\n', (8353, 8358), False, 'import torch\n'), ((1939, 1964), 'torch.randn', 'torch.randn', (['x_hat_.shape'], {}), '(x_hat_.shape)\n', (1950, 1964), False, 'import torch\n'), ((7926, 7948), 'numpy.arange', 'np.arange', (['self.maxlen'], {}), '(self.maxlen)\n', (7935, 7948), True, 'import numpy as np\n'), ((1691, 1713), 'torch.stack', 'torch.stack', (['grad_j', '(0)'], {}), '(grad_j, 0)\n', (1702, 1713), False, 'import 
torch\n'), ((5189, 5227), 'torch.distributions.bernoulli.Bernoulli', 'TorchBernoulli', ([], {'probs': 'self.prob_reinit'}), '(probs=self.prob_reinit)\n', (5203, 5227), True, 'from torch.distributions.bernoulli import Bernoulli as TorchBernoulli\n')] |
"""
@author: <NAME>
@title: Third Activity - Binarization
"""
import skimage
from skimage.color import rgb2gray
from skimage import data
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['font.size'] = 18
import numpy as np
def Invert(image):
    """Return the photographic negative of `image`.

    The image is first converted to grayscale. skimage's rgb2gray returns
    float values in [0, 1], so the negative of a pixel v is 1 - v.

    Args:
        image: an RGB image array.

    Returns:
        A 2-D float array with every grayscale pixel inverted.
    """
    grayim = rgb2gray(image)
    # Fix: the original subtracted each pixel from 255, but rgb2gray yields
    # floats in [0, 1]; inverting within that range gives a true negative.
    # (Also vectorized: the explicit per-pixel loop was unnecessary.)
    return 1.0 - grayim
def binarization(image, middle):
    """Threshold a grayscale image against `middle`.

    Every pixel at or above the threshold maps to 1.0, every pixel below
    it maps to 0.0. Returns a float array of the same shape as `image`.
    """
    rows, cols = np.shape(image)  # enforces the 2-D input contract
    return np.where(image >= middle, 1.0, 0.0)
# --- Demo: show the original image, its negative, and a binarized copy ---
image = data.chelsea()  # sample RGB image bundled with skimage
plt.figure()
plt.imshow(image)
plt.show()
invertedimage = Invert(image)
plt.figure()
plt.imshow(invertedimage, cmap="gray")
plt.show()
# Threshold at the midpoint of rgb2gray's [0, 1] output range.
value =0.5
binimage = binarization( rgb2gray(image),value)
plt.figure()
plt.imshow(binimage, cmap="gray")
plt.show()
| [
"matplotlib.pyplot.imshow",
"skimage.color.rgb2gray",
"skimage.data.chelsea",
"matplotlib.pyplot.figure",
"numpy.empty",
"numpy.shape",
"matplotlib.pyplot.show"
] | [((788, 802), 'skimage.data.chelsea', 'data.chelsea', ([], {}), '()\n', (800, 802), False, 'from skimage import data\n'), ((803, 815), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (813, 815), True, 'import matplotlib.pyplot as plt\n'), ((816, 833), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (826, 833), True, 'import matplotlib.pyplot as plt\n'), ((834, 844), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (842, 844), True, 'import matplotlib.pyplot as plt\n'), ((877, 889), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (887, 889), True, 'import matplotlib.pyplot as plt\n'), ((890, 928), 'matplotlib.pyplot.imshow', 'plt.imshow', (['invertedimage'], {'cmap': '"""gray"""'}), "(invertedimage, cmap='gray')\n", (900, 928), True, 'import matplotlib.pyplot as plt\n'), ((929, 939), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (937, 939), True, 'import matplotlib.pyplot as plt\n'), ((1001, 1013), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1011, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1047), 'matplotlib.pyplot.imshow', 'plt.imshow', (['binimage'], {'cmap': '"""gray"""'}), "(binimage, cmap='gray')\n", (1024, 1047), True, 'import matplotlib.pyplot as plt\n'), ((1048, 1058), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1056, 1058), True, 'import matplotlib.pyplot as plt\n'), ((284, 299), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (292, 299), False, 'from skimage.color import rgb2gray\n'), ((311, 327), 'numpy.shape', 'np.shape', (['grayim'], {}), '(grayim)\n', (319, 327), True, 'import numpy as np\n'), ((343, 359), 'numpy.empty', 'np.empty', (['[a, b]'], {}), '([a, b])\n', (351, 359), True, 'import numpy as np\n'), ((522, 537), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (530, 537), True, 'import numpy as np\n'), ((554, 570), 'numpy.empty', 'np.empty', (['[a, b]'], {}), '([a, b])\n', (562, 570), True, 'import numpy 
as np\n'), ((977, 992), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (985, 992), False, 'from skimage.color import rgb2gray\n')] |
import sys
import os
# Root directory of the project
ROOT_DIR = os.getcwd()
print(f"{ROOT_DIR}")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import visualize
from mrcnn import model as modellib
from mrcnn import utils
from mrcnn.config import Config
from mrcnn.model import log
import skimage.io
import numpy as np
import random
import matplotlib.pyplot as plt
"""
Mask R-CNN
Train on the nuclei segmentation dataset from the
Kaggle 2018 Data Science Bowl
https://www.kaggle.com/c/data-science-bowl-2018/
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from ImageNet weights
python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=imagenet
# Train a new model starting from specific weights file
python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=/path/to/weights.h5
# Resume training a model that you had trained earlier
python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=last
# Generate submission file
python3 nucleus.py detect --dataset=/path/to/dataset --subset=train --weights=<last or /path/to/weights.h5>
"""
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Results directory
# Save submission files here
RESULTS_DIR = os.path.join(ROOT_DIR, "results/IVD/")
# The dataset doesn't have a standard train/val split, so I picked
# a variety of images to serve as a validation set
# (image IDs "0080" through "0099").
VAL_IMAGE_IDS = [
    "0080",
    "0081",
    "0082",
    "0083",
    "0084",
    "0085",
    "0086",
    "0087",
    "0088",
    "0089",
    "0090",
    "0091",
    "0092",
    "0093",
    "0094",
    "0095",
    "0096",
    "0097",
    "0098",
    "0099"
]
############################################################
# Dataset
############################################################
class IVDDataset(utils.Dataset):
    """IVD segmentation dataset: one foreground class, PNG images and masks."""

    def load_IVD(self, subset):
        """Load a subset of the IVD dataset.

        subset: either "train" or "val". Validation uses the hard-coded
        VAL_IMAGE_IDS; training uses every other sub-directory found under
        datasets/IVD.
        """
        # Add classes. We have one class: the IVD itself.
        self.add_class("IVD", 1, "IVD")
        self.dataset_dir = os.path.join(ROOT_DIR, 'datasets/IVD')
        assert subset in ["train", "val"]
        # Each sub-directory of the dataset root is one image id.
        image_ids = next(os.walk(self.dataset_dir))[1]
        if subset == "val":
            image_ids = VAL_IMAGE_IDS
        else:
            image_ids = list(set(image_ids) - set(VAL_IMAGE_IDS))
        # Add images
        for image_id in image_ids:
            self.add_image(
                "IVD",
                image_id=image_id,
                path=os.path.join(self.dataset_dir, image_id, "input.png"))

    def load_mask(self, image_id):
        """Generate instance masks for an image.

        Returns:
            masks: A bool array of shape [height, width, instance count] with
                one mask per instance.
            class_ids: a 1D array of class IDs of the instance masks.
        """
        info = self.image_info[image_id]
        # Masks live next to the input image, one mask_*.png per instance.
        mask_dir = os.path.join(self.dataset_dir, info['id'])
        mask = []
        for f in next(os.walk(mask_dir))[2]:
            if f.startswith('mask_') and f.endswith(".png"):
                # Fix: the np.bool alias was removed in NumPy 1.24;
                # use the builtin bool instead.
                m = skimage.io.imread(os.path.join(
                    mask_dir, f)).astype(bool)
                mask.append(m)
        mask = np.stack(mask, axis=-1)
        # One class only, so every instance gets class id 1.
        return mask, np.ones([mask.shape[-1]], dtype=np.int32)

    def image_reference(self, image_id):
        """Return an identifier for the image."""
        info = self.image_info[image_id]
        if info["source"] == "IVD":
            return info["id"]
        # Fix: the original called super(self.__class__, self) (breaks under
        # subclassing) and silently dropped the parent's return value.
        return super().image_reference(image_id)
# Build and prepare both splits at import time so that train() and detect()
# below can reference them as module-level globals.
dataset_train = IVDDataset()
dataset_train.load_IVD('train')
dataset_train.prepare()
dataset_val = IVDDataset()
dataset_val.load_IVD("val")
dataset_val.prepare()
############################################################
# Configuration
############################################################
class IVDConfig(Config):
    """Configuration for training on the IVD segmentation dataset."""
    # Give the configuration a recognizable name
    NAME = "IVD"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2
    # Uncomment to train on 8 GPUs (default is 1)
    # GPU_COUNT = 2
    # Number of classes (including background)
    NUM_CLASSES = 1 + 1 # background + vertebral
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 10
    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9
class IVDInferenceConfig(IVDConfig):
    """Inference-time configuration: a single image on a single GPU."""
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
############################################################
# Training
############################################################
def train(model):
    """Train the model."""
    # Train the head branches
    # Passing layers="heads" freezes all layers except the head
    # layers. You can also pass a regular expression to select
    # which layers to train by name pattern.
    # NOTE(review): `config`, `dataset_train` and `dataset_val` are module
    # globals; `config` is only bound when this file runs as __main__.
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=30,
                layers='heads')
############################################################
# Detection
############################################################
def get_ax(rows=1, cols=1, size=8):
    """Create a Matplotlib Axes grid for the notebook visualizations.

    A single place to control graph sizes: change `size` to scale every
    rendered image. Returns the Axes (or array of Axes) from plt.subplots.
    """
    figsize = (size * cols, size * rows)
    fig, axes = plt.subplots(rows, cols, figsize=figsize)
    return axes
def detect(model):
    """Run detection on images in the given directory."""
    # Recreate the model in inference mode
    # Get path to saved weights
    # Either set a specific path or find last trained weights
    # model_path = os.path.join(ROOT_DIR, ".h5 file name here")
    model_path = model.find_last()
    # Load trained weights
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)
    # Shadows the module-level dataset_val with a freshly prepared copy.
    dataset_val = IVDDataset()
    dataset_val.load_IVD("val")
    dataset_val.prepare()
    # Test on a random image
    image_id = random.choice(dataset_val.image_ids)
    # NOTE(review): `inference_config` is a module global that is only bound
    # in the non-"train" branch of __main__ -- calling detect() in any other
    # context raises NameError. TODO confirm intended usage.
    original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
        modellib.load_image_gt(dataset_val, inference_config,
                               image_id, use_mini_mask=False)
    info = dataset_val.image_info[image_id]
    log(f'image ID: {info["source"]}.{ info["id"]} \
        ({image_id}){dataset_val.image_reference(image_id)}')
    log("original_image", original_image)
    log("image_meta", image_meta)
    log("gt_class_id", gt_class_id)
    log("gt_bbox", gt_bbox)
    log("gt_mask", gt_mask)
    # NOTE(review): the ground-truth display uses dataset_train's class names
    # while the image comes from dataset_val -- presumably identical for this
    # single-class dataset, but verify.
    visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
                                dataset_train.class_names, figsize=(8, 8))
    results = model.detect([original_image], verbose=1)
    r = results[0]
    visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
                                dataset_val.class_names, r['scores'], ax=get_ax())
############################################################
# Command Line
############################################################
if __name__ == '__main__':
    import argparse
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect vertebrals.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'test'")
    parser.add_argument('--weights', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=MODEL_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--image', required=False,
                        metavar="path to image",
                        help='Image to apply the color splash effect on')
    args = parser.parse_args()
    print("Weights: ", args.weights)
    print("Logs: ", args.logs)
    # Configurations
    if args.command == "train":
        config = IVDConfig()
    else:
        config = IVDInferenceConfig()
    config.display()
    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training",
                                  config=config,
                                  model_dir=MODEL_DIR)
    else:
        # NOTE(review): detect() above reads this module-level name.
        inference_config = IVDInferenceConfig()
        model = modellib.MaskRCNN(mode="inference",
                                  config=inference_config,
                                  model_dir=MODEL_DIR)
    # Select weights file to load
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights
    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)
    # Train or evaluate
    if args.command == "train":
        train(model)
    elif args.command == "detect":
        detect(model)
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'splash'".format(args.command))
| [
"mrcnn.model.MaskRCNN",
"os.path.exists",
"random.choice",
"numpy.ones",
"argparse.ArgumentParser",
"mrcnn.utils.download_trained_weights",
"mrcnn.model.load_image_gt",
"os.path.join",
"os.getcwd",
"numpy.stack",
"mrcnn.visualize.display_instances",
"mrcnn.model.log",
"sys.path.append",
"m... | [((65, 76), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (74, 76), False, 'import os\n'), ((117, 142), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (132, 142), False, 'import sys\n'), ((1460, 1503), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.h5')\n", (1472, 1503), False, 'import os\n'), ((1624, 1654), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (1636, 1654), False, 'import os\n'), ((1719, 1757), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""results/IVD/"""'], {}), "(ROOT_DIR, 'results/IVD/')\n", (1731, 1757), False, 'import os\n'), ((6985, 7045), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(size * cols, size * rows)'}), '(rows, cols, figsize=(size * cols, size * rows))\n', (6997, 7045), True, 'import matplotlib.pyplot as plt\n'), ((7631, 7667), 'random.choice', 'random.choice', (['dataset_val.image_ids'], {}), '(dataset_val.image_ids)\n', (7644, 7667), False, 'import random\n'), ((7741, 7829), 'mrcnn.model.load_image_gt', 'modellib.load_image_gt', (['dataset_val', 'inference_config', 'image_id'], {'use_mini_mask': '(False)'}), '(dataset_val, inference_config, image_id,\n use_mini_mask=False)\n', (7763, 7829), True, 'from mrcnn import model as modellib\n'), ((8021, 8058), 'mrcnn.model.log', 'log', (['"""original_image"""', 'original_image'], {}), "('original_image', original_image)\n", (8024, 8058), False, 'from mrcnn.model import log\n'), ((8063, 8092), 'mrcnn.model.log', 'log', (['"""image_meta"""', 'image_meta'], {}), "('image_meta', image_meta)\n", (8066, 8092), False, 'from mrcnn.model import log\n'), ((8097, 8128), 'mrcnn.model.log', 'log', (['"""gt_class_id"""', 'gt_class_id'], {}), "('gt_class_id', gt_class_id)\n", (8100, 8128), False, 'from mrcnn.model import log\n'), ((8133, 8156), 'mrcnn.model.log', 'log', (['"""gt_bbox"""', 'gt_bbox'], {}), "('gt_bbox', gt_bbox)\n", (8136, 
8156), False, 'from mrcnn.model import log\n'), ((8161, 8184), 'mrcnn.model.log', 'log', (['"""gt_mask"""', 'gt_mask'], {}), "('gt_mask', gt_mask)\n", (8164, 8184), False, 'from mrcnn.model import log\n'), ((8190, 8311), 'mrcnn.visualize.display_instances', 'visualize.display_instances', (['original_image', 'gt_bbox', 'gt_mask', 'gt_class_id', 'dataset_train.class_names'], {'figsize': '(8, 8)'}), '(original_image, gt_bbox, gt_mask, gt_class_id,\n dataset_train.class_names, figsize=(8, 8))\n', (8217, 8311), False, 'from mrcnn import visualize\n'), ((8821, 8898), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Mask R-CNN to detect vertebrals."""'}), "(description='Train Mask R-CNN to detect vertebrals.')\n", (8844, 8898), False, 'import argparse\n'), ((2886, 2924), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""datasets/IVD"""'], {}), "(ROOT_DIR, 'datasets/IVD')\n", (2898, 2924), False, 'import os\n'), ((4181, 4223), 'os.path.join', 'os.path.join', (['self.dataset_dir', "info['id']"], {}), "(self.dataset_dir, info['id'])\n", (4193, 4223), False, 'import os\n'), ((4538, 4561), 'numpy.stack', 'np.stack', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (4546, 4561), True, 'import numpy as np\n'), ((9928, 9998), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': 'MODEL_DIR'}), "(mode='training', config=config, model_dir=MODEL_DIR)\n", (9945, 9998), True, 'from mrcnn import model as modellib\n'), ((10141, 10227), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'inference_config', 'model_dir': 'MODEL_DIR'}), "(mode='inference', config=inference_config, model_dir=\n MODEL_DIR)\n", (10158, 10227), True, 'from mrcnn import model as modellib\n'), ((4712, 4753), 'numpy.ones', 'np.ones', (['[mask.shape[-1]]'], {'dtype': 'np.int32'}), '([mask.shape[-1]], dtype=np.int32)\n', (4719, 4753), True, 'import numpy as np\n'), ((10453, 10481), 
'os.path.exists', 'os.path.exists', (['weights_path'], {}), '(weights_path)\n', (10467, 10481), False, 'import os\n'), ((10495, 10539), 'mrcnn.utils.download_trained_weights', 'utils.download_trained_weights', (['weights_path'], {}), '(weights_path)\n', (10525, 10539), False, 'from mrcnn import utils\n'), ((3344, 3369), 'os.walk', 'os.walk', (['self.dataset_dir'], {}), '(self.dataset_dir)\n', (3351, 3369), False, 'import os\n'), ((4306, 4323), 'os.walk', 'os.walk', (['mask_dir'], {}), '(mask_dir)\n', (4313, 4323), False, 'import os\n'), ((3733, 3786), 'os.path.join', 'os.path.join', (['self.dataset_dir', 'image_id', '"""input.png"""'], {}), "(self.dataset_dir, image_id, 'input.png')\n", (3745, 3786), False, 'import os\n'), ((4428, 4453), 'os.path.join', 'os.path.join', (['mask_dir', 'f'], {}), '(mask_dir, f)\n', (4440, 4453), False, 'import os\n')] |
from sklearn import datasets
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
# Cluster the iris measurements with k-means and compare against true labels.
iris = datasets.load_iris()
x = pd.DataFrame(iris.data,
                 columns=['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'])
y = pd.DataFrame(iris.target, columns=['Targets'])

# Color lookup: one color per species / cluster id.
colormap = np.array(['red', 'lime', 'black'])

# True labels: sepal-space and petal-space scatter plots side by side.
plt.figure(figsize=(14, 7))
plt.subplot(1, 2, 1)
plt.scatter(x.Sepal_Length, x.Sepal_Width, c=colormap[y.Targets], s=40)
plt.title('Sepal')
plt.subplot(1, 2, 2)
plt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[y.Targets], s=40)
plt.title('Petal')

# Fit k-means with one cluster per iris species.
model = KMeans(n_clusters=3)
model.fit(x)

# Petal-space comparison: ground truth vs. cluster assignments.
plt.figure(figsize=(14, 7))
plt.subplot(1, 2, 1)
plt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[y.Targets], s=40)
plt.title('Real')
plt.subplot(1, 2, 2)
plt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[model.labels_], s=40)
plt.title('kmeans')

# NOTE(review): k-means cluster ids are an arbitrary permutation of the true
# labels, so these scores are only meaningful when the ids happen to align.
print("accuracy_score", accuracy_score(y.Targets, model.labels_))
print("confusion_matrix\n", confusion_matrix(y.Targets, model.labels_))
| [
"sklearn.datasets.load_iris",
"sklearn.cluster.KMeans",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
] | [((232, 252), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (250, 252), False, 'from sklearn import datasets\n'), ((258, 281), 'pandas.DataFrame', 'pd.DataFrame', (['iris.data'], {}), '(iris.data)\n', (270, 281), True, 'import pandas as pd\n'), ((359, 384), 'pandas.DataFrame', 'pd.DataFrame', (['iris.target'], {}), '(iris.target)\n', (371, 384), True, 'import pandas as pd\n'), ((410, 437), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (420, 437), True, 'import matplotlib.pyplot as plt\n'), ((448, 482), 'numpy.array', 'np.array', (["['red', 'lime', 'black']"], {}), "(['red', 'lime', 'black'])\n", (456, 482), True, 'import numpy as np\n'), ((481, 501), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (492, 501), True, 'import matplotlib.pyplot as plt\n'), ((500, 571), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x.Sepal_Length', 'x.Sepal_Width'], {'c': 'colormap[y.Targets]', 's': '(40)'}), '(x.Sepal_Length, x.Sepal_Width, c=colormap[y.Targets], s=40)\n', (511, 571), True, 'import matplotlib.pyplot as plt\n'), ((575, 593), 'matplotlib.pyplot.title', 'plt.title', (['"""Sepal"""'], {}), "('Sepal')\n", (584, 593), True, 'import matplotlib.pyplot as plt\n'), ((594, 614), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (605, 614), True, 'import matplotlib.pyplot as plt\n'), ((613, 684), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x.Petal_Length', 'x.Petal_Width'], {'c': 'colormap[y.Targets]', 's': '(40)'}), '(x.Petal_Length, x.Petal_Width, c=colormap[y.Targets], s=40)\n', (624, 684), True, 'import matplotlib.pyplot as plt\n'), ((687, 705), 'matplotlib.pyplot.title', 'plt.title', (['"""Petal"""'], {}), "('Petal')\n", (696, 705), True, 'import matplotlib.pyplot as plt\n'), ((715, 735), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)'}), '(n_clusters=3)\n', (721, 735), False, 'from sklearn.cluster import 
KMeans\n'), ((752, 779), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (762, 779), True, 'import matplotlib.pyplot as plt\n'), ((790, 824), 'numpy.array', 'np.array', (["['red', 'lime', 'black']"], {}), "(['red', 'lime', 'black'])\n", (798, 824), True, 'import numpy as np\n'), ((823, 843), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (834, 843), True, 'import matplotlib.pyplot as plt\n'), ((842, 913), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x.Petal_Length', 'x.Petal_Width'], {'c': 'colormap[y.Targets]', 's': '(40)'}), '(x.Petal_Length, x.Petal_Width, c=colormap[y.Targets], s=40)\n', (853, 913), True, 'import matplotlib.pyplot as plt\n'), ((916, 933), 'matplotlib.pyplot.title', 'plt.title', (['"""Real"""'], {}), "('Real')\n", (925, 933), True, 'import matplotlib.pyplot as plt\n'), ((934, 954), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (945, 954), True, 'import matplotlib.pyplot as plt\n'), ((953, 1028), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x.Petal_Length', 'x.Petal_Width'], {'c': 'colormap[model.labels_]', 's': '(40)'}), '(x.Petal_Length, x.Petal_Width, c=colormap[model.labels_], s=40)\n', (964, 1028), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1050), 'matplotlib.pyplot.title', 'plt.title', (['"""kmeans"""'], {}), "('kmeans')\n", (1040, 1050), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1116), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y.Targets', 'model.labels_'], {}), '(y.Targets, model.labels_)\n', (1090, 1116), False, 'from sklearn.metrics import accuracy_score\n'), ((1146, 1188), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y.Targets', 'model.labels_'], {}), '(y.Targets, model.labels_)\n', (1162, 1188), False, 'from sklearn.metrics import confusion_matrix\n')] |
import numpy as np
from tkinter import filedialog
import sys
import os
import matplotlib.pyplot as plt
import pyproj
import math
import datetime
#------------------------------------------------------------------------------
# READ DELFT GRID FILE
class grd():
    """Orthogonal curvilinear grid file. See A.3.2 in Delft3D-FLOW user manual.
    Attributes:
        m                    number of rows, derived from the file header
        n                    number of columns, derived from the file header
        x                    the mxn array of x coordinates
        y                    the mxn array of y coordinates
        coordinate_system    the coordinate system labeled in the header (cartesian or spherical)
        filename             the full path of the .grd file
    """
    def __init__(self, fname=None):
        """Initialize an empty grid; call read_grd() to populate from a file."""
        # Fix: the original assigned plain local variables here, which were
        # discarded on return -- none of the attributes advertised in the
        # class docstring were ever created. Bind them to the instance.
        self.x = ''
        self.y = ''
        self.m = ''
        self.n = ''
        self.coordinate_system = ''
        self.filename = ''
        #self.read_grd(fname)
    def read_grd(self, fname=None, nan_value = -999.):
        '''Read a Delft3d Grid file into this instance.

        If fname is not specified, opens a file dialog. Populates self.x,
        self.y, self.m, self.n, self.coordinate_system and self.filename.
        Coordinate values equal to nan_value are masked out. (Some grids
        specify their own missing value in the header; that is not parsed
        here.)'''
        # Set filename via GUI if it's not specified.
        if not fname:
            fname = filedialog.askopenfilename()
        else:
            fname = fname
        f = open(fname, 'r')
        # Read the header: every line up to (but excluding) the first line
        # containing 'ETA' is header text.
        header = []
        while True:
            line = f.readline()
            if 'ETA' in line:
                break
            else:
                header.append(line)
        header = np.array(header)
        # 'Coordinate System = <name>' -- keep the part after the '='.
        coordinate_system = [h for h in header if 'Coordinate System' in h][0].rstrip('\n').split('=')[1]
        # The line after the coordinate-system line holds the grid shape.
        shape = header[np.where(header == [h for h in header if 'Coordinate System' in h][0])[0][0]+1].split()
        # NOTE(review): rows come from the second field and columns from the
        # first -- confirm against the Delft3D .grd header convention.
        m, n = int(shape[1]), int(shape[0])
        # Read the coordinates
        coordinates = f.readlines()
        coordinates.insert(0,line) # inserts the first line of coordinates ('line')
        # The first half of the coordinate lines are x values, the second y.
        x = [] # generate an array of x coordinates
        for line in coordinates[0:int(np.size(coordinates)/2)]:
            if 'ETA=' in line:
                # Drop the 'ETA=' tag and the row index before parsing.
                coords = line.split()[2:]
                for i in coords:
                    x.append(float(i))
            else:
                coords = line.split()
                for i in coords:
                    x.append(float(i))
        y = [] # generate an array of y coordinates
        for line in coordinates[int(np.size(coordinates) / 2):]:
            if 'ETA=' in line:
                coords = line.split()[2:]
                for i in coords:
                    y.append(float(i))
            else:
                coords = line.split()
                for i in coords:
                    y.append(float(i))
        # reshape x and y to reflect the rows columns in the header
        x, y = np.reshape(x, (m, n)), np.reshape(y, (m, n))
        # mask nan
        x, y = np.ma.masked_equal(x, nan_value), np.ma.masked_equal(y, nan_value)
        # update the class
        self.x = x
        self.y = y
        self.m = m
        self.n = n
        self.coordinate_system = coordinate_system
        self.filename = fname
        fname = None
        f.close()
def write_rectgrd(self, fname=None, coord_system = 'Cartesian', m = None,
n = None, cellsize = None, x0 = None, y0 = None, depth = None):
if not depth:
if not m:
m = int(input('number of rows: '))
if not n:
n = int(input('number of columns: '))
else:
depth = np.array(depth)
m = depth.shape[0]
n = depth.shape[1]
if not cellsize:
cellsize = float(input('cellsize: '))
if x0 is None:
x0 = float(input('xllcorner: '))
if y0 is None:
y0 = float(input('yllcorner: '))
records = []
rows = math.floor(m/5)
remainder = m%5
records.append('* \n* Delft3d- rectilinear grid file created with pydelft \n* File creation date: %s\n* \n'
%str(datetime.datetime.now()))
records.append('Coordinate System = %s\n' % coord_system)
records.append('\t%i\t%i\n' % (m,n))
records.append('0 0 0\n')
# Values for x
for i in range(1, n+1):
etax = np.arange(x0, m*cellsize+x0, cellsize)
if np.size(etax) > 5:
etax_1 = etax[0:5]
records.append('ETA=\t%i\t%.20e\t%.20e\t%.20e\t%.20e\t%.20e\n' % (i,
etax_1[0],
etax_1[1],
etax_1[2],
etax_1[3],
etax_1[4]))
if remainder != 0:
etax_mid = etax[5:-remainder]
etax_last = etax[-remainder:]
elif remainder == 0:
etax_mid = etax[5:-5]
etax_last = etax[-5:]
for j in np.arange(0,np.size(etax_mid), 5):
records.append(' \t\t%.20e\t%.20e\t%.20e\t%.20e\t%.20e\n' % (etax_mid[j],
etax_mid[j+1],
etax_mid[j+2],
etax_mid[j+3],
etax_mid[j+4]))
records.append(' \t\t%.20e' % etax_last[0])
for j in etax_last[1:]:
records.append('\t%.20e' % j)
records.append('\n')
elif np.size(etax) <= 5:
etax_1 = etax
records.append('ETA=\t%i' % i)
for j in etax_1:
records.append('\t%.20e' % j)
records.append('\n')
# Values for y
y = np.arange(y0, y0+m*cellsize, cellsize)
for i in range(1,n+1):
etay = np.ones((m,1))*y[i-1]
if np.size(etay) > 5:
etay_1 = etay[0:5]
records.append('ETA=\t%i\t%.20e\t%.20e\t%.20e\t%.20e\t%.20e\n' %(i,
etay_1[0], etay_1[1],
etay_1[2], etay_1[3],
etay_1[4]))
if remainder != 0:
etay_mid = etay[5:-remainder]
etay_last = etay[-remainder:]
elif remainder == 0:
etay_mid = etay[5:-5]
etay_last = etay[-5:]
for j in np.arange(0,np.size(etay_mid), 5):
records.append(' \t\t%.20e\t%.20e\t%.20e\t%.20e\t%.20e\n' % (etay_mid[j],
etay_mid[j+1],
etay_mid[j+2],
etay_mid[j+3],
etay_mid[j+4]))
records.append(' \t\t%.20e' % etay_last[0])
for j in etay_last[1:]:
records.append('\t%.20e' %j)
records.append('\n')
elif np.size(etay) <= 5:
etay = np.ones((m,1))*y[i-1]
etay_1 = etay
records.append('ETA=\t%i' % i)
for j in etay_1:
records.append('\t%.20e' % j)
records.append('\n')
# Set filename via GUI if it's not specified.
if not fname:
fname = filedialog.asksaveasfile()
else:
fname = fname
f = open(fname, 'w')
for r in records:
f.write(r)
f.close()
self.read_grd(fname = fname)
    def plot_grd(self):
        """Draw the grid as an empty mesh (cell edges only) and show it."""
        plt.pcolormesh(self.x,self.y,np.zeros(np.shape(self.x)),
                       edgecolor = 'k', facecolor = 'none', linewidth = 0.0005)
        plt.axis('equal')
        plt.show()
def get_mn(self,x,y, grid_epsg = 4326, query_epsg = 4326):
def find_nearest(grid, query):
m = np.unravel_index(np.abs(grid.y-query[1]).argmin(), np.shape(grid.y))[0]
n = np.unravel_index(np.abs(grid.x-query[0]).argmin(), np.shape(grid.x))[1]
return [m,n]
grid_proj = pyproj.Proj("+init=EPSG:%i" % grid_epsg)
query_proj = pyproj.Proj("+init=EPSG:%i" % query_epsg)
if grid_epsg != query_epsg:
x,y = pyproj.transform(query_proj, grid_proj, x, y)
idx = [find_nearest(self,[x[i],y[i]]) for i in range(0, np.size(x)-1)]
return idx
| [
"numpy.abs",
"numpy.ma.masked_equal",
"numpy.reshape",
"numpy.ones",
"math.floor",
"numpy.where",
"tkinter.filedialog.asksaveasfile",
"numpy.size",
"pyproj.transform",
"numpy.array",
"datetime.datetime.now",
"pyproj.Proj",
"tkinter.filedialog.askopenfilename",
"matplotlib.pyplot.axis",
"... | [((1635, 1651), 'numpy.array', 'np.array', (['header'], {}), '(header)\n', (1643, 1651), True, 'import numpy as np\n'), ((4007, 4024), 'math.floor', 'math.floor', (['(m / 5)'], {}), '(m / 5)\n', (4017, 4024), False, 'import math\n'), ((6371, 6413), 'numpy.arange', 'np.arange', (['y0', '(y0 + m * cellsize)', 'cellsize'], {}), '(y0, y0 + m * cellsize, cellsize)\n', (6380, 6413), True, 'import numpy as np\n'), ((8679, 8696), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (8687, 8696), True, 'import matplotlib.pyplot as plt\n'), ((8705, 8715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8713, 8715), True, 'import matplotlib.pyplot as plt\n'), ((9041, 9081), 'pyproj.Proj', 'pyproj.Proj', (["('+init=EPSG:%i' % grid_epsg)"], {}), "('+init=EPSG:%i' % grid_epsg)\n", (9052, 9081), False, 'import pyproj\n'), ((9103, 9144), 'pyproj.Proj', 'pyproj.Proj', (["('+init=EPSG:%i' % query_epsg)"], {}), "('+init=EPSG:%i' % query_epsg)\n", (9114, 9144), False, 'import pyproj\n'), ((1314, 1342), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (1340, 1342), False, 'from tkinter import filedialog\n'), ((2931, 2952), 'numpy.reshape', 'np.reshape', (['x', '(m, n)'], {}), '(x, (m, n))\n', (2941, 2952), True, 'import numpy as np\n'), ((2954, 2975), 'numpy.reshape', 'np.reshape', (['y', '(m, n)'], {}), '(y, (m, n))\n', (2964, 2975), True, 'import numpy as np\n'), ((3010, 3042), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['x', 'nan_value'], {}), '(x, nan_value)\n', (3028, 3042), True, 'import numpy as np\n'), ((3044, 3076), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['y', 'nan_value'], {}), '(y, nan_value)\n', (3062, 3076), True, 'import numpy as np\n'), ((3669, 3684), 'numpy.array', 'np.array', (['depth'], {}), '(depth)\n', (3677, 3684), True, 'import numpy as np\n'), ((4438, 4480), 'numpy.arange', 'np.arange', (['x0', '(m * cellsize + x0)', 'cellsize'], {}), '(x0, m * cellsize + x0, cellsize)\n', 
(4447, 4480), True, 'import numpy as np\n'), ((8298, 8324), 'tkinter.filedialog.asksaveasfile', 'filedialog.asksaveasfile', ([], {}), '()\n', (8322, 8324), False, 'from tkinter import filedialog\n'), ((9200, 9245), 'pyproj.transform', 'pyproj.transform', (['query_proj', 'grid_proj', 'x', 'y'], {}), '(query_proj, grid_proj, x, y)\n', (9216, 9245), False, 'import pyproj\n'), ((4492, 4505), 'numpy.size', 'np.size', (['etax'], {}), '(etax)\n', (4499, 4505), True, 'import numpy as np\n'), ((6460, 6475), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (6467, 6475), True, 'import numpy as np\n'), ((6497, 6510), 'numpy.size', 'np.size', (['etay'], {}), '(etay)\n', (6504, 6510), True, 'import numpy as np\n'), ((8572, 8588), 'numpy.shape', 'np.shape', (['self.x'], {}), '(self.x)\n', (8580, 8588), True, 'import numpy as np\n'), ((4192, 4215), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4213, 4215), False, 'import datetime\n'), ((5390, 5407), 'numpy.size', 'np.size', (['etax_mid'], {}), '(etax_mid)\n', (5397, 5407), True, 'import numpy as np\n'), ((6118, 6131), 'numpy.size', 'np.size', (['etax'], {}), '(etax)\n', (6125, 6131), True, 'import numpy as np\n'), ((7212, 7229), 'numpy.size', 'np.size', (['etay_mid'], {}), '(etay_mid)\n', (7219, 7229), True, 'import numpy as np\n'), ((7939, 7952), 'numpy.size', 'np.size', (['etay'], {}), '(etay)\n', (7946, 7952), True, 'import numpy as np\n'), ((8886, 8902), 'numpy.shape', 'np.shape', (['grid.y'], {}), '(grid.y)\n', (8894, 8902), True, 'import numpy as np\n'), ((8974, 8990), 'numpy.shape', 'np.shape', (['grid.x'], {}), '(grid.x)\n', (8982, 8990), True, 'import numpy as np\n'), ((2156, 2176), 'numpy.size', 'np.size', (['coordinates'], {}), '(coordinates)\n', (2163, 2176), True, 'import numpy as np\n'), ((2545, 2565), 'numpy.size', 'np.size', (['coordinates'], {}), '(coordinates)\n', (2552, 2565), True, 'import numpy as np\n'), ((7982, 7997), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', 
(7989, 7997), True, 'import numpy as np\n'), ((9311, 9321), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (9318, 9321), True, 'import numpy as np\n'), ((8852, 8877), 'numpy.abs', 'np.abs', (['(grid.y - query[1])'], {}), '(grid.y - query[1])\n', (8858, 8877), True, 'import numpy as np\n'), ((8940, 8965), 'numpy.abs', 'np.abs', (['(grid.x - query[0])'], {}), '(grid.x - query[0])\n', (8946, 8965), True, 'import numpy as np\n'), ((1781, 1851), 'numpy.where', 'np.where', (["(header == [h for h in header if 'Coordinate System' in h][0])"], {}), "(header == [h for h in header if 'Coordinate System' in h][0])\n", (1789, 1851), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpointing utilities for the ViT experiments.
Several functions in this file were ported from
https://github.com/google-research/vision_transformer.
"""
import collections
import dataclasses
import io
from absl import logging
import flax
import jax
import jax.numpy as jnp
import numpy as np
import scipy
from tensorflow.io import gfile
def _convert_and_recover_bfloat16(x):
"""Converts to JAX arrays, while correctly loading any bfloat16 arrays."""
if hasattr(x, "dtype") and x.dtype.type is np.void:
assert x.itemsize == 2, "Unknown dtype!"
return jnp.array(x.view(jnp.bfloat16))
else:
return jnp.array(x)
def _recover_tree(keys, values):
"""Recovers a tree as a nested dict from flat names and values.
This function is useful to analyze checkpoints that are without need to access
the exact source code of the experiment. In particular, it can be used to
extract an reuse various subtrees of the scheckpoint, e.g. subtree of
parameters.
Args:
keys: A list of keys, where "/" is used as separator between nodes.
values: A list of leaf values.
Returns:
A JAX pytree whose structure was recovered from the naming of the keys.
"""
tree = {}
sub_trees = collections.defaultdict(list)
for k, v in zip(keys, values):
if "/" not in k:
tree[k] = v
else:
k_left, k_right = k.split("/", 1)
sub_trees[k_left].append((k_right, v))
for k, kv_pairs in sub_trees.items():
k_subtree, v_subtree = zip(*kv_pairs)
tree[k] = _recover_tree(k_subtree, v_subtree)
return tree
def load_checkpoint(tree, path):
  """Restores a JAX pytree that was saved to disk as a NumPy `.npz` file.

  Args:
    tree: Optional JAX pytree to be restored. If None, the tree structure is
      reconstructed from the "/"-separated leaf names stored in the checkpoint.
    path: Path of the checkpoint file.

  Returns:
    A JAX pytree with the same structure as `tree` (or the recovered
    structure), holding the leaf values from the checkpoint.
  """
  with gfile.GFile(path, "rb") as f:
    raw = f.read()
  flat = np.load(io.BytesIO(raw), allow_pickle=False)
  keys, values = zip(*list(flat.items()))
  # NOTE: NumPy loses any bfloat16 dtypes when saving, so we recover them here.
  values = jax.tree_util.tree_map(_convert_and_recover_bfloat16, values)
  if not tree:
    return _recover_tree(keys, values)
  treedef = jax.tree_util.tree_structure(tree)
  return jax.tree_util.tree_unflatten(treedef, values)
def _traverse_with_names(tree):
  """Yields (leaf_name, leaf_value) pairs from nested dicts/dataclasses.

  Names are "/"-joined dictionary keys, visited in sorted key order.
  """
  if dataclasses.is_dataclass(tree):
    tree = flax.serialization.to_state_dict(tree)
  if not isinstance(tree, (dict, flax.core.FrozenDict)):
    yield "", tree
    return
  for key in sorted(tree.keys()):
    for suffix, leaf in _traverse_with_names(tree[key]):
      yield (key + "/" + suffix).rstrip("/"), leaf
def _tree_flatten_with_names(tree):
  """Populates tree_flatten with leaf names.

  This function populates output of tree_flatten with leaf names, using a
  custom traversal that produces names is provided. The custom traversal does
  NOT have to traverse tree in the same order as jax, as we take care of
  automatically aligning jax" and custom traversals.

  Args:
    tree: python tree.

  Returns:
    A list of values with names: [(name, value), ...], and the jax treedef.
  """
  # Fix: use the canonical `jax.tree_util.tree_flatten`; the bare
  # `jax.tree_flatten` alias is deprecated (and removed in recent JAX), and
  # the rest of this file already consistently uses `jax.tree_util.*`.
  vals, tree_def = jax.tree_util.tree_flatten(tree)
  # "Fake" token tree that is use to track jax internal tree traversal and
  # adjust our custom tree traversal to be compatible with it.
  tokens = range(len(vals))
  token_tree = tree_def.unflatten(tokens)
  val_names, perm = zip(*_traverse_with_names(token_tree))
  inv_perm = np.argsort(perm)
  # Custom traversal should visit the same number of leaves.
  assert len(val_names) == len(vals)
  return [(val_names[i], v) for i, v in zip(inv_perm, vals)], tree_def
def save_checkpoint(tree, path, step_for_copy=None):
  """Saves the values of JAX pytrees to disk in a NumPy `.npz` file.

  Args:
    tree: A JAX pytree to be saved.
    path: A path to save the checkpoint.
    step_for_copy: Optional integer that, when not None, will be used to save a
      copy of the checkpoint with the name `path-{step_for_copy}`.
  """
  # NOTE: In general, this could be greatly simplified as follows. However, we
  # currently need to store the leaf names as well in order to be able to load
  # and reconstruct the tree directly from the checkpoint when initialized a
  # subset of a model from a pretrained model for fine tuning.
  # ```
  # values, _ = jax.tree_util.tree_flatten(tree)
  # io_buffer = io.BytesIO()
  # np.savez(io_buffer, *values)
  # ```
  # Serialize in memory first; keys of the npz archive are the leaf names.
  names_and_vals, _ = _tree_flatten_with_names(tree)
  io_buffer = io.BytesIO()
  np.savez(io_buffer, **{k: v for k, v in names_and_vals})
  # In order to be robust to interruptions during saving, we first save the
  # checkpoint to a temporary file, and then rename it to the actual path name.
  # NOTE(review): rename atomicity depends on the underlying filesystem —
  # gfile backends may differ; confirm for the deployment target.
  path_tmp = path + "-TEMPORARY"
  with gfile.GFile(path_tmp, "wb") as f:
    f.write(io_buffer.getvalue())
  gfile.rename(path_tmp, path, overwrite=True)
  # Optionally keep a step-stamped copy (zero-padded to 9 digits).
  if step_for_copy is not None:
    gfile.copy(path, f"{path}-{step_for_copy:09d}", overwrite=True)
def _flatten_dict(d, parent_key="", sep="/"):
"""Flattens a dictionary, keeping empty leaves."""
items = []
for k, v in d.items():
path = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.Mapping):
items.extend(_flatten_dict(v, path, sep=sep).items())
else:
items.append((path, v))
# Keeps the empty dict if it was set explicitly.
if parent_key and not d:
items.append((parent_key, {}))
return dict(items)
def _inspect_params(*,
                    params,
                    expected,
                    fail_if_extra=True,
                    fail_if_missing=True):
  """Checks that `params` and `expected` agree on their sets of leaf keys.

  Args:
    params: Parameter tree restored from a checkpoint.
    expected: Parameter tree with the structure the code expects.
    fail_if_extra: If True, raise when `params` contains unexpected keys.
    fail_if_missing: If True, raise when `expected` keys are absent.

  Returns:
    `params`, possibly augmented with explicit empty dicts.

  Raises:
    ValueError: On a key mismatch when the corresponding flag is set.
  """
  params_flat = _flatten_dict(params)
  expected_flat = _flatten_dict(expected)
  missing_keys = expected_flat.keys() - params_flat.keys()
  extra_keys = params_flat.keys() - expected_flat.keys()
  # Adds back empty dict explicitly, to support layers without weights.
  # Context: FLAX ignores empty dict during serialization.
  empty_keys = {k for k in missing_keys
                if isinstance(expected_flat[k], dict) and not expected_flat[k]}
  for k in empty_keys:
    params[k] = {}
  missing_keys -= empty_keys
  if empty_keys:
    logging.warning("Inspect recovered empty keys:\n%s", empty_keys)
  if missing_keys:
    logging.info("Inspect missing keys:\n%s", missing_keys)
  if extra_keys:
    logging.info("Inspect extra keys:\n%s", extra_keys)
  if (missing_keys and fail_if_missing) or (extra_keys and fail_if_extra):
    raise ValueError(f"Missing params from checkpoint: {missing_keys}.\n"
                     f"Extra params in checkpoint: {extra_keys}.\n"
                     f"Restored params from checkpoint: {params_flat.keys()}.\n"
                     f"Expected params from code: {expected_flat.keys()}.")
  return params
def _tree_map_with_names(f, tree, *rest):
  """Performs a tree map where `f` also receives the leaf's path name.

  Args:
    f: A function accepting a name (path-like "a/b/c"), a leaf of `tree`, and
      optionally the corresponding leaves of the extra trees.
    tree: The tree of parameters for which `f` should be applied.
    *rest: More trees with exactly the same structure as `tree`.

  Returns:
    A tree identical in structure to `tree` and `*rest`, with each leaf the
    result of calling `f` on the corresponding name/leaves.
  """
  named_leaves, tree_def = _tree_flatten_with_names(tree)
  names, leaves = zip(*named_leaves)
  extra_leaves = [list(zip(*_tree_flatten_with_names(other)[0]))[1]
                  for other in rest]
  mapped = [f(*args) for args in zip(names, leaves, *extra_leaves)]
  return tree_def.unflatten(mapped)
def _reinit(restored_params, init_params, to_reinit):
  """Reinitializes a subset of the parameters in the restored parameter tree."""
  def pick(name, restored, init):
    # Keep the freshly initialized leaf for names slated for re-initialization.
    return init if name in to_reinit else restored
  return _tree_map_with_names(pick, restored_params, init_params)
def load_from_pretrained_checkpoint(init_params, pretrained_path,
                                    model_representation_size, model_classifier,
                                    reinit_params):
  """Initializes (part of) a model from a pretrained checkpoint for fine tuning.

  Args:
    init_params: Tree of (possibly randomly) initialized parameters for the
      model. The structure will be kept, and a subset of the values will be
      replaced with values loaded from the pretrained checkpoint.
    pretrained_path: File pointing to pretrained checkpoint stored in NumPy
      `.npz` file.
    model_representation_size: Optional integer representation size
      hyperparameter for the model. If None, then the representation layer in
      the checkpoint will be removed (if present).
    model_classifier: String containing the classifier hyperparameter used for
      the model.
    reinit_params: List of parameter names to reinitialize.

  Returns:
    A tree of parameters with the same structure as `init_params`, but loaded
    with pretrained weights from `pretrained_path` and adapted accordingly.
  """
  params = load_checkpoint(None, pretrained_path)
  # Checkpoints from training wrap the params inside the optimizer state.
  if "opt" in params:
    params = params["opt"]["target"]
  # Tolerate partial overlap: fine-tuning may legitimately drop/add params.
  restored_params = _inspect_params(
      params=params,
      expected=init_params,
      fail_if_extra=False,
      fail_if_missing=False)
  # The following allows implementing fine-tuning head variants depending on the
  # value of `representation_size` in the fine-tuning job:
  # - `None`: drop the whole head and attach a nn.Linear.
  # - Same number as in pre-training: keep the head but reset the last
  #   layer (logits) for the new task.
  if model_representation_size is None:
    if "pre_logits" in restored_params:
      logging.info("load_pretrained: drop-head variant")
      del restored_params["pre_logits"]
  if reinit_params:
    restored_params = _reinit(restored_params, init_params, reinit_params)
  if "posembed_input" in restored_params.get("Transformer", {}):
    # Rescale the grid of position embeddings. Param shape is (1,N,1024)
    posemb = restored_params["Transformer"]["posembed_input"]["pos_embedding"]
    posemb_new = init_params["Transformer"]["posembed_input"]["pos_embedding"]
    if posemb.shape != posemb_new.shape:
      logging.info("load_pretrained: resized variant: %s to %s", posemb.shape,
                   posemb_new.shape)
      ntok_new = posemb_new.shape[1]
      # With a "token" classifier the first embedding belongs to the CLS token
      # and is kept as-is; only the spatial grid is resized.
      if model_classifier == "token":
        posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
        ntok_new -= 1
      else:
        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
      # Assumes the grid of patch positions is square (N == gs*gs).
      gs_old = int(np.sqrt(len(posemb_grid)))
      gs_new = int(np.sqrt(ntok_new))
      logging.info("load_pretrained: grid-size from %s to %s", gs_old, gs_new)
      posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
      # Bilinear (order=1) interpolation of the positional grid to the new size.
      zoom = (gs_new / gs_old, gs_new / gs_old, 1)
      posemb_grid = scipy.ndimage.zoom(posemb_grid, zoom, order=1)
      posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
      posemb = jnp.array(np.concatenate([posemb_tok, posemb_grid], axis=1))
      restored_params["Transformer"]["posembed_input"]["pos_embedding"] = posemb
  return restored_params
| [
"jax.tree_util.tree_structure",
"numpy.sqrt",
"io.BytesIO",
"absl.logging.info",
"numpy.argsort",
"tensorflow.io.gfile.rename",
"dataclasses.is_dataclass",
"scipy.ndimage.zoom",
"numpy.savez",
"tensorflow.io.gfile.GFile",
"jax.tree_util.tree_map",
"numpy.concatenate",
"absl.logging.warning",... | [((1831, 1860), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1854, 1860), False, 'import collections\n'), ((2843, 2904), 'jax.tree_util.tree_map', 'jax.tree_util.tree_map', (['_convert_and_recover_bfloat16', 'values'], {}), '(_convert_and_recover_bfloat16, values)\n', (2865, 2904), False, 'import jax\n'), ((3198, 3228), 'dataclasses.is_dataclass', 'dataclasses.is_dataclass', (['tree'], {}), '(tree)\n', (3222, 3228), False, 'import dataclasses\n'), ((4016, 4038), 'jax.tree_flatten', 'jax.tree_flatten', (['tree'], {}), '(tree)\n', (4032, 4038), False, 'import jax\n'), ((4320, 4336), 'numpy.argsort', 'np.argsort', (['perm'], {}), '(perm)\n', (4330, 4336), True, 'import numpy as np\n'), ((5363, 5375), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5373, 5375), False, 'import io\n'), ((5378, 5434), 'numpy.savez', 'np.savez', (['io_buffer'], {}), '(io_buffer, **{k: v for k, v in names_and_vals})\n', (5386, 5434), True, 'import numpy as np\n'), ((5702, 5746), 'tensorflow.io.gfile.rename', 'gfile.rename', (['path_tmp', 'path'], {'overwrite': '(True)'}), '(path_tmp, path, overwrite=True)\n', (5714, 5746), False, 'from tensorflow.io import gfile\n'), ((1237, 1249), 'jax.numpy.array', 'jnp.array', (['x'], {}), '(x)\n', (1246, 1249), True, 'import jax.numpy as jnp\n'), ((2612, 2635), 'tensorflow.io.gfile.GFile', 'gfile.GFile', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (2623, 2635), False, 'from tensorflow.io import gfile\n'), ((2930, 2964), 'jax.tree_util.tree_structure', 'jax.tree_util.tree_structure', (['tree'], {}), '(tree)\n', (2958, 2964), False, 'import jax\n'), ((2976, 3021), 'jax.tree_util.tree_unflatten', 'jax.tree_util.tree_unflatten', (['treedef', 'values'], {}), '(treedef, values)\n', (3004, 3021), False, 'import jax\n'), ((3241, 3279), 'flax.serialization.to_state_dict', 'flax.serialization.to_state_dict', (['tree'], {}), '(tree)\n', (3273, 3279), False, 'import flax\n'), ((5632, 5659), 
'tensorflow.io.gfile.GFile', 'gfile.GFile', (['path_tmp', '"""wb"""'], {}), "(path_tmp, 'wb')\n", (5643, 5659), False, 'from tensorflow.io import gfile\n'), ((5784, 5847), 'tensorflow.io.gfile.copy', 'gfile.copy', (['path', 'f"""{path}-{step_for_copy:09d}"""'], {'overwrite': '(True)'}), "(path, f'{path}-{step_for_copy:09d}', overwrite=True)\n", (5794, 5847), False, 'from tensorflow.io import gfile\n'), ((7102, 7169), 'absl.logging.warning', 'logging.warning', (['"""Inspect recovered empty keys:\n%s"""', 'empty_keys'], {}), '("""Inspect recovered empty keys:\n%s""", empty_keys)\n', (7117, 7169), False, 'from absl import logging\n'), ((7190, 7248), 'absl.logging.info', 'logging.info', (['"""Inspect missing keys:\n%s"""', 'missing_keys'], {}), '("""Inspect missing keys:\n%s""", missing_keys)\n', (7202, 7248), False, 'from absl import logging\n'), ((7267, 7321), 'absl.logging.info', 'logging.info', (['"""Inspect extra keys:\n%s"""', 'extra_keys'], {}), '("""Inspect extra keys:\n%s""", extra_keys)\n', (7279, 7321), False, 'from absl import logging\n'), ((10567, 10617), 'absl.logging.info', 'logging.info', (['"""load_pretrained: drop-head variant"""'], {}), "('load_pretrained: drop-head variant')\n", (10579, 10617), False, 'from absl import logging\n'), ((11098, 11192), 'absl.logging.info', 'logging.info', (['"""load_pretrained: resized variant: %s to %s"""', 'posemb.shape', 'posemb_new.shape'], {}), "('load_pretrained: resized variant: %s to %s', posemb.shape,\n posemb_new.shape)\n", (11110, 11192), False, 'from absl import logging\n'), ((11531, 11603), 'absl.logging.info', 'logging.info', (['"""load_pretrained: grid-size from %s to %s"""', 'gs_old', 'gs_new'], {}), "('load_pretrained: grid-size from %s to %s', gs_old, gs_new)\n", (11543, 11603), False, 'from absl import logging\n'), ((11736, 11782), 'scipy.ndimage.zoom', 'scipy.ndimage.zoom', (['posemb_grid', 'zoom'], {'order': '(1)'}), '(posemb_grid, zoom, order=1)\n', (11754, 11782), False, 'import scipy\n'), 
((11506, 11523), 'numpy.sqrt', 'np.sqrt', (['ntok_new'], {}), '(ntok_new)\n', (11513, 11523), True, 'import numpy as np\n'), ((11872, 11921), 'numpy.concatenate', 'np.concatenate', (['[posemb_tok, posemb_grid]'], {'axis': '(1)'}), '([posemb_tok, posemb_grid], axis=1)\n', (11886, 11921), True, 'import numpy as np\n'), ((2704, 2720), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (2714, 2720), False, 'import io\n')] |
# Copyright 2017-2021 Lawrence Livermore National Security, LLC and other
# CallFlow Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
# ------------------------------------------------------------------------------
"""
CallFlow's operation to calculate ensemble gradients per-callsite or per-module.
"""
import numpy as np
import pandas as pd
# TODO: Avoid the performance error in the future pass.
import warnings
import callflow
from callflow.utils.utils import histogram
from callflow.utils.df import df_unique
from callflow.datastructures.metrics import TIME_COLUMNS
from callflow.modules.histogram import Histogram
# Module-level logger for this file.
LOGGER = callflow.get_logger(__name__)
# Pandas emits PerformanceWarning for fragmented/multi-indexed frames built
# below; silence it module-wide (see TODO above about avoiding the cause).
warnings.simplefilter(action="ignore", category=pd.errors.PerformanceWarning)
# ------------------------------------------------------------------------------
class Gradients:
    """
    Computes the ensemble gradients for a given dictionary of dataframes,
    i.e., how each constituent run's mean runtime distributes across the
    histogram bins of the ensemble.
    """

    def __init__(self, sg, node, bins: int = 20, proxy_columns={}):
        """
        Constructor function for the class.

        :param sg: callflow.SuperGraph whose dataframe holds per-rank runtimes
            for all datasets in the ensemble.
        :param node: Super node or node (dict with at least "id" and "type",
            where "type" is "callsite" or "module").
        :param bins: Number of bins to distribute the runtime information.
        :param proxy_columns: Proxy columns (mapping from canonical TIME_COLUMNS
            to the actual dataframe column names).
        """
        # NOTE(review): mutable default `proxy_columns={}` is shared across
        # calls; benign here because it is only read, never mutated.
        assert isinstance(sg, callflow.SuperGraph)
        assert node.get("type") in ["callsite", "module"]
        assert isinstance(bins, int)
        assert isinstance(proxy_columns, dict)
        assert bins > 0
        self.node = node
        self.name = sg.get_name(node.get("id"), node.get("type"))
        # Index by (dataset, name) or (dataset, module) so `compute` can slice
        # per-dataset rows of this node with `df.xs`.
        indexers = ["dataset"]
        if node.get("type") == "callsite":
            indexers.append("name")
        elif node.get("type") == "module":
            indexers.append("module")
        # TODO: Could be slow for large datasets!!..
        self.df = sg.dataframe.set_index(indexers)
        # # gradient should be computed only for ensemble dataframe
        # # i.e., multiple values in dataframe column
        self.datasets = list(self.df.index.levels[0])
        assert len(self.datasets) >= 1
        self.bins = bins
        self.proxy_columns = proxy_columns
        self.time_columns = [self.proxy_columns.get(_, _) for _ in TIME_COLUMNS]
        self.max_ranks = max(df_unique(self.df, "rank"))
        # Eagerly compute; consumers read `self.result`.
        self.result = self.compute()

    @staticmethod
    def convert_dictmean_to_list(dictionary):
        """
        Convert a dictionary of per-rank values to a list of per-key means.

        :param dictionary: (dict) Input dictionary of {key: {rank: value}}
        :return: (list) mean of all values for each key in the dictionary
        """
        return [np.mean(np.array(list(dictionary[_].values()))) for _ in dictionary]

    @staticmethod
    def convert_dictmean_to_dict(dictionary):
        """
        Convert a dictionary of per-rank values to per-key means.

        :param dictionary: (dict) Input dictionary of {key: {rank: value}}
        :return: (dict) Dictionary of mean values indexed by the keys in the
            input dictionary.
        """
        return {_: np.mean(np.array(list(dictionary[_].values()))) for _ in dictionary}

    # --------------------------------------------------------------------------
    @staticmethod
    def map_datasets_to_bins(bins, dataset_dict={}):
        """
        Map dataset information to the corresponding bins.

        :param bins: histogram bin centers (1-D array-like; uniform spacing
            is assumed when reconstructing the edges below)
        :param dataset_dict: {dataset: mean value} dictionary
        :return: {dataset: bin index} mapping of the datasets to bins.
        """
        # TODO: previously, this logic applied to bin edges
        # but, now, we are working on bin_centers
        # Reconstruct edges by padding half a bin width on each side.
        binw = bins[1] - bins[0]
        bin_edges = np.append(bins - 0.5 * binw, bins[-1] + 0.5 * binw)
        # Map the datasets to their histogram indexes.
        dataset_position_dict = {}
        for dataset in dataset_dict:
            mean = dataset_dict[dataset]
            for idx, x in np.ndenumerate(bin_edges):
                # First edge strictly greater than the mean ends the scan;
                # the value belongs to the preceding bin.
                if x > float(mean):
                    if idx[0] != 0:
                        pos = idx[0] - 1
                    else:
                        pos = idx[0]
                    dataset_position_dict[dataset] = pos
                    break
                # Mean at/above the last edge falls into the last bin.
                if idx[0] == len(bin_edges) - 1:
                    dataset_position_dict[dataset] = len(bin_edges) - 2
        return dataset_position_dict

    # --------------------------------------------------------------------------
    def compute(self):
        """
        Compute the per-time-column gradients.

        :return: (dict) {time_column: {"bins", "dataset": {mean, d2p, p2d},
            "hist"}} serializable result.
        """
        dists = {tk: {} for tk, tv in zip(TIME_COLUMNS, self.time_columns)}
        # Get the runtimes for all the runs.
        levels = self.df.index.unique().tolist()
        for idx, dataset in enumerate(self.datasets):
            # If the level doesn't exist, it means this callsite is not present
            # in the dataset.
            if (dataset, self.node.get("id")) not in levels:
                continue
            node_df = self.df.xs((dataset, self.node.get("id")))
            for tk, tv in zip(TIME_COLUMNS, self.time_columns):
                if node_df.empty:
                    # No rows: pad with zeros, one entry per rank.
                    dists[tk][dataset] = dict(
                        (rank, 0) for rank in range(0, self.max_ranks)
                    )
                else:
                    dists[tk][dataset] = dict(zip(node_df["rank"], node_df[tv]))
        # Calculate appropriate number of bins automatically.
        # num_of_bins = min(self.freedman_diaconis_bins(np.array(dist_list)),
        num_of_bins = self.bins
        # convert the dictionary of values to list of values.
        results = {}
        for tk, tv in zip(TIME_COLUMNS, self.time_columns):
            dists_list = np.array(Gradients.convert_dictmean_to_list(dists[tk]))
            # NOTE(review): `datasets_dict` and `dists_dict` are identical —
            # the second call duplicates work and could reuse the first.
            datasets_dict = Gradients.convert_dictmean_to_dict(dists[tk])
            dists_dict = Gradients.convert_dictmean_to_dict(dists[tk])
            hist_grid = histogram(dists_list, bins=num_of_bins)
            # kde_grid = kde(dists_list, gridsize=num_of_bins)
            dataset_pos = Gradients.map_datasets_to_bins(hist_grid[0], datasets_dict)
            # Invert dataset->position into position->[datasets].
            # NOTE(review): comprehension variable shadows the builtin `bin`.
            pos_dataset = {bin: [] for bin in range(0, self.bins)}
            for dataset in dataset_pos:
                position = dataset_pos[dataset]
                if dataset not in pos_dataset[position]:
                    pos_dataset[position].append(dataset)
            results[tk] = {
                "bins": num_of_bins,
                "dataset": {"mean": dists_dict, "d2p": dataset_pos, "p2d": pos_dataset},
                # "kde": Histogram._format_data(kde_grid),
                "hist": Histogram._format_data(hist_grid),
            }
        return results
# ------------------------------------------------------------------------------
| [
"callflow.modules.histogram.Histogram._format_data",
"callflow.utils.utils.histogram",
"numpy.ndenumerate",
"callflow.get_logger",
"numpy.append",
"warnings.simplefilter",
"callflow.utils.df.df_unique"
] | [((681, 710), 'callflow.get_logger', 'callflow.get_logger', (['__name__'], {}), '(__name__)\n', (700, 710), False, 'import callflow\n'), ((711, 788), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'pd.errors.PerformanceWarning'}), "(action='ignore', category=pd.errors.PerformanceWarning)\n", (732, 788), False, 'import warnings\n'), ((3769, 3820), 'numpy.append', 'np.append', (['(bins - 0.5 * binw)', '(bins[-1] + 0.5 * binw)'], {}), '(bins - 0.5 * binw, bins[-1] + 0.5 * binw)\n', (3778, 3820), True, 'import numpy as np\n'), ((2386, 2412), 'callflow.utils.df.df_unique', 'df_unique', (['self.df', '"""rank"""'], {}), "(self.df, 'rank')\n", (2395, 2412), False, 'from callflow.utils.df import df_unique\n'), ((4016, 4041), 'numpy.ndenumerate', 'np.ndenumerate', (['bin_edges'], {}), '(bin_edges)\n', (4030, 4041), True, 'import numpy as np\n'), ((6054, 6093), 'callflow.utils.utils.histogram', 'histogram', (['dists_list'], {'bins': 'num_of_bins'}), '(dists_list, bins=num_of_bins)\n', (6063, 6093), False, 'from callflow.utils.utils import histogram\n'), ((6753, 6786), 'callflow.modules.histogram.Histogram._format_data', 'Histogram._format_data', (['hist_grid'], {}), '(hist_grid)\n', (6775, 6786), False, 'from callflow.modules.histogram import Histogram\n')] |
#!/usr/bin/env python
import numpy as np
def main():
    """Run one predict/update cycle of a 2-state (position, velocity)
    Kalman filter on a constant-velocity model and print the posterior.

    Fix: the original mixed the deprecated `np.matrix` type with plain
    ndarrays and relied on matrix `*` semantics; it now uses ndarrays with
    explicit `@` matrix multiplication throughout, and returns the posterior
    so the computation is testable (callers that ignored the previous None
    return are unaffected).

    Returns:
        (x, P): posterior state estimate (2x1 ndarray) and covariance
        (2x2 ndarray).
    """
    dt = 10
    # Prior state [position; velocity] and covariance.
    x0 = np.array([[2.0], [1.0]])
    P0 = 2 * np.identity(2)

    # Model: constant-velocity transition A, position-only measurement C.
    A = np.array([[1.0, dt], [0.0, 1.0]])
    C = np.array([[1.0, 0.0]])
    Q = 0.1 * np.identity(2)  # process noise covariance
    R = 0.1                   # measurement noise (scalar: one measurement)
    z = 2.25                  # observed position

    # Predict step.
    x = A @ x0
    P = A @ P0 @ A.T + Q

    # Update step.
    S = C @ P @ C.T + R              # innovation covariance (1x1)
    K = P @ C.T @ np.linalg.inv(S)   # Kalman gain (2x1)
    x = x + K @ (z - C @ x)
    P = (np.identity(2) - K @ C) @ P

    print(x)
    print(P)
    return x, P


if __name__ == '__main__':
    main()
| [
"numpy.identity",
"numpy.array",
"numpy.linalg.inv",
"numpy.matrix",
"numpy.transpose"
] | [((71, 95), 'numpy.array', 'np.array', (['[[2.0], [1.0]]'], {}), '([[2.0], [1.0]])\n', (79, 95), True, 'import numpy as np\n'), ((141, 169), 'numpy.matrix', 'np.matrix', (['[[1, dt], [0, 1]]'], {}), '([[1, dt], [0, 1]])\n', (150, 169), True, 'import numpy as np\n'), ((175, 193), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (183, 193), True, 'import numpy as np\n'), ((103, 117), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (114, 117), True, 'import numpy as np\n'), ((203, 217), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (214, 217), True, 'import numpy as np\n'), ((374, 390), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (387, 390), True, 'import numpy as np\n'), ((290, 305), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (302, 305), True, 'import numpy as np\n'), ((331, 346), 'numpy.transpose', 'np.transpose', (['C'], {}), '(C)\n', (343, 346), True, 'import numpy as np\n'), ((358, 373), 'numpy.transpose', 'np.transpose', (['C'], {}), '(C)\n', (370, 373), True, 'import numpy as np\n'), ((418, 432), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (429, 432), True, 'import numpy as np\n')] |
import numpy as np
from scipy.sparse import csr_matrix

# Sparse-matrix round-trip demo: dense -> CSR -> dense.
rows = [
    [1, 0, 0, 1, 0, 0],
    [0, 0, 2, 0, 0, 1],
    [0, 0, 0, 2, 0, 0],
]
arr = np.array(rows)
print(f"arr is {arr}")

# Compressed Sparse Row form stores only the non-zero entries.
S = csr_matrix(arr)
print(f"CSR matrix is {S}")

# Convert back to a dense matrix for comparison.
B = S.todense()
print(f"dense matrix is {B}")
| [
"numpy.array",
"scipy.sparse.csr_matrix"
] | [((63, 133), 'numpy.array', 'np.array', (['[[1, 0, 0, 1, 0, 0], [0, 0, 2, 0, 0, 1], [0, 0, 0, 2, 0, 0]]'], {}), '([[1, 0, 0, 1, 0, 0], [0, 0, 2, 0, 0, 1], [0, 0, 0, 2, 0, 0]])\n', (71, 133), True, 'import numpy as np\n'), ((166, 181), 'scipy.sparse.csr_matrix', 'csr_matrix', (['arr'], {}), '(arr)\n', (176, 181), False, 'from scipy.sparse import csr_matrix\n')] |
import numpy as np
def imagem_to_cinza(matrix_colorida: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale via the per-pixel channel average.

    Fixes over the original: the per-pixel Python loop summed raw channel
    scalars, which silently wraps for uint8 images whenever r+g+b > 255;
    channels are now widened to int64 first, and the whole computation is
    vectorized.

    :param matrix_colorida: (H, W, 3) RGB image array.
    :return: (H, W) float64 array of truncated per-pixel channel means.
    """
    canais = matrix_colorida.astype(np.int64)
    # Integer floor-division matches the original int((r + g + b) / 3)
    # truncation for the non-negative values found in images.
    cinza = (canais[..., 0] + canais[..., 1] + canais[..., 2]) // 3
    return cinza.astype(np.float64)
def imagem_rgb(matrix_colorida: np.ndarray) -> list:
    """Split an RGB image into its three channel planes.

    Vectorized replacement for the original per-pixel Python loop; the
    `int()` truncation of each channel value is preserved via an int64 cast.

    :param matrix_colorida: (H, W, 3) RGB image array.
    :return: list [R, G, B] of (H, W) float64 channel arrays.
    """
    # astype(int64) truncates toward zero exactly like the original int(r).
    return [matrix_colorida[..., canal].astype(np.int64).astype(np.float64)
            for canal in range(3)]
| [
"numpy.zeros"
] | [((177, 204), 'numpy.zeros', 'np.zeros', (['(linhas, colunas)'], {}), '((linhas, colunas))\n', (185, 204), True, 'import numpy as np\n'), ((557, 584), 'numpy.zeros', 'np.zeros', (['(linhas, colunas)'], {}), '((linhas, colunas))\n', (565, 584), True, 'import numpy as np\n'), ((600, 627), 'numpy.zeros', 'np.zeros', (['(linhas, colunas)'], {}), '((linhas, colunas))\n', (608, 627), True, 'import numpy as np\n'), ((643, 670), 'numpy.zeros', 'np.zeros', (['(linhas, colunas)'], {}), '((linhas, colunas))\n', (651, 670), True, 'import numpy as np\n')] |
import torch.nn as nn
from modules.DGL.transformer.layers import *
from modules.DGL.transformer.functions import *
from modules.DGL.transformer.embedding import *
from modules.DGL.transformer.optims import *
import dgl.function as fn
import torch.nn.init as INIT
class MultiHeadAttention(nn.Module):
    """Projection layers for multi-head attention: W_q/W_k/W_v to produce
    per-head queries/keys/values, and W_o to merge the head outputs. The
    attention itself is computed on the graph via edge/node UDFs elsewhere."""
    def __init__(self, h, dim_model):
        '''
        :param h: number of heads
        :param dim_model: hidden dimension (must be divisible by h)
        '''
        super(MultiHeadAttention, self).__init__()
        # Per-head dimension.
        self.d_k = dim_model // h
        self.h = h
        # W_q, W_k, W_v, W_o
        self.linears = clones(nn.Linear(dim_model, dim_model), 4)
    def get(self, x, fields='qkv'):
        'Return a dict of queries / keys / values for the requested fields.'
        batch_size = x.shape[0]
        ret = {}
        # Each projection is reshaped to (batch, heads, d_k).
        if 'q' in fields:
            ret['q'] = self.linears[0](x).view(batch_size, self.h, self.d_k)
        if 'k' in fields:
            ret['k'] = self.linears[1](x).view(batch_size, self.h, self.d_k)
        if 'v' in fields:
            ret['v'] = self.linears[2](x).view(batch_size, self.h, self.d_k)
        return ret
    def get_o(self, x):
        'Merge the per-head outputs through the output projection W_o.'
        batch_size = x.shape[0]
        return self.linears[3](x.view(batch_size, -1))
def message_func(edges):
    """Edge UDF: computes the dot-product attention score between source
    keys and destination queries, forwarding the source values alongside."""
    score = (edges.src['k'] * edges.dst['q']).sum(-1, keepdim=True)
    return {'score': score, 'v': edges.src['v']}
import torch as th
import torch.nn.functional as F
def reduce_func(nodes, d_k=64):
    """Node UDF: softmax-weighted average of the incoming values.

    Bug fix: the original called `th.sqrt(d_k)` on a plain Python int, which
    raises a TypeError because torch.sqrt expects a Tensor. The scalar square
    root is computed with `d_k ** 0.5` instead.

    :param nodes: node batch; `mailbox` holds stacked 'score' and 'v'
        messages along dim 1 (one slot per incoming edge).
    :param d_k: per-head key dimension used to scale the scores.
    :return: dict with 'dx', the attention-weighted sum of values.
    """
    v = nodes.mailbox['v']
    # Scaled dot-product attention: softmax over the incoming-edge axis (1).
    att = F.softmax(nodes.mailbox['score'] / d_k ** 0.5, 1)
    return {'dx': (att * v).sum(1)}
# Bug fix: `import functools.partial as partial` raises ModuleNotFoundError
# at import time, because functools.partial is a class, not a submodule.
from functools import partial


def naive_propagate_attention(self, g, eids):
    """Propagates attention along edges `eids` of graph `g` using the plain
    Python message/reduce UDFs (reference implementation; the built-in
    fused version in `propagate_attention` below is the fast path)."""
    g.send_and_recv(eids, message_func, partial(reduce_func, d_k=self.d_k))
def src_dot_dst(src_field, dst_field, out_field):
    """Builds an edge UDF that stores, under `out_field`, the dot product of
    a source-node field with a destination-node field."""
    def _edge_dot(edges):
        dot = (edges.src[src_field] * edges.dst[dst_field]).sum(-1, keepdim=True)
        return {out_field: dot}
    return _edge_dot
def scaled_exp(field, scale_constant):
    """Builds an edge UDF that divides `field` by `scale_constant` and
    exponentiates it, clamping the scaled value to [-5, 5] for softmax
    numerical stability."""
    def _edge_exp(edges):
        scaled = (edges.data[field] / scale_constant).clamp(-5, 5)
        return {field: th.exp(scaled)}
    return _edge_exp
def propagate_attention(self, g, eids):
    """Fused attention propagation over edges `eids` using DGL built-in
    message functions instead of the Python UDF reference implementation."""
    # Compute attention score: k.q per edge, then clamped scaled exp.
    g.apply_edges(src_dot_dst('k', 'q', 'score'), eids)
    g.apply_edges(scaled_exp('score', np.sqrt(self.d_k)))
    # Update node state: accumulate score-weighted values 'wv' and the
    # softmax normalizer 'z' on destination nodes.
    g.send_and_recv(eids,
                    [fn.src_mul_edge('v', 'score', 'v'), fn.copy_edge('score', 'score')],
                    [fn.sum('v', 'wv'), fn.sum('score', 'z')])
class Encoder(nn.Module):
    """Stack of N encoder layers; exposes per-layer pre/post node UDFs that
    the graph-based attention propagation calls around the message passing."""
    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.N = N
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)
    def pre_func(self, i, fields='qkv'):
        """Node UDF for layer `i`: pre-norm the node state and project the
        requested q/k/v fields."""
        layer = self.layers[i]
        def func(nodes):
            x = nodes.data['x']
            norm_x = layer.sublayer[0].norm(x)
            return layer.self_attn.get(norm_x, fields=fields)
        return func
    def post_func(self, i):
        """Node UDF for layer `i`: finish attention (normalize by 'z',
        output projection, residual + dropout) and apply the feed-forward
        sublayer; the final layer also applies the stack norm."""
        layer = self.layers[i]
        def func(nodes):
            x, wv, z = nodes.data['x'], nodes.data['wv'], nodes.data['z']
            o = layer.self_attn.get_o(wv / z)
            x = x + layer.sublayer[0].dropout(o)
            x = layer.sublayer[1](x, layer.feed_forward)
            return {'x': x if i < self.N - 1 else self.norm(x)}
        return func
class Decoder(nn.Module):
    """Stack of N decoder layers; per-layer pre/post node UDFs for both
    self-attention (sublayer l=0) and encoder-decoder attention (l=1)."""
    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.N = N
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)
    def pre_func(self, i, fields='qkv', l=0):
        """Node UDF for layer `i`, sublayer `l`: project the requested
        q/k/v fields from the (possibly pre-normed) node state."""
        layer = self.layers[i]
        def func(nodes):
            x = nodes.data['x']
            if fields == 'kv':
                norm_x = x # In enc-dec attention, x has already been normalized.
            else:
                norm_x = layer.sublayer[l].norm(x)
            return layer.self_attn.get(norm_x, fields)
        return func
    def post_func(self, i, l=0):
        """Node UDF for layer `i`, sublayer `l`: finish attention (normalize
        by 'z', output projection, residual + dropout); after the enc-dec
        sublayer (l == 1) also apply the feed-forward sublayer."""
        layer = self.layers[i]
        def func(nodes):
            x, wv, z = nodes.data['x'], nodes.data['wv'], nodes.data['z']
            o = layer.self_attn.get_o(wv / z)
            x = x + layer.sublayer[l].dropout(o)
            if l == 1:
                x = layer.sublayer[2](x, layer.feed_forward)
            return {'x': x if i < self.N - 1 else self.norm(x)}
        return func
class Transformer(nn.Module):
    """Graph-based Transformer: runs encoder self-attention, decoder
    self-attention and encoder-decoder attention as message passing over the
    corresponding edge sets of a DGL graph."""
    def __init__(self, encoder, decoder, src_embed, tgt_embed, pos_enc, generator, h, d_k):
        super(Transformer, self).__init__()
        self.encoder, self.decoder = encoder, decoder
        self.src_embed, self.tgt_embed = src_embed, tgt_embed
        self.pos_enc = pos_enc
        self.generator = generator
        self.h, self.d_k = h, d_k
    def propagate_attention(self, g, eids):
        """Fused attention propagation over edges `eids` (same scheme as the
        module-level `propagate_attention`)."""
        # Compute attention score
        g.apply_edges(src_dot_dst('k', 'q', 'score'), eids)
        g.apply_edges(scaled_exp('score', np.sqrt(self.d_k)))
        # Send weighted values to target nodes
        g.send_and_recv(eids,
                        [fn.src_mul_edge('v', 'score', 'v'), fn.copy_edge('score', 'score')],
                        [fn.sum('v', 'wv'), fn.sum('score', 'z')])
    def update_graph(self, g, eids, pre_pairs, post_pairs):
        "Update the node states and edge states of the graph."
        # Pre-compute queries and key-value pairs.
        for pre_func, nids in pre_pairs:
            g.apply_nodes(pre_func, nids)
        self.propagate_attention(g, eids)
        # Further calculation after attention mechanism
        for post_func, nids in post_pairs:
            g.apply_nodes(post_func, nids)
    def forward(self, graph):
        g = graph.g
        nids, eids = graph.nids, graph.eids
        # Word Embedding and Position Embedding
        src_embed, src_pos = self.src_embed(graph.src[0]), self.pos_enc(graph.src[1])
        tgt_embed, tgt_pos = self.tgt_embed(graph.tgt[0]), self.pos_enc(graph.tgt[1])
        g.nodes[nids['enc']].data['x'] = self.pos_enc.dropout(src_embed + src_pos)
        g.nodes[nids['dec']].data['x'] = self.pos_enc.dropout(tgt_embed + tgt_pos)
        for i in range(self.encoder.N):
            # Step 1: Encoder Self-attention
            pre_func = self.encoder.pre_func(i, 'qkv')
            post_func = self.encoder.post_func(i)
            nodes, edges = nids['enc'], eids['ee']
            self.update_graph(g, edges, [(pre_func, nodes)], [(post_func, nodes)])
        for i in range(self.decoder.N):
            # Step 2: Decoder Self-attention
            pre_func = self.decoder.pre_func(i, 'qkv')
            post_func = self.decoder.post_func(i)
            nodes, edges = nids['dec'], eids['dd']
            self.update_graph(g, edges, [(pre_func, nodes)], [(post_func, nodes)])
            # Step 3: Encoder-Decoder attention
            # Queries come from decoder nodes, keys/values from encoder nodes.
            pre_q = self.decoder.pre_func(i, 'q', 1)
            pre_kv = self.decoder.pre_func(i, 'kv', 1)
            post_func = self.decoder.post_func(i, 1)
            nodes_e, nodes_d, edges = nids['enc'], nids['dec'], eids['ed']
            self.update_graph(g, edges, [(pre_q, nodes_d), (pre_kv, nodes_e)], [(post_func, nodes_d)])
        # Project decoder node states to output vocabulary logits.
        return self.generator(g.ndata['x'][nids['dec']])
# Inspect one batched graph from the dataset: node/edge id layout and the
# raw source/target sequences.
# NOTE(review): `dataset` and `devices` are defined further down in this
# file — as written, this snippet runs before they exist; verify intended
# ordering against the original tutorial source.
graph_pool = GraphPool()
data_iter = dataset(graph_pool, mode='train', batch_size=1, devices=devices)
for graph in data_iter:
    print(graph.nids['enc']) # encoder node ids
    print(graph.nids['dec']) # decoder node ids
    print(graph.eids['ee']) # encoder-encoder edge ids
    print(graph.eids['ed']) # encoder-decoder edge ids
    print(graph.eids['dd']) # decoder-decoder edge ids
    print(graph.src[0]) # Input word index list
    print(graph.src[1]) # Input positions
    print(graph.tgt[0]) # Output word index list
    print(graph.tgt[1]) # Output positions
    break
from tqdm import tqdm
import torch as th
import numpy as np
from modules import make_model
from optims import NoamOpt
from dgl.contrib.transformer import get_dataset, GraphPool
def run_epoch(data_iter, model, loss_compute, is_train=True):
    """Run the model over one epoch of data and report loss/accuracy stats.

    Gradients are enabled only when is_train is True; the running statistics
    are accumulated inside loss_compute itself.
    """
    for _, batch_graph in tqdm(enumerate(data_iter)):
        with th.set_grad_enabled(is_train):
            loss_compute(model(batch_graph), batch_graph.tgt_y, batch_graph.n_tokens)
    print('average loss: {}'.format(loss_compute.avg_loss))
    print('accuracy: {}'.format(loss_compute.accuracy))
# Hyper-parameters for the toy "copy" task.
N = 1
batch_size = 128
devices = ['cuda' if th.cuda.is_available() else 'cpu']
dataset = get_dataset("copy")
V = dataset.vocab_size
# NOTE(review): LabelSmoothing and SimpleLossCompute are not imported here;
# presumably they are defined elsewhere in the original tutorial — verify.
criterion = LabelSmoothing(V, padding_idx=dataset.pad_id, smoothing=0.1)
dim_model = 128
# Create model
model = make_model(V, V, N=N, dim_model=128, dim_ff=128, h=1)
# Sharing weights between Encoder & Decoder
model.src_embed.lut.weight = model.tgt_embed.lut.weight
model.generator.proj.weight = model.tgt_embed.lut.weight
model, criterion = model.to(devices[0]), criterion.to(devices[0])
model_opt = NoamOpt(dim_model, 1, 400,
                    th.optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.98), eps=1e-9))
loss_compute = SimpleLossCompute
att_maps = []
# Train for 4 epochs; after each epoch run validation and capture the
# attention maps recorded on the model.
for epoch in range(4):
    train_iter = dataset(graph_pool, mode='train', batch_size=batch_size, devices=devices)
    valid_iter = dataset(graph_pool, mode='valid', batch_size=batch_size, devices=devices)
    print('Epoch: {} Training...'.format(epoch))
    model.train(True)
    run_epoch(train_iter, model,
              loss_compute(criterion, model_opt), is_train=True)
    print('Epoch: {} Evaluating...'.format(epoch))
    model.att_weight_map = None
    model.eval()
    run_epoch(valid_iter, model,
              loss_compute(criterion, None), is_train=False)
att_maps.append(model.att_weight_map) | [
"dgl.function.src_mul_edge",
"dgl.function.sum",
"numpy.sqrt",
"dgl.function.copy_edge",
"torch.sqrt",
"modules.make_model",
"torch.cuda.is_available",
"functools.partial",
"dgl.contrib.transformer.get_dataset",
"torch.nn.Linear",
"torch.set_grad_enabled",
"dgl.contrib.transformer.GraphPool"
] | [((7507, 7518), 'dgl.contrib.transformer.GraphPool', 'GraphPool', ([], {}), '()\n', (7516, 7518), False, 'from dgl.contrib.transformer import get_dataset, GraphPool\n'), ((8739, 8758), 'dgl.contrib.transformer.get_dataset', 'get_dataset', (['"""copy"""'], {}), "('copy')\n", (8750, 8758), False, 'from dgl.contrib.transformer import get_dataset, GraphPool\n'), ((8901, 8954), 'modules.make_model', 'make_model', (['V', 'V'], {'N': 'N', 'dim_model': '(128)', 'dim_ff': '(128)', 'h': '(1)'}), '(V, V, N=N, dim_model=128, dim_ff=128, h=1)\n', (8911, 8954), False, 'from modules import make_model\n'), ((1820, 1854), 'functools.partial', 'partial', (['reduce_func'], {'d_k': 'self.d_k'}), '(reduce_func, d_k=self.d_k)\n', (1827, 1854), True, 'import functools.partial as partial\n'), ((8691, 8713), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (8711, 8713), True, 'import torch as th\n'), ((629, 660), 'torch.nn.Linear', 'nn.Linear', (['dim_model', 'dim_model'], {}), '(dim_model, dim_model)\n', (638, 660), True, 'import torch.nn as nn\n'), ((1639, 1651), 'torch.sqrt', 'th.sqrt', (['d_k'], {}), '(d_k)\n', (1646, 1651), True, 'import torch as th\n'), ((2432, 2449), 'numpy.sqrt', 'np.sqrt', (['self.d_k'], {}), '(self.d_k)\n', (2439, 2449), True, 'import numpy as np\n'), ((2526, 2560), 'dgl.function.src_mul_edge', 'fn.src_mul_edge', (['"""v"""', '"""score"""', '"""v"""'], {}), "('v', 'score', 'v')\n", (2541, 2560), True, 'import dgl.function as fn\n'), ((2562, 2592), 'dgl.function.copy_edge', 'fn.copy_edge', (['"""score"""', '"""score"""'], {}), "('score', 'score')\n", (2574, 2592), True, 'import dgl.function as fn\n'), ((2617, 2634), 'dgl.function.sum', 'fn.sum', (['"""v"""', '"""wv"""'], {}), "('v', 'wv')\n", (2623, 2634), True, 'import dgl.function as fn\n'), ((2636, 2656), 'dgl.function.sum', 'fn.sum', (['"""score"""', '"""z"""'], {}), "('score', 'z')\n", (2642, 2656), True, 'import dgl.function as fn\n'), ((8400, 8429), 'torch.set_grad_enabled', 
'th.set_grad_enabled', (['is_train'], {}), '(is_train)\n', (8419, 8429), True, 'import torch as th\n'), ((5166, 5183), 'numpy.sqrt', 'np.sqrt', (['self.d_k'], {}), '(self.d_k)\n', (5173, 5183), True, 'import numpy as np\n'), ((5291, 5325), 'dgl.function.src_mul_edge', 'fn.src_mul_edge', (['"""v"""', '"""score"""', '"""v"""'], {}), "('v', 'score', 'v')\n", (5306, 5325), True, 'import dgl.function as fn\n'), ((5327, 5357), 'dgl.function.copy_edge', 'fn.copy_edge', (['"""score"""', '"""score"""'], {}), "('score', 'score')\n", (5339, 5357), True, 'import dgl.function as fn\n'), ((5386, 5403), 'dgl.function.sum', 'fn.sum', (['"""v"""', '"""wv"""'], {}), "('v', 'wv')\n", (5392, 5403), True, 'import dgl.function as fn\n'), ((5405, 5425), 'dgl.function.sum', 'fn.sum', (['"""score"""', '"""z"""'], {}), "('score', 'z')\n", (5411, 5425), True, 'import dgl.function as fn\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
base = pd.read_csv('plano-saude2.csv')
X = base.iloc[:, 0:1].values
y = base.iloc[:, 1].values
regressor = DecisionTreeRegressor()
regressor.fit(X, y) # treinamento do regressor de árvore de decisão
score = regressor.score(X, y) # verificando o score do modelo
plt.scatter(X, y) # geração do gráfico com a disposição dos dados
plt.plot(X, regressor.predict(X), color='red') # realizando a previsão no gráfico
plt.title('Regressão com árvore de decisão')
plt.xlabel('Idade')
plt.ylabel('Custo')
"""
A maneira como é criada o gráfico acima, não é exatamente como é feito por trás dos "panos" com
regressão em árvore de decisão, pois não temos uma linha contínua como foi exibida anteriormente
Abaixo, iremos criar uma variável nova denominada "X_teste" e iremos utilizar ela para criar
o novo gráfico com a visualização correta de uma árvore de decisão.
Árvores de decisão são chamadas de modelos não lineares e não contínuos,
por isso é verificado um gráfico com "escadas" com os pontos ligados
"""
# criação de um array numpy que irá iniciar com menor valor de X e irá até o maior de X com incremento de 0.1 em 0.1
X_teste = np.arange(min(X), max(X), 0.1)
X_teste = X_teste.reshape(-1, 1) # incluindo uma coluna para utilizar no comando plt.plot()
plt.scatter(X, y) # geração do gráfico com a disposição dos dados
plt.plot(X_teste, regressor.predict(X_teste), color='red') # realizando a previsão no gráfico
plt.title('Regressão com árvore de decisão')
plt.xlabel('Idade')
plt.ylabel('Custo')
regressor.predict(np.array(40).reshape(1, -1))
| [
"sklearn.tree.DecisionTreeRegressor",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title"
] | [((126, 157), 'pandas.read_csv', 'pd.read_csv', (['"""plano-saude2.csv"""'], {}), "('plano-saude2.csv')\n", (137, 157), True, 'import pandas as pd\n'), ((228, 251), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (249, 251), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((386, 403), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {}), '(X, y)\n', (397, 403), True, 'import matplotlib.pyplot as plt\n'), ((536, 580), 'matplotlib.pyplot.title', 'plt.title', (['"""Regressão com árvore de decisão"""'], {}), "('Regressão com árvore de decisão')\n", (545, 580), True, 'import matplotlib.pyplot as plt\n'), ((581, 600), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Idade"""'], {}), "('Idade')\n", (591, 600), True, 'import matplotlib.pyplot as plt\n'), ((601, 620), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Custo"""'], {}), "('Custo')\n", (611, 620), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1395), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {}), '(X, y)\n', (1389, 1395), True, 'import matplotlib.pyplot as plt\n'), ((1540, 1584), 'matplotlib.pyplot.title', 'plt.title', (['"""Regressão com árvore de decisão"""'], {}), "('Regressão com árvore de decisão')\n", (1549, 1584), True, 'import matplotlib.pyplot as plt\n'), ((1585, 1604), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Idade"""'], {}), "('Idade')\n", (1595, 1604), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1624), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Custo"""'], {}), "('Custo')\n", (1615, 1624), True, 'import matplotlib.pyplot as plt\n'), ((1645, 1657), 'numpy.array', 'np.array', (['(40)'], {}), '(40)\n', (1653, 1657), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
import abc
import math
import numbers
import numpy as np
import scipy.sparse as sp
from .physicalmodel import PhysicalModel
class ClassicalIsingModel(PhysicalModel):
    """Classical Ising / QUBO model with Metropolis single-spin-flip updates.

    The energy is E(s) = -c - s^T J s - h^T s, where ``j`` is stored as a
    strictly upper-triangular sparse matrix (its diagonal is folded into
    ``h`` by ``_to_triangular``).
    """

    @classmethod
    def initial_state(cls, shape, state_type):
        """Draw a random initial state for the given ``state_type``."""
        if state_type == 'qubo':
            return cls.initial_qubo_state(shape)
        elif state_type == 'ising':
            return cls.initial_ising_state(shape)
        # Fail loudly instead of silently returning None for a typo'd type.
        raise ValueError('unknown state_type: {!r}'.format(state_type))

    @classmethod
    def initial_qubo_state(cls, shape):
        """Random state with entries in {0, 1}."""
        return np.random.randint(2, size=shape, dtype=np.int8)

    @classmethod
    def initial_ising_state(cls, shape):
        """Random state with entries in {-1, +1}."""
        return np.random.randint(2, size=shape, dtype=np.int8)*2 - 1

    def __init__(self, j, h, c=0, beta=1.0, state=None, state_size=None, state_type='qubo', random_state=None):
        """Build the model.

        Args:
            j: coupling matrix (array-like or {(i, k): value} dict).
            h: local field (array-like or dict; see ``_as_matrix``).
            c: constant energy offset.
            beta: inverse temperature used in the Metropolis acceptance rule.
            state: initial state; drawn randomly when None.
            state_size: shape used to draw a random state (only when
                ``state`` is None).
            state_type: 'qubo' ({0, 1} states) or 'ising' ({-1, +1} states).
            random_state: seed, None, or a ``np.random.RandomState`` instance.
        """
        if state is None:
            state = self.initial_state(state_size, state_type)
        self.state_size = state.size
        j = self._as_matrix(j, (self.state_size, self.state_size))
        h = self._as_matrix(h, self.state_size)
        j, h = self._to_triangular(j, h)
        j = sp.csr_matrix(j)
        jt = j.T.tocsr()
        j2 = j + jt  # symmetrized couplings, used for O(nnz) energy diffs
        self.j = j
        self.j2 = j2
        self.h = h
        self.c = c
        self.beta = beta
        self._state = state
        self.state_type = state_type
        self._is_qubo = 1 if state_type == 'qubo' else 0
        if isinstance(random_state, (numbers.Number, None.__class__)):
            self.random_state = np.random.RandomState(random_state)
        else:
            self.random_state = random_state

    def __repr__(self):
        # BUGFIX: the 'state=' slot was previously formatted with self.h.
        return (
            '{}('
            'j={}, '
            'h={}, '
            'c={}, '
            'beta={}, '
            'state={}, '
            'state_type={}, '
            'random={}'
            ')'
        ).format(
            self.__class__.__name__,
            str(self.j)[:10] + '...',
            str(self.h)[:10] + '...',
            self.c,
            self.beta,
            str(self._state)[:10] + '...',
            self.state_type,
            self.random_state
        )

    @staticmethod
    def _as_matrix(list_or_dict, shape=None):
        """Convert a {(i, k): value} dict to a dense array; pass through otherwise."""
        if isinstance(list_or_dict, dict):
            matrix = np.zeros(shape)
            for (i, j), v in list_or_dict.items():
                matrix[i, j] = v
            return matrix
        else:
            return list_or_dict

    @staticmethod
    def _to_triangular(j, h):
        """Fold j's diagonal into h and make j strictly upper-triangular."""
        h = h + j.diagonal()
        j = (1 - np.tri(h.size))*(j + j.T)
        return j, h

    def _flip_spin(self, index):
        # Ising flip: s -> -s; QUBO flip: 0 <-> 1 (shift by +1 after negation).
        self._state[index] *= -1
        if self._is_qubo:
            self._state[index] += 1

    def energy_diff(self, index):
        """Energy change that flipping the spin at ``index`` would cause."""
        if self._is_qubo:
            sign = self._state[index]*2 - 1
        else:
            sign = self._state[index]
        return sign*(
            self.j2.dot(self._state)[index]
            + self.h[index]
        )

    def energy(self):
        """Total energy E(s) = -c - s^T J s - h^T s of the current state."""
        e = -self.c
        e -= self.j.dot(self._state).dot(self._state)
        e -= self.h.dot(self._state)
        return e

    def update_state(self):
        """One Metropolis sweep over all spins; returns True if any spin flipped."""
        updated = False
        indices = self.random_state.permutation(self.state_size)
        for index in indices:
            delta = max(0., self.energy_diff(index))
            # Accept with probability exp(-beta * delta).
            if math.exp(-self.beta*delta) > self.random_state.rand():
                self._flip_spin(index)
                updated = True
        return updated

    @property
    def state(self):
        return self._state
| [
"numpy.random.randint",
"numpy.zeros",
"numpy.random.RandomState",
"scipy.sparse.csr_matrix",
"numpy.tri",
"math.exp"
] | [((499, 546), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'shape', 'dtype': 'np.int8'}), '(2, size=shape, dtype=np.int8)\n', (516, 546), True, 'import numpy as np\n'), ((1083, 1099), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['j'], {}), '(j)\n', (1096, 1099), True, 'import scipy.sparse as sp\n'), ((1490, 1525), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (1511, 1525), True, 'import numpy as np\n'), ((2237, 2252), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2245, 2252), True, 'import numpy as np\n'), ((621, 668), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'shape', 'dtype': 'np.int8'}), '(2, size=shape, dtype=np.int8)\n', (638, 668), True, 'import numpy as np\n'), ((2504, 2518), 'numpy.tri', 'np.tri', (['h.size'], {}), '(h.size)\n', (2510, 2518), True, 'import numpy as np\n'), ((3309, 3337), 'math.exp', 'math.exp', (['(-self.beta * delta)'], {}), '(-self.beta * delta)\n', (3317, 3337), False, 'import math\n')] |
# -*- coding: utf-8 -*-
from scipy import misc
from scipy import ndimage
import numpy as np
import util
import os
oriDir = '../data/'  # raw input images, one sub-folder per group
tgtDir = '../processedData/'  # destination for augmented images
imgLength = 512  # side length images are resized to before cropping
compressRatio = 0.2  # fraction cropped from each border after resizing
compressLen = int(imgLength * compressRatio)  # border crop in pixels
def flipImageMatrix(img):
    """Return a horizontally mirrored uint8 copy of an RGB image."""
    mirrored = np.ndarray(img.shape, dtype='uint8')
    # Mirror each of the three color channels left-to-right.
    for channel in (0, 1, 2):
        mirrored[:, :, channel] = np.fliplr(img[:, :, channel])
    return mirrored
def filterOnePic(x):
    """Zero out (in place) every pixel whose color leaves the accepted band.

    A pixel is kept only when channel 0 is within [128, 227], channel 1
    within [84, 183] and channel 2 within [100, 199]; otherwise its first
    three channels are set to 0.  Vectorized replacement for the original
    per-pixel Python double loop (one C-level pass instead of H*W iterations).
    """
    r = x[:, :, 0]
    g = x[:, :, 1]
    b = x[:, :, 2]
    out_of_band = ((r < 128) | (r > 227) |
                   (g < 84) | (g > 183) |
                   (b < 100) | (b > 199))
    # Match the original loop exactly: only channels 0..2 are cleared.
    x[out_of_band, 0:3] = 0
def preprocessImgMatrix(x):
    """Resize an image to imgLength x imgLength, then center-crop the borders.

    NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; this code
    requires an old SciPy (with Pillow installed) — confirm before upgrading.
    """
    r_img = misc.imresize(x, [imgLength, imgLength], interp='nearest')
    # Drop compressLen pixels from every side (central crop).
    r_img = r_img[compressLen:imgLength - compressLen,
            compressLen:imgLength - compressLen]
    return r_img
# Pinyin romanization of the Chinese label characters used in image names.
labelTransDict = {
    '常': 'chang',
    '黄': 'huang',
    '红': 'hong',
    '有': 'you',
    '无': 'wu',
    '紫': 'zi',
    '白': 'bai',
}


def transferLabel(label):
    """Translate a '-'-separated Chinese label into its pinyin form."""
    return '-'.join(labelTransDict[part] for part in label.split('-'))
def augment(x, img_name):
    """Augment one image: resize/crop, then save 4 rotations and their mirrors.

    The label is taken from the 5 characters of ``img_name`` surrounding the
    first '-' (assumed to be '-'-joined Chinese label characters — TODO
    confirm against the source file naming), romanized via transferLabel, and
    used to build the output file names under tgtDir.
    """
    i = img_name.index('-')
    rimg = preprocessImgMatrix(x)
    label = img_name[i - 1:i + 4]
    label = transferLabel(label)
    # filterOnePic(rimg) # optional color-band filtering (disabled)
    for rotateIndex in range(4):
        misc.imsave(tgtDir + label + '-' + img_name[:i - 1] + '-' + str(rotateIndex) + '.jpg', rimg)
        misc.imsave(tgtDir + label + '-' + img_name[:i - 1] + '-r' + str(rotateIndex) + '.jpg',
                    flipImageMatrix(rimg)) # mirrored copy
        rimg = ndimage.rotate(rimg, 90) # rotate 90 degrees for next iteration
def processData():
    """Rebuild tgtDir with augmented versions of every image under oriDir.

    Each image is augmented twice: once as-is and once pre-rotated by 45
    degrees.  Unreadable images are reported and skipped (best effort).
    """
    util.updateDir(tgtDir)
    for fn in os.listdir(oriDir):
        for imgName in os.listdir(oriDir + fn):
            imgPath = oriDir + fn + '/' + imgName
            print('processing' + imgPath)
            try:
                x = util.getImageMatrix(imgPath)
            except Exception as e:
                # BUGFIX: the exception was bound to `e` but never reported,
                # discarding the failure reason. Keep the best-effort skip
                # but say why the image failed.
                print('----ERROR----', e)
                continue
            augment(x, imgName)
            augment(ndimage.rotate(x, 45, reshape=False), '45_' + imgName)
def loadData(index): # returns shuffled (X, y) — the original note claimed a train/test split
    """Load processed images and integer labels for one label component.

    Args:
        index: which '-'-separated component of each file name to use as the
            label.

    Returns:
        (X, y): shuffled uint8 image array of shape (n, H, W, C) and the
        matching uint8 label-index vector.
    """
    dataset = []
    labelset = []
    for fn in os.listdir(tgtDir):
        label = fn.split('-')[index]
        x = util.getImageMatrix(tgtDir + fn)
        dataset.append((x, label))
        if label not in labelset:
            labelset.append(label)
    print('index label')
    # Map each distinct label to a small integer, in first-seen order.
    labelDict = {}
    for i in range(len(labelset)):
        print(i, labelset[i])
        labelDict[labelset[i]] = i
    print('shuffling')
    sampleSize = len(dataset)
    height, width, channels = dataset[0][0].shape
    X = np.zeros((sampleSize, height, width, channels), dtype='uint8')
    y = np.zeros(sampleSize, dtype='uint8')
    # Random permutation so samples are shuffled before training.
    seq = np.random.permutation(sampleSize)
    c = 0
    for i in seq:
        X[c] = dataset[i][0]
        y[c] = labelDict[dataset[i][1]]
        c += 1
    print('Datasets loading finish!')
    return X, y
def getFilterRange():
    """Print per-channel intensity histograms over every prepared image.

    Prints three 256-entry lists (c1, c2, c3), one per color channel,
    counting how often each intensity value occurs across all images in
    preparedDatasets/.  Uses np.bincount per channel instead of the original
    triple Python loop — one C-level pass per channel per image.
    """
    counts = [np.zeros(256, dtype=np.int64) for _ in range(3)]
    dirPath = "preparedDatasets/"
    for fn in os.listdir(dirPath):
        # assumes (H, W, C) uint8 images — TODO confirm util.getImageMatrix
        x = util.getImageMatrix(dirPath + fn)
        for channel in range(3):
            counts[channel] += np.bincount(
                x[:, :, channel].ravel(), minlength=256)
    c1, c2, c3 = (c.tolist() for c in counts)
    print('c1')
    print(c1)
    print('c2')
    print(c2)
    print('c3')
    print(c3)
def filterPic():
    """Apply the color-band filter to every source image and save the results.

    BUGFIX: the original saved each image to ``tgtDir + fn`` where ``fn`` is
    the *sub-folder* name, so every image in a folder overwrote the previous
    one; the output path now uses the image file name instead.
    """
    tgtDir = 'filteredDataset/'
    util.updateDir(tgtDir)
    for fn in os.listdir(oriDir):
        for imgName in os.listdir(oriDir + fn):
            x = util.getImageMatrix(oriDir + fn + '/' + imgName)
            filterOnePic(x)
            print('saved to: ', tgtDir + imgName)
            misc.imsave(tgtDir + imgName, x)
# getFilterRange()
# 压缩和旋转图片()
# loadData(0)
# filterPic()
# def checkLabel():
# for fn in os.listdir(oriDir):
# for imgName in os.listdir(oriDir + fn):
# i = imgName.index('-')
# # 先压缩, 然后旋转三次, 由一张图片变四张
# label = imgName[i - 1:i + 4]
# shetai = label[-1]
# if shetai == '白':
# print(oriDir + fn + '/' + imgName)
def lookData(dataType):
    """Copy every source image into look/, renamed by one label component.

    dataType selects which '-'-separated label component (taken from the 5
    characters around the first '-' of the file name) becomes the new name.
    """
    tgtDirPath = 'look/'
    util.updateDir(tgtDirPath)
    import shutil
    counter = 0
    for folder in os.listdir(oriDir):
        for imgName in os.listdir(oriDir + folder):
            dashPos = imgName.index('-')
            srcPath = oriDir + folder + '/' + imgName
            label = imgName[dashPos - 1:dashPos + 4].split('-')[dataType]
            shutil.copyfile(srcPath, tgtDirPath + label + str(counter) + '.jpg')
            print(counter)
            counter += 1
if __name__ == '__main__':
    # Script entry point: regenerate the augmented dataset.
    processData()
| [
"os.listdir",
"numpy.fliplr",
"scipy.misc.imsave",
"numpy.zeros",
"numpy.ndarray",
"scipy.misc.imresize",
"util.updateDir",
"scipy.ndimage.rotate",
"util.getImageMatrix",
"numpy.random.permutation"
] | [((310, 346), 'numpy.ndarray', 'np.ndarray', (['img.shape'], {'dtype': '"""uint8"""'}), "(img.shape, dtype='uint8')\n", (320, 346), True, 'import numpy as np\n'), ((375, 398), 'numpy.fliplr', 'np.fliplr', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (384, 398), True, 'import numpy as np\n'), ((427, 450), 'numpy.fliplr', 'np.fliplr', (['img[:, :, 1]'], {}), '(img[:, :, 1])\n', (436, 450), True, 'import numpy as np\n'), ((479, 502), 'numpy.fliplr', 'np.fliplr', (['img[:, :, 2]'], {}), '(img[:, :, 2])\n', (488, 502), True, 'import numpy as np\n'), ((935, 993), 'scipy.misc.imresize', 'misc.imresize', (['x', '[imgLength, imgLength]'], {'interp': '"""nearest"""'}), "(x, [imgLength, imgLength], interp='nearest')\n", (948, 993), False, 'from scipy import misc\n'), ((2075, 2097), 'util.updateDir', 'util.updateDir', (['tgtDir'], {}), '(tgtDir)\n', (2089, 2097), False, 'import util\n'), ((2113, 2131), 'os.listdir', 'os.listdir', (['oriDir'], {}), '(oriDir)\n', (2123, 2131), False, 'import os\n'), ((2673, 2691), 'os.listdir', 'os.listdir', (['tgtDir'], {}), '(tgtDir)\n', (2683, 2691), False, 'import os\n'), ((3152, 3214), 'numpy.zeros', 'np.zeros', (['(sampleSize, height, width, channels)'], {'dtype': '"""uint8"""'}), "((sampleSize, height, width, channels), dtype='uint8')\n", (3160, 3214), True, 'import numpy as np\n'), ((3224, 3259), 'numpy.zeros', 'np.zeros', (['sampleSize'], {'dtype': '"""uint8"""'}), "(sampleSize, dtype='uint8')\n", (3232, 3259), True, 'import numpy as np\n'), ((3271, 3304), 'numpy.random.permutation', 'np.random.permutation', (['sampleSize'], {}), '(sampleSize)\n', (3292, 3304), True, 'import numpy as np\n'), ((3676, 3695), 'os.listdir', 'os.listdir', (['dirPath'], {}), '(dirPath)\n', (3686, 3695), False, 'import os\n'), ((4047, 4069), 'util.updateDir', 'util.updateDir', (['tgtDir'], {}), '(tgtDir)\n', (4061, 4069), False, 'import util\n'), ((4087, 4105), 'os.listdir', 'os.listdir', (['oriDir'], {}), '(oriDir)\n', (4097, 4105), False, 'import os\n'), 
((4818, 4844), 'util.updateDir', 'util.updateDir', (['tgtDirPath'], {}), '(tgtDirPath)\n', (4832, 4844), False, 'import util\n'), ((4892, 4910), 'os.listdir', 'os.listdir', (['oriDir'], {}), '(oriDir)\n', (4902, 4910), False, 'import os\n'), ((2015, 2039), 'scipy.ndimage.rotate', 'ndimage.rotate', (['rimg', '(90)'], {}), '(rimg, 90)\n', (2029, 2039), False, 'from scipy import ndimage\n'), ((2157, 2180), 'os.listdir', 'os.listdir', (['(oriDir + fn)'], {}), '(oriDir + fn)\n', (2167, 2180), False, 'import os\n'), ((2744, 2776), 'util.getImageMatrix', 'util.getImageMatrix', (['(tgtDir + fn)'], {}), '(tgtDir + fn)\n', (2763, 2776), False, 'import util\n'), ((3710, 3743), 'util.getImageMatrix', 'util.getImageMatrix', (['(dirPath + fn)'], {}), '(dirPath + fn)\n', (3729, 3743), False, 'import util\n'), ((4131, 4154), 'os.listdir', 'os.listdir', (['(oriDir + fn)'], {}), '(oriDir + fn)\n', (4141, 4154), False, 'import os\n'), ((4936, 4959), 'os.listdir', 'os.listdir', (['(oriDir + fn)'], {}), '(oriDir + fn)\n', (4946, 4959), False, 'import os\n'), ((4173, 4221), 'util.getImageMatrix', 'util.getImageMatrix', (["(oriDir + fn + '/' + imgName)"], {}), "(oriDir + fn + '/' + imgName)\n", (4192, 4221), False, 'import util\n'), ((4310, 4337), 'scipy.misc.imsave', 'misc.imsave', (['(tgtDir + fn)', 'x'], {}), '(tgtDir + fn, x)\n', (4321, 4337), False, 'from scipy import misc\n'), ((2315, 2343), 'util.getImageMatrix', 'util.getImageMatrix', (['imgPath'], {}), '(imgPath)\n', (2334, 2343), False, 'import util\n'), ((2500, 2536), 'scipy.ndimage.rotate', 'ndimage.rotate', (['x', '(45)'], {'reshape': '(False)'}), '(x, 45, reshape=False)\n', (2514, 2536), False, 'from scipy import ndimage\n')] |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control RL Unplugged datasets.
Examples in the dataset represent sequences stored when running a partially
trained agent (trained in online way) as described in
https://arxiv.org/abs/2006.13888.
Every dataset has a SARSA version, and datasets for environments for solving
which we believe one may need a recurrent agent also include a version of the
dataset with overlapping sequences of length 40.
Datasets for the dm_control_suite environments only include proprio
observations, while datasets for dm_locomotion include both pixel and proprio
observations.
"""
import collections
import functools
import os
from typing import Dict, Optional, Tuple, Set
from acme import wrappers
from dm_control import composer
from dm_control import suite
from dm_control.composer.variation import colors
from dm_control.composer.variation import distributions
from dm_control.locomotion import arenas
from dm_control.locomotion import props
from dm_control.locomotion import tasks
from dm_control.locomotion import walkers
from dm_env import specs
import numpy as np
import reverb
import tensorflow as tf
import tree
def _build_rodent_escape_env():
  """Construct the bowl-escape locomotion environment for a rat walker."""
  rat = walkers.Rat(
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  bowl_arena = arenas.bowl.Bowl(
      size=(20., 20.),
      aesthetic='outdoor_natural')
  escape_task = tasks.escape.Escape(
      walker=rat,
      arena=bowl_arena,
      physics_timestep=0.001,
      control_timestep=.02)
  return composer.Environment(
      task=escape_task,
      time_limit=20,
      strip_singleton_obs_buffer_dim=True)
def _build_rodent_maze_env():
  """Construct a procedurally generated multi-goal maze for a rat walker."""
  rat = walkers.Rat(
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  maze = arenas.mazes.RandomMazeWithTargets(
      x_cells=11,
      y_cells=11,
      xy_scale=.5,
      z_height=.3,
      max_rooms=4,
      room_min_size=4,
      room_max_size=5,
      spawns_per_room=1,
      targets_per_room=3,
      wall_textures=arenas.labmaze_textures.WallTextures(style='style_01'),
      aesthetic='outdoor_natural')
  # Each goal is a small two-tone target sphere.
  target_factory = functools.partial(
      props.target_sphere.TargetSphere,
      radius=0.05,
      height_above_ground=.125,
      rgb1=(0, 0, 0.4),
      rgb2=(0, 0, 0.7))
  maze_task = tasks.random_goal_maze.ManyGoalsMaze(
      walker=rat,
      maze_arena=maze,
      target_builder=target_factory,
      target_reward_scale=50.,
      contact_termination=False,
      control_timestep=.02,
      physics_timestep=0.001)
  return composer.Environment(
      task=maze_task,
      time_limit=30,
      strip_singleton_obs_buffer_dim=True)
def _build_rodent_corridor_gaps():
  """Construct a gaps-corridor running task for a rat walker."""
  rat = walkers.Rat(
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  # Platform and gap lengths are re-sampled per episode.
  corridor = arenas.corridors.GapsCorridor(
      corridor_width=2,
      platform_length=distributions.Uniform(low=0.4, high=0.8),
      gap_length=distributions.Uniform(low=0.05, high=0.2),
      corridor_length=40,
      aesthetic='outdoor_natural')
  run_task = tasks.corridors.RunThroughCorridor(
      walker=rat,
      arena=corridor,
      walker_spawn_position=(5, 0, 0),
      walker_spawn_rotation=0,
      target_velocity=1.0,
      contact_termination=False,
      terminate_at_height=-0.3,
      physics_timestep=0.001,
      control_timestep=.02)
  return composer.Environment(
      task=run_task,
      time_limit=30,
      strip_singleton_obs_buffer_dim=True)
def _build_rodent_two_touch_env():
  """Construct the two-touch target-reaching task for a rat walker."""
  rat = walkers.Rat(
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  floor = arenas.floors.Floor(
      size=(10., 10.), aesthetic='outdoor_natural')
  touch_task = tasks.reach.TwoTouch(
      walker=rat,
      arena=floor,
      target_builders=[
          functools.partial(
              props.target_sphere.TargetSphereTwoTouch,
              radius=0.025),
      ],
      randomize_spawn_rotation=True,
      target_type_rewards=[25.],
      shuffle_target_builders=False,
      target_area=(1.5, 1.5),
      physics_timestep=0.001,
      control_timestep=.02)
  return composer.Environment(
      task=touch_task,
      time_limit=30,
      strip_singleton_obs_buffer_dim=True)
def _build_humanoid_walls_env():
  """Construct a walls-corridor running task for a CMU humanoid walker."""
  humanoid = walkers.CMUHumanoidPositionControlled(
      name='walker',
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  # Wall color varies in the red/green channels only.
  wall_color = colors.RgbVariation(
      r=distributions.Uniform(low=0.5, high=0.6),
      g=distributions.Uniform(low=0.21, high=0.41),
      b=0,
      alpha=1)
  corridor = arenas.WallsCorridor(
      wall_gap=5.0,
      wall_width=distributions.Uniform(low=1, high=7),
      wall_height=distributions.Uniform(low=2.5, high=4.0),
      swap_wall_side=distributions.Bernoulli(prob=0.5),
      wall_rgba=wall_color,
      corridor_width=10,
      corridor_length=100)
  walk_task = tasks.RunThroughCorridor(
      walker=humanoid,
      arena=corridor,
      walker_spawn_rotation=1.57,  # pi / 2
      physics_timestep=0.005,
      control_timestep=0.03)
  return composer.Environment(
      task=walk_task,
      time_limit=30,
      strip_singleton_obs_buffer_dim=True)
def _build_humanoid_corridor_env():
  """Construct an empty-corridor running task for a CMU humanoid walker."""
  humanoid = walkers.CMUHumanoidPositionControlled(
      name='walker',
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  corridor = arenas.EmptyCorridor(
      corridor_width=10,
      corridor_length=100)
  walk_task = tasks.RunThroughCorridor(
      walker=humanoid,
      arena=corridor,
      walker_spawn_rotation=1.57,  # pi / 2
      physics_timestep=0.005,
      control_timestep=0.03)
  return composer.Environment(
      task=walk_task,
      time_limit=30,
      strip_singleton_obs_buffer_dim=True)
def _build_humanoid_corridor_gaps():
  """Construct a gaps-corridor running task for a CMU humanoid walker."""
  humanoid = walkers.CMUHumanoidPositionControlled(
      name='walker',
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  # Platform and gap lengths are re-sampled per episode.
  corridor = arenas.GapsCorridor(
      corridor_width=10,
      platform_length=distributions.Uniform(low=0.3, high=2.5),
      gap_length=distributions.Uniform(low=0.75, high=1.25),
      corridor_length=100)
  walk_task = tasks.RunThroughCorridor(
      walker=humanoid,
      arena=corridor,
      walker_spawn_position=(2, 0, 0),
      walker_spawn_rotation=1.57,  # pi / 2
      physics_timestep=0.005,
      control_timestep=0.03)
  return composer.Environment(
      task=walk_task,
      time_limit=30,
      strip_singleton_obs_buffer_dim=True)
class MujocoActionNormalizer(wrappers.EnvironmentWrapper):
  """Rescale actions to [-1, 1] range for mujoco physics engine.

  For control environments whose actions have bounded range in [-1, 1], this
  adaptor rescale actions to the desired range. This allows actor network to
  output unscaled actions for better gradient dynamics.
  """

  def __init__(self, environment, rescale='clip'):
    super().__init__(environment)
    self._rescale = rescale

  def step(self, action):
    """Rescale actions to [-1, 1] range before stepping wrapped environment."""
    if self._rescale == 'tanh':
      squash = np.tanh
    elif self._rescale == 'clip':
      squash = lambda a: np.clip(a, -1., 1.)
    else:
      raise ValueError('Unrecognized scaling option: %s' % self._rescale)
    return self._environment.step(tree.map_structure(squash, action))
class NormilizeActionSpecWrapper(wrappers.EnvironmentWrapper):
  """Turn each dimension of the actions into the range of [-1, 1]."""

  def __init__(self, environment):
    super().__init__(environment)
    spec = environment.action_spec()
    self._scale = spec.maximum - spec.minimum
    self._offset = spec.minimum
    # Advertise a [-1, 1] spec with the wrapped spec's shape/dtype/name.
    self._action_spec = specs.BoundedArray(
        spec.shape,
        spec.dtype,
        spec.minimum * 0 - 1.,
        spec.minimum * 0 + 1.,
        name=spec.name)

  def _from_normal_actions(self, actions):
    # Map [-1, 1] -> [0, 1] -> [minimum, maximum].
    rescaled = 0.5 * (actions + 1.0)
    return rescaled * self._scale + self._offset

  def step(self, action):
    return self._environment.step(self._from_normal_actions(action))

  def action_spec(self):
    return self._action_spec
class FilterObservationsWrapper(wrappers.EnvironmentWrapper):
  """Filter out all the observations not specified to this wrapper."""

  def __init__(self, environment, observations_to_keep):
    super().__init__(environment)
    self._observations_to_keep = observations_to_keep
    full_spec = self._environment.observation_spec()
    self._observation_spec = collections.OrderedDict(
        (key, full_spec[key]) for key in observations_to_keep)

  def _filter_observation(self, timestep):
    # Keep only the requested keys, preserving the requested order.
    kept = collections.OrderedDict(
        (key, timestep.observation[key])
        for key in self._observations_to_keep)
    return timestep._replace(observation=kept)

  def step(self, action):
    return self._filter_observation(self._environment.step(action))

  def reset(self):
    return self._filter_observation(self._environment.reset())

  def observation_spec(self):
    return self._observation_spec
class ControlSuite:
"""Create bits needed to run agents on an Control Suite dataset."""
def __init__(self, task_name='humanoid_run'):
"""Initializes datasets/environments for the Deepmind Control suite.
Args:
task_name: take name. Must be one of,
finger_turn_hard, manipulator_insert_peg, humanoid_run,
cartpole_swingup, cheetah_run, fish_swim, manipulator_insert_ball,
walker_stand, walker_walk
"""
self.task_name = task_name
self._uint8_features = set([])
self._environment = None
if task_name == 'swim':
self._domain_name = 'fish'
self._task_name = 'swim'
self._shapes = {
'observation/target': (3,),
'observation/velocity': (13,),
'observation/upright': (1,),
'observation/joint_angles': (7,),
'action': (5,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()
}
elif task_name == 'humanoid_run':
self._domain_name = 'humanoid'
self._task_name = 'run'
self._shapes = {
'observation/velocity': (27,),
'observation/com_velocity': (3,),
'observation/torso_vertical': (3,),
'observation/extremities': (12,),
'observation/head_height': (1,),
'observation/joint_angles': (21,),
'action': (21,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()
}
elif task_name == 'manipulator_insert_ball':
self._domain_name = 'manipulator'
self._task_name = 'insert_ball'
self._shapes = {
'observation/arm_pos': (16,),
'observation/arm_vel': (8,),
'observation/touch': (5,),
'observation/hand_pos': (4,),
'observation/object_pos': (4,),
'observation/object_vel': (3,),
'observation/target_pos': (4,),
'action': (5,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'manipulator_insert_peg':
self._domain_name = 'manipulator'
self._task_name = 'insert_peg'
self._shapes = {
'observation/arm_pos': (16,),
'observation/arm_vel': (8,),
'observation/touch': (5,),
'observation/hand_pos': (4,),
'observation/object_pos': (4,),
'observation/object_vel': (3,),
'observation/target_pos': (4,),
'episodic_reward': (),
'action': (5,),
'discount': (),
'reward': (),
'step_type': ()}
elif task_name == 'cartpole_swingup':
self._domain_name = 'cartpole'
self._task_name = 'swingup'
self._shapes = {
'observation/position': (3,),
'observation/velocity': (2,),
'action': (1,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'walker_walk':
self._domain_name = 'walker'
self._task_name = 'walk'
self._shapes = {
'observation/orientations': (14,),
'observation/velocity': (9,),
'observation/height': (1,),
'action': (6,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'walker_stand':
self._domain_name = 'walker'
self._task_name = 'stand'
self._shapes = {
'observation/orientations': (14,),
'observation/velocity': (9,),
'observation/height': (1,),
'action': (6,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'cheetah_run':
self._domain_name = 'cheetah'
self._task_name = 'run'
self._shapes = {
'observation/position': (8,),
'observation/velocity': (9,),
'action': (6,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'finger_turn_hard':
self._domain_name = 'finger'
self._task_name = 'turn_hard'
self._shapes = {
'observation/position': (4,),
'observation/velocity': (3,),
'observation/touch': (2,),
'observation/target_position': (2,),
'observation/dist_to_target': (1,),
'action': (2,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
else:
raise ValueError('Task \'{}\' not found.'.format(task_name))
self._data_path = 'dm_control_suite/{}/train'.format(task_name)
    @property
    def shapes(self):
        """Mapping from feature name to tensor shape for this task's dataset."""
        return self._shapes
    @property
    def data_path(self):
        """Relative path of this task's training shards (set in __init__)."""
        return self._data_path
    @property
    def uint8_features(self):
        """Set of feature names that are stored as raw uint8 data."""
        return self._uint8_features
@property
def environment(self):
"""Build and return the environment."""
if self._environment is not None:
return self._environment
self._environment = suite.load(
domain_name=self._domain_name,
task_name=self._task_name)
self._environment = wrappers.SinglePrecisionWrapper(self._environment)
self._environment = NormilizeActionSpecWrapper(self._environment)
return self._environment
class CmuThirdParty:
    """Create bits needed to run agents on an locomotion humanoid dataset.

    Exposes the observation/action shapes, the on-disk data path, and a
    lazily-built locomotion environment for one of the CMU humanoid tasks.
    """

    def __init__(self, task_name='humanoid_walls'):
        """Configure shapes and data path for the given humanoid task.

        Args:
            task_name: One of 'humanoid_corridor', 'humanoid_gaps' or
                'humanoid_walls'.

        Raises:
            ValueError: If `task_name` is not a known humanoid task.
        """
        # Task name -> directory of training shards.
        data_paths = {
            'humanoid_corridor': 'dm_locomotion/humanoid_corridor/seq2/train',
            'humanoid_gaps': 'dm_locomotion/humanoid_gaps/seq2/train',
            'humanoid_walls': 'dm_locomotion/humanoid_walls/seq40/train',
        }
        if task_name not in data_paths:
            raise ValueError('Task \'{}\' not found.'.format(task_name))
        self._task_name = task_name
        self._data_path = data_paths[task_name]
        self._pixel_keys = self.get_pixel_keys()
        # The egocentric camera is stored as raw uint8 image data.
        self._uint8_features = {'observation/walker/egocentric_camera'}
        self.additional_paths = {}
        # Proprioceptive observation keys exposed to the agent.
        self._proprio_keys = [
            'walker/joints_vel',
            'walker/sensors_velocimeter',
            'walker/sensors_gyro',
            'walker/joints_pos',
            'walker/world_zaxis',
            'walker/body_height',
            'walker/sensors_accelerometer',
            'walker/end_effectors_pos',
        ]
        # Flat feature-name -> shape mapping used when parsing the dataset.
        self._shapes = {
            'observation/walker/joints_vel': (56,),
            'observation/walker/sensors_velocimeter': (3,),
            'observation/walker/sensors_gyro': (3,),
            'observation/walker/joints_pos': (56,),
            'observation/walker/world_zaxis': (3,),
            'observation/walker/body_height': (1,),
            'observation/walker/sensors_accelerometer': (3,),
            'observation/walker/end_effectors_pos': (12,),
            'observation/walker/egocentric_camera': (64, 64, 3),
            'action': (56,),
            'discount': (),
            'reward': (),
            'episodic_reward': (),
            'step_type': (),
        }

    @staticmethod
    def get_pixel_keys():
        """Names of the image-valued observation keys."""
        return ('walker/egocentric_camera',)

    @property
    def uint8_features(self):
        """Set of feature names stored as raw uint8 data."""
        return self._uint8_features

    @property
    def shapes(self):
        """Feature-name -> tensor-shape mapping for this task."""
        return self._shapes

    @property
    def data_path(self):
        """Relative path of this task's training shards."""
        return self._data_path

    @property
    def environment(self):
        """Build and return the environment."""
        builders = {
            'humanoid_corridor': _build_humanoid_corridor_env,
            'humanoid_gaps': _build_humanoid_corridor_gaps,
            'humanoid_walls': _build_humanoid_walls_env,
        }
        env = builders[self._task_name]()
        env = NormilizeActionSpecWrapper(env)
        env = MujocoActionNormalizer(environment=env, rescale='clip')
        env = wrappers.SinglePrecisionWrapper(env)
        # Expose only the proprioceptive + pixel observations to the agent.
        visible_keys = list(self._proprio_keys) + list(self._pixel_keys)
        self._environment = FilterObservationsWrapper(env, visible_keys)
        return self._environment
class Rodent:
    """Create bits needed to run agents on an Rodent dataset.

    Exposes the observation/action shapes, the on-disk data path, and a
    lazily-built locomotion environment for one of the rodent tasks.
    """

    def __init__(self, task_name='rodent_gaps'):
        """Configure shapes and data path for the given rodent task.

        Args:
            task_name: One of 'rodent_escape', 'rodent_two_touch',
                'rodent_gaps' or 'rodent_mazes'.

        Raises:
            ValueError: If `task_name` is not a known rodent task.
        """
        # Task name -> directory of training shards.
        data_paths = {
            'rodent_gaps': 'dm_locomotion/rodent_gaps/seq2/train',
            'rodent_escape': 'dm_locomotion/rodent_bowl_escape/seq2/train',
            'rodent_two_touch': 'dm_locomotion/rodent_two_touch/seq40/train',
            'rodent_mazes': 'dm_locomotion/rodent_mazes/seq40/train',
        }
        if task_name not in data_paths:
            raise ValueError('Task \'{}\' not found.'.format(task_name))
        self._task_name = task_name
        self._data_path = data_paths[task_name]
        self._pixel_keys = self.get_pixel_keys()
        # The egocentric camera is stored as raw uint8 image data.
        self._uint8_features = {'observation/walker/egocentric_camera'}
        # Proprioceptive observation keys exposed to the agent.
        self._proprio_keys = [
            'walker/joints_pos', 'walker/joints_vel', 'walker/tendons_pos',
            'walker/tendons_vel', 'walker/appendages_pos', 'walker/world_zaxis',
            'walker/sensors_accelerometer', 'walker/sensors_velocimeter',
            'walker/sensors_gyro', 'walker/sensors_touch',
        ]
        # Flat feature-name -> shape mapping used when parsing the dataset.
        self._shapes = {
            'observation/walker/joints_pos': (30,),
            'observation/walker/joints_vel': (30,),
            'observation/walker/tendons_pos': (8,),
            'observation/walker/tendons_vel': (8,),
            'observation/walker/appendages_pos': (15,),
            'observation/walker/world_zaxis': (3,),
            'observation/walker/sensors_accelerometer': (3,),
            'observation/walker/sensors_velocimeter': (3,),
            'observation/walker/sensors_gyro': (3,),
            'observation/walker/sensors_touch': (4,),
            'observation/walker/egocentric_camera': (64, 64, 3),
            'action': (38,),
            'discount': (),
            'reward': (),
            'step_type': (),
        }

    @staticmethod
    def get_pixel_keys():
        """Names of the image-valued observation keys."""
        return ('walker/egocentric_camera',)

    @property
    def shapes(self):
        """Feature-name -> tensor-shape mapping for this task."""
        return self._shapes

    @property
    def uint8_features(self):
        """Set of feature names stored as raw uint8 data."""
        return self._uint8_features

    @property
    def data_path(self):
        """Relative path of this task's training shards."""
        return self._data_path

    @property
    def environment(self):
        """Return environment."""
        builders = {
            'rodent_escape': _build_rodent_escape_env,
            'rodent_gaps': _build_rodent_corridor_gaps,
            'rodent_two_touch': _build_rodent_two_touch_env,
            'rodent_mazes': _build_rodent_maze_env,
        }
        env = builders[self._task_name]()
        env = NormilizeActionSpecWrapper(env)
        env = MujocoActionNormalizer(environment=env, rescale='clip')
        env = wrappers.SinglePrecisionWrapper(env)
        # Expose only the proprioceptive + pixel observations to the agent.
        visible_keys = list(self._proprio_keys) + list(self._pixel_keys)
        self._environment = FilterObservationsWrapper(env, visible_keys)
        return self._environment
def _parse_seq_tf_example(example, uint8_features, shapes):
    """Parse tf.Example containing one or two episode steps."""

    def to_feature(key, shape):
        # uint8 features are serialized byte strings; everything else is
        # float32 with its declared shape.
        if key in uint8_features:
            return tf.io.FixedLenSequenceFeature(
                shape=[], dtype=tf.string, allow_missing=True)
        return tf.io.FixedLenSequenceFeature(
            shape=shape, dtype=tf.float32, allow_missing=True)

    feature_map = {key: to_feature(key, shape)
                   for key, shape in shapes.items()}
    parsed = tf.io.parse_single_example(example, features=feature_map)

    restructured = {}
    observation = {}
    for key, value in parsed.items():
        if 'observation' not in key:
            restructured[key] = value
        elif key in uint8_features:
            # Decode raw bytes back into a uint8 tensor of the stored shape.
            observation[key.replace('observation/', '')] = tf.reshape(
                tf.io.decode_raw(value, out_type=tf.uint8),
                (-1,) + shapes[key])
        else:
            observation[key.replace('observation/', '')] = value
    restructured['observation'] = observation
    restructured['length'] = tf.shape(restructured['action'])[0]
    return restructured
def _build_sequence_example(sequences):
    """Convert raw sequences into a Reverb sequence sample."""
    # Placeholder sample metadata; actual keys/priorities are not used here.
    info = reverb.SampleInfo(
        key=tf.constant(0, tf.uint64),
        probability=tf.constant(1.0, tf.float64),
        table_size=tf.constant(0, tf.int64),
        priority=tf.constant(1.0, tf.float64))
    data = (sequences['observation'], sequences['action'],
            sequences['reward'], sequences['discount'])
    return reverb.ReplaySample(info=info, data=data)
def _build_sarsa_example(sequences):
    """Convert raw sequences into a Reverb n-step SARSA sample."""

    def step(key, index):
        # Select the `index`-th timestep from every leaf of `sequences[key]`.
        return tree.map_structure(lambda t: t[index], sequences[key])

    o_tm1 = step('observation', 0)
    o_t = step('observation', 1)
    a_tm1 = step('action', 0)
    a_t = step('action', 1)
    r_t = step('reward', 0)
    p_t = step('discount', 0)
    # Placeholder sample metadata; actual keys/priorities are not used here.
    info = reverb.SampleInfo(
        key=tf.constant(0, tf.uint64),
        probability=tf.constant(1.0, tf.float64),
        table_size=tf.constant(0, tf.int64),
        priority=tf.constant(1.0, tf.float64))
    return reverb.ReplaySample(info=info, data=(o_tm1, a_tm1, r_t, p_t, o_t, a_t))
def _padded_batch(example_ds, batch_size, shapes, drop_remainder=False):
"""Batch data while handling unequal lengths."""
padded_shapes = {}
padded_shapes['observation'] = {}
for k, v in shapes.items():
if 'observation' in k:
padded_shapes['observation'][
k.replace('observation/', '')] = (-1,) + v
else:
padded_shapes[k] = (-1,) + v
padded_shapes['length'] = ()
return example_ds.padded_batch(batch_size,
padded_shapes=padded_shapes,
drop_remainder=drop_remainder)
def dataset(root_path: str,
            data_path: str,
            shapes: Dict[str, Tuple[int]],
            num_threads: int,
            batch_size: int,
            uint8_features: Optional[Set[str]] = None,
            num_shards: int = 100,
            shuffle_buffer_size: int = 100000,
            sarsa: bool = True) -> tf.data.Dataset:
    """Create tf dataset for training.

    Args:
        root_path: Directory that contains the dataset shards.
        data_path: Task-specific shard path fragment under `root_path`.
        shapes: Mapping from feature name to tensor shape, used for parsing.
        num_threads: Parallelism for the example-parsing map.
        batch_size: Number of examples per batch.
        uint8_features: Feature names stored as raw uint8 (e.g. camera images).
        num_shards: Number of TFRecord shards the dataset is split into.
        shuffle_buffer_size: Size of the example-level shuffle buffer.
        sarsa: If True, emit n-step SARSA tuples; otherwise padded sequences.

    Returns:
        A batched, prefetched `tf.data.Dataset`.
    """
    # Use an empty *set* (not dict) to match the declared Optional[Set[str]];
    # membership tests behave identically.
    uint8_features = uint8_features if uint8_features else set()
    path = os.path.join(root_path, data_path)
    filenames = [f'{path}-{i:05d}-of-{num_shards:05d}' for i in range(num_shards)]
    file_ds = tf.data.Dataset.from_tensor_slices(filenames)
    file_ds = file_ds.repeat().shuffle(num_shards)
    example_ds = file_ds.interleave(
        functools.partial(tf.data.TFRecordDataset, compression_type='GZIP'),
        cycle_length=tf.data.experimental.AUTOTUNE,
        block_length=5)
    example_ds = example_ds.shuffle(shuffle_buffer_size)

    def map_func(example):
        return _parse_seq_tf_example(example, uint8_features, shapes)

    example_ds = example_ds.map(map_func, num_parallel_calls=num_threads)
    example_ds = example_ds.repeat().shuffle(batch_size * 10)
    if sarsa:
        example_ds = example_ds.map(
            _build_sarsa_example,
            num_parallel_calls=tf.data.experimental.AUTOTUNE)
        # Bug fix: Dataset.batch returns a new dataset; the original code
        # discarded the result, so SARSA examples were never batched.
        example_ds = example_ds.batch(batch_size)
    else:
        example_ds = _padded_batch(
            example_ds, batch_size, shapes, drop_remainder=True)
        example_ds = example_ds.map(
            _build_sequence_example,
            num_parallel_calls=tf.data.experimental.AUTOTUNE)
    example_ds = example_ds.prefetch(tf.data.experimental.AUTOTUNE)
    return example_ds
| [
"numpy.clip",
"tensorflow.io.FixedLenSequenceFeature",
"tensorflow.shape",
"acme.wrappers.SinglePrecisionWrapper",
"reverb.ReplaySample",
"dm_control.composer.Environment",
"dm_control.composer.variation.distributions.Uniform",
"dm_control.locomotion.arenas.corridors.GapsCorridor",
"tensorflow.io.de... | [((1924, 1988), 'dm_control.locomotion.arenas.bowl.Bowl', 'arenas.bowl.Bowl', ([], {'size': '(20.0, 20.0)', 'aesthetic': '"""outdoor_natural"""'}), "(size=(20.0, 20.0), aesthetic='outdoor_natural')\n", (1940, 1988), False, 'from dm_control.locomotion import arenas\n'), ((2020, 2118), 'dm_control.locomotion.tasks.escape.Escape', 'tasks.escape.Escape', ([], {'walker': 'walker', 'arena': 'arena', 'physics_timestep': '(0.001)', 'control_timestep': '(0.02)'}), '(walker=walker, arena=arena, physics_timestep=0.001,\n control_timestep=0.02)\n', (2039, 2118), False, 'from dm_control.locomotion import tasks\n'), ((2151, 2249), 'dm_control.composer.Environment', 'composer.Environment', ([], {'time_limit': '(20)', 'task': 'locomotion_task', 'strip_singleton_obs_buffer_dim': '(True)'}), '(time_limit=20, task=locomotion_task,\n strip_singleton_obs_buffer_dim=True)\n', (2171, 2249), False, 'from dm_control import composer\n'), ((2488, 2542), 'dm_control.locomotion.arenas.labmaze_textures.WallTextures', 'arenas.labmaze_textures.WallTextures', ([], {'style': '"""style_01"""'}), "(style='style_01')\n", (2524, 2542), False, 'from dm_control.locomotion import arenas\n'), ((2561, 2803), 'dm_control.locomotion.arenas.mazes.RandomMazeWithTargets', 'arenas.mazes.RandomMazeWithTargets', ([], {'x_cells': '(11)', 'y_cells': '(11)', 'xy_scale': '(0.5)', 'z_height': '(0.3)', 'max_rooms': '(4)', 'room_min_size': '(4)', 'room_max_size': '(5)', 'spawns_per_room': '(1)', 'targets_per_room': '(3)', 'wall_textures': 'wall_textures', 'aesthetic': '"""outdoor_natural"""'}), "(x_cells=11, y_cells=11, xy_scale=0.5,\n z_height=0.3, max_rooms=4, room_min_size=4, room_max_size=5,\n spawns_per_room=1, targets_per_room=3, wall_textures=wall_textures,\n aesthetic='outdoor_natural')\n", (2595, 2803), False, 'from dm_control.locomotion import arenas\n'), ((3291, 3385), 'dm_control.composer.Environment', 'composer.Environment', ([], {'time_limit': '(30)', 'task': 'rodent_task', 
'strip_singleton_obs_buffer_dim': '(True)'}), '(time_limit=30, task=rodent_task,\n strip_singleton_obs_buffer_dim=True)\n', (3311, 3385), False, 'from dm_control import composer\n'), ((3630, 3670), 'dm_control.composer.variation.distributions.Uniform', 'distributions.Uniform', ([], {'low': '(0.4)', 'high': '(0.8)'}), '(low=0.4, high=0.8)\n', (3651, 3670), False, 'from dm_control.composer.variation import distributions\n'), ((3686, 3727), 'dm_control.composer.variation.distributions.Uniform', 'distributions.Uniform', ([], {'low': '(0.05)', 'high': '(0.2)'}), '(low=0.05, high=0.2)\n', (3707, 3727), False, 'from dm_control.composer.variation import distributions\n'), ((3738, 3900), 'dm_control.locomotion.arenas.corridors.GapsCorridor', 'arenas.corridors.GapsCorridor', ([], {'corridor_width': '(2)', 'platform_length': 'platform_length', 'gap_length': 'gap_length', 'corridor_length': '(40)', 'aesthetic': '"""outdoor_natural"""'}), "(corridor_width=2, platform_length=\n platform_length, gap_length=gap_length, corridor_length=40, aesthetic=\n 'outdoor_natural')\n", (3767, 3900), False, 'from dm_control.locomotion import arenas\n'), ((3939, 4193), 'dm_control.locomotion.tasks.corridors.RunThroughCorridor', 'tasks.corridors.RunThroughCorridor', ([], {'walker': 'walker', 'arena': 'arena', 'walker_spawn_position': '(5, 0, 0)', 'walker_spawn_rotation': '(0)', 'target_velocity': '(1.0)', 'contact_termination': '(False)', 'terminate_at_height': '(-0.3)', 'physics_timestep': '(0.001)', 'control_timestep': '(0.02)'}), '(walker=walker, arena=arena,\n walker_spawn_position=(5, 0, 0), walker_spawn_rotation=0,\n target_velocity=1.0, contact_termination=False, terminate_at_height=-\n 0.3, physics_timestep=0.001, control_timestep=0.02)\n', (3973, 4193), False, 'from dm_control.locomotion import tasks\n'), ((4247, 4341), 'dm_control.composer.Environment', 'composer.Environment', ([], {'time_limit': '(30)', 'task': 'rodent_task', 'strip_singleton_obs_buffer_dim': '(True)'}), 
'(time_limit=30, task=rodent_task,\n strip_singleton_obs_buffer_dim=True)\n', (4267, 4341), False, 'from dm_control import composer\n'), ((4583, 4650), 'dm_control.locomotion.arenas.floors.Floor', 'arenas.floors.Floor', ([], {'size': '(10.0, 10.0)', 'aesthetic': '"""outdoor_natural"""'}), "(size=(10.0, 10.0), aesthetic='outdoor_natural')\n", (4602, 4650), False, 'from dm_control.locomotion import arenas\n'), ((5094, 5187), 'dm_control.composer.Environment', 'composer.Environment', ([], {'time_limit': '(30)', 'task': 'task_reach', 'strip_singleton_obs_buffer_dim': '(True)'}), '(time_limit=30, task=task_reach,\n strip_singleton_obs_buffer_dim=True)\n', (5114, 5187), False, 'from dm_control import composer\n'), ((5463, 5499), 'dm_control.composer.variation.distributions.Uniform', 'distributions.Uniform', ([], {'low': '(1)', 'high': '(7)'}), '(low=1, high=7)\n', (5484, 5499), False, 'from dm_control.composer.variation import distributions\n'), ((5516, 5556), 'dm_control.composer.variation.distributions.Uniform', 'distributions.Uniform', ([], {'low': '(2.5)', 'high': '(4.0)'}), '(low=2.5, high=4.0)\n', (5537, 5556), False, 'from dm_control.composer.variation import distributions\n'), ((5576, 5609), 'dm_control.composer.variation.distributions.Bernoulli', 'distributions.Bernoulli', ([], {'prob': '(0.5)'}), '(prob=0.5)\n', (5599, 5609), False, 'from dm_control.composer.variation import distributions\n'), ((5621, 5661), 'dm_control.composer.variation.distributions.Uniform', 'distributions.Uniform', ([], {'low': '(0.5)', 'high': '(0.6)'}), '(low=0.5, high=0.6)\n', (5642, 5661), False, 'from dm_control.composer.variation import distributions\n'), ((5673, 5715), 'dm_control.composer.variation.distributions.Uniform', 'distributions.Uniform', ([], {'low': '(0.21)', 'high': '(0.41)'}), '(low=0.21, high=0.41)\n', (5694, 5715), False, 'from dm_control.composer.variation import distributions\n'), ((5730, 5783), 'dm_control.composer.variation.colors.RgbVariation', 
'colors.RgbVariation', ([], {'r': 'wall_r', 'g': 'wall_g', 'b': '(0)', 'alpha': '(1)'}), '(r=wall_r, g=wall_g, b=0, alpha=1)\n', (5749, 5783), False, 'from dm_control.composer.variation import colors\n'), ((5794, 5977), 'dm_control.locomotion.arenas.WallsCorridor', 'arenas.WallsCorridor', ([], {'wall_gap': '(5.0)', 'wall_width': 'wall_width', 'wall_height': 'wall_height', 'swap_wall_side': 'swap_wall_side', 'wall_rgba': 'wall_rgba', 'corridor_width': '(10)', 'corridor_length': '(100)'}), '(wall_gap=5.0, wall_width=wall_width, wall_height=\n wall_height, swap_wall_side=swap_wall_side, wall_rgba=wall_rgba,\n corridor_width=10, corridor_length=100)\n', (5814, 5977), False, 'from dm_control.locomotion import arenas\n'), ((6030, 6162), 'dm_control.locomotion.tasks.RunThroughCorridor', 'tasks.RunThroughCorridor', ([], {'walker': 'walker', 'arena': 'arena', 'walker_spawn_rotation': '(1.57)', 'physics_timestep': '(0.005)', 'control_timestep': '(0.03)'}), '(walker=walker, arena=arena, walker_spawn_rotation=\n 1.57, physics_timestep=0.005, control_timestep=0.03)\n', (6054, 6162), False, 'from dm_control.locomotion import tasks\n'), ((6211, 6307), 'dm_control.composer.Environment', 'composer.Environment', ([], {'time_limit': '(30)', 'task': 'humanoid_task', 'strip_singleton_obs_buffer_dim': '(True)'}), '(time_limit=30, task=humanoid_task,\n strip_singleton_obs_buffer_dim=True)\n', (6231, 6307), False, 'from dm_control import composer\n'), ((6581, 6641), 'dm_control.locomotion.arenas.EmptyCorridor', 'arenas.EmptyCorridor', ([], {'corridor_width': '(10)', 'corridor_length': '(100)'}), '(corridor_width=10, corridor_length=100)\n', (6601, 6641), False, 'from dm_control.locomotion import arenas\n'), ((6673, 6805), 'dm_control.locomotion.tasks.RunThroughCorridor', 'tasks.RunThroughCorridor', ([], {'walker': 'walker', 'arena': 'arena', 'walker_spawn_rotation': '(1.57)', 'physics_timestep': '(0.005)', 'control_timestep': '(0.03)'}), '(walker=walker, arena=arena, 
walker_spawn_rotation=\n 1.57, physics_timestep=0.005, control_timestep=0.03)\n', (6697, 6805), False, 'from dm_control.locomotion import tasks\n'), ((6854, 6950), 'dm_control.composer.Environment', 'composer.Environment', ([], {'time_limit': '(30)', 'task': 'humanoid_task', 'strip_singleton_obs_buffer_dim': '(True)'}), '(time_limit=30, task=humanoid_task,\n strip_singleton_obs_buffer_dim=True)\n', (6874, 6950), False, 'from dm_control import composer\n'), ((7235, 7275), 'dm_control.composer.variation.distributions.Uniform', 'distributions.Uniform', ([], {'low': '(0.3)', 'high': '(2.5)'}), '(low=0.3, high=2.5)\n', (7256, 7275), False, 'from dm_control.composer.variation import distributions\n'), ((7291, 7333), 'dm_control.composer.variation.distributions.Uniform', 'distributions.Uniform', ([], {'low': '(0.75)', 'high': '(1.25)'}), '(low=0.75, high=1.25)\n', (7312, 7333), False, 'from dm_control.composer.variation import distributions\n'), ((7344, 7463), 'dm_control.locomotion.arenas.GapsCorridor', 'arenas.GapsCorridor', ([], {'corridor_width': '(10)', 'platform_length': 'platform_length', 'gap_length': 'gap_length', 'corridor_length': '(100)'}), '(corridor_width=10, platform_length=platform_length,\n gap_length=gap_length, corridor_length=100)\n', (7363, 7463), False, 'from dm_control.locomotion import arenas\n'), ((7503, 7672), 'dm_control.locomotion.tasks.RunThroughCorridor', 'tasks.RunThroughCorridor', ([], {'walker': 'walker', 'arena': 'arena', 'walker_spawn_position': '(2, 0, 0)', 'walker_spawn_rotation': '(1.57)', 'physics_timestep': '(0.005)', 'control_timestep': '(0.03)'}), '(walker=walker, arena=arena, walker_spawn_position=\n (2, 0, 0), walker_spawn_rotation=1.57, physics_timestep=0.005,\n control_timestep=0.03)\n', (7527, 7672), False, 'from dm_control.locomotion import tasks\n'), ((7723, 7819), 'dm_control.composer.Environment', 'composer.Environment', ([], {'time_limit': '(30)', 'task': 'humanoid_task', 'strip_singleton_obs_buffer_dim': '(True)'}), 
'(time_limit=30, task=humanoid_task,\n strip_singleton_obs_buffer_dim=True)\n', (7743, 7819), False, 'from dm_control import composer\n'), ((22493, 22550), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example'], {'features': 'feature_map'}), '(example, features=feature_map)\n', (22519, 22550), True, 'import tensorflow as tf\n'), ((23550, 23599), 'reverb.ReplaySample', 'reverb.ReplaySample', ([], {'info': 'info', 'data': '(o, a, r, p)'}), '(info=info, data=(o, a, r, p))\n', (23569, 23599), False, 'import reverb\n'), ((23715, 23775), 'tree.map_structure', 'tree.map_structure', (['(lambda t: t[0])', "sequences['observation']"], {}), "(lambda t: t[0], sequences['observation'])\n", (23733, 23775), False, 'import tree\n'), ((23784, 23844), 'tree.map_structure', 'tree.map_structure', (['(lambda t: t[1])', "sequences['observation']"], {}), "(lambda t: t[1], sequences['observation'])\n", (23802, 23844), False, 'import tree\n'), ((23855, 23910), 'tree.map_structure', 'tree.map_structure', (['(lambda t: t[0])', "sequences['action']"], {}), "(lambda t: t[0], sequences['action'])\n", (23873, 23910), False, 'import tree\n'), ((23919, 23974), 'tree.map_structure', 'tree.map_structure', (['(lambda t: t[1])', "sequences['action']"], {}), "(lambda t: t[1], sequences['action'])\n", (23937, 23974), False, 'import tree\n'), ((23983, 24038), 'tree.map_structure', 'tree.map_structure', (['(lambda t: t[0])', "sequences['reward']"], {}), "(lambda t: t[0], sequences['reward'])\n", (24001, 24038), False, 'import tree\n'), ((24047, 24104), 'tree.map_structure', 'tree.map_structure', (['(lambda t: t[0])', "sequences['discount']"], {}), "(lambda t: t[0], sequences['discount'])\n", (24065, 24104), False, 'import tree\n'), ((24372, 24443), 'reverb.ReplaySample', 'reverb.ReplaySample', ([], {'info': 'info', 'data': '(o_tm1, a_tm1, r_t, p_t, o_t, a_t)'}), '(info=info, data=(o_tm1, a_tm1, r_t, p_t, o_t, a_t))\n', (24391, 24443), False, 'import reverb\n'), ((25481, 25515), 
'os.path.join', 'os.path.join', (['root_path', 'data_path'], {}), '(root_path, data_path)\n', (25493, 25515), False, 'import os\n'), ((25610, 25655), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['filenames'], {}), '(filenames)\n', (25644, 25655), True, 'import tensorflow as tf\n'), ((9221, 9322), 'dm_env.specs.BoundedArray', 'specs.BoundedArray', (['action_spec.shape', 'action_spec.dtype', 'minimum', 'maximum'], {'name': 'action_spec.name'}), '(action_spec.shape, action_spec.dtype, minimum, maximum,\n name=action_spec.name)\n', (9239, 9322), False, 'from dm_env import specs\n'), ((10151, 10184), 'collections.OrderedDict', 'collections.OrderedDict', (['filtered'], {}), '(filtered)\n', (10174, 10184), False, 'import collections\n'), ((15760, 15828), 'dm_control.suite.load', 'suite.load', ([], {'domain_name': 'self._domain_name', 'task_name': 'self._task_name'}), '(domain_name=self._domain_name, task_name=self._task_name)\n', (15770, 15828), False, 'from dm_control import suite\n'), ((15871, 15921), 'acme.wrappers.SinglePrecisionWrapper', 'wrappers.SinglePrecisionWrapper', (['self._environment'], {}), '(self._environment)\n', (15902, 15921), False, 'from acme import wrappers\n'), ((18637, 18687), 'acme.wrappers.SinglePrecisionWrapper', 'wrappers.SinglePrecisionWrapper', (['self._environment'], {}), '(self._environment)\n', (18668, 18687), False, 'from acme import wrappers\n'), ((21701, 21751), 'acme.wrappers.SinglePrecisionWrapper', 'wrappers.SinglePrecisionWrapper', (['self._environment'], {}), '(self._environment)\n', (21732, 21751), False, 'from acme import wrappers\n'), ((23010, 23042), 'tensorflow.shape', 'tf.shape', (["restructured['action']"], {}), "(restructured['action'])\n", (23018, 23042), True, 'import tensorflow as tf\n'), ((25747, 25814), 'functools.partial', 'functools.partial', (['tf.data.TFRecordDataset'], {'compression_type': '"""GZIP"""'}), "(tf.data.TFRecordDataset, compression_type='GZIP')\n", (25764, 
25814), False, 'import functools\n'), ((2978, 3109), 'functools.partial', 'functools.partial', (['props.target_sphere.TargetSphere'], {'radius': '(0.05)', 'height_above_ground': '(0.125)', 'rgb1': '(0, 0, 0.4)', 'rgb2': '(0, 0, 0.7)'}), '(props.target_sphere.TargetSphere, radius=0.05,\n height_above_ground=0.125, rgb1=(0, 0, 0.4), rgb2=(0, 0, 0.7))\n', (2995, 3109), False, 'import functools\n'), ((8476, 8511), 'tree.map_structure', 'tree.map_structure', (['np.tanh', 'action'], {}), '(np.tanh, action)\n', (8494, 8511), False, 'import tree\n'), ((22190, 22266), 'tensorflow.io.FixedLenSequenceFeature', 'tf.io.FixedLenSequenceFeature', ([], {'shape': '[]', 'dtype': 'tf.string', 'allow_missing': '(True)'}), '(shape=[], dtype=tf.string, allow_missing=True)\n', (22219, 22266), True, 'import tensorflow as tf\n'), ((22301, 22386), 'tensorflow.io.FixedLenSequenceFeature', 'tf.io.FixedLenSequenceFeature', ([], {'shape': 'shape', 'dtype': 'tf.float32', 'allow_missing': '(True)'}), '(shape=shape, dtype=tf.float32, allow_missing=True\n )\n', (22330, 22386), True, 'import tensorflow as tf\n'), ((23315, 23340), 'tensorflow.constant', 'tf.constant', (['(0)', 'tf.uint64'], {}), '(0, tf.uint64)\n', (23326, 23340), True, 'import tensorflow as tf\n'), ((23381, 23409), 'tensorflow.constant', 'tf.constant', (['(1.0)', 'tf.float64'], {}), '(1.0, tf.float64)\n', (23392, 23409), True, 'import tensorflow as tf\n'), ((23449, 23473), 'tensorflow.constant', 'tf.constant', (['(0)', 'tf.int64'], {}), '(0, tf.int64)\n', (23460, 23473), True, 'import tensorflow as tf\n'), ((23511, 23539), 'tensorflow.constant', 'tf.constant', (['(1.0)', 'tf.float64'], {}), '(1.0, tf.float64)\n', (23522, 23539), True, 'import tensorflow as tf\n'), ((24137, 24162), 'tensorflow.constant', 'tf.constant', (['(0)', 'tf.uint64'], {}), '(0, tf.uint64)\n', (24148, 24162), True, 'import tensorflow as tf\n'), ((24203, 24231), 'tensorflow.constant', 'tf.constant', (['(1.0)', 'tf.float64'], {}), '(1.0, tf.float64)\n', (24214, 
24231), True, 'import tensorflow as tf\n'), ((24271, 24295), 'tensorflow.constant', 'tf.constant', (['(0)', 'tf.int64'], {}), '(0, tf.int64)\n', (24282, 24295), True, 'import tensorflow as tf\n'), ((24333, 24361), 'tensorflow.constant', 'tf.constant', (['(1.0)', 'tf.float64'], {}), '(1.0, tf.float64)\n', (24344, 24361), True, 'import tensorflow as tf\n'), ((4773, 4846), 'functools.partial', 'functools.partial', (['props.target_sphere.TargetSphereTwoTouch'], {'radius': '(0.025)'}), '(props.target_sphere.TargetSphereTwoTouch, radius=0.025)\n', (4790, 4846), False, 'import functools\n'), ((10382, 10415), 'collections.OrderedDict', 'collections.OrderedDict', (['filtered'], {}), '(filtered)\n', (10405, 10415), False, 'import collections\n'), ((22799, 22845), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (['parsed[k]'], {'out_type': 'tf.uint8'}), '(parsed[k], out_type=tf.uint8)\n', (22815, 22845), True, 'import tensorflow as tf\n'), ((8598, 8619), 'numpy.clip', 'np.clip', (['a', '(-1.0)', '(1.0)'], {}), '(a, -1.0, 1.0)\n', (8605, 8619), True, 'import numpy as np\n')] |
import os
import tensorflow as tf
import numpy as np
import matplotlib
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import argparse
from numpy import linalg as LA
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
class Extractor:
    """VGG16-based image feature extractor.

    Builds a VGG16 model truncated at the 'fc2' layer and uses it to turn
    224x224x3 images into L2-normalized feature vectors.
    """

    def __init__(self):
        # weights: 'imagenet'
        # pooling: 'max' or 'avg'
        # input_shape: (width, height, 3), width and height should >= 48
        self.input_shape = (224, 224, 3)
        self.weight = 'imagenet'
        self.pooling = 'max'
        backbone = VGG16(
            weights=self.weight,
            input_shape=(self.input_shape[0], self.input_shape[1],
                         self.input_shape[2]),
            pooling=self.pooling,
            include_top=True)
        # Truncate at the fc2 layer so predict() yields feature vectors.
        self.model = Model(inputs=backbone.inputs,
                           outputs=backbone.get_layer('fc2').output)
        # Warm-up call so the first real extraction is not slowed by graph
        # construction.
        self.model.predict(np.zeros((1, 224, 224, 3)))

    def extract_feat(self, image):
        """Use vgg16 model to extract features.

        Returns a normalized feature vector for a (224, 224, 3) ndarray;
        raises ValueError for any other input.
        """
        if not isinstance(image, np.ndarray):
            raise ValueError("Input is incorrect")
        if image.shape != (224, 224, 3):
            raise ValueError("Input shape is incorrect")
        batch = np.asarray(image, dtype=np.float64)  # This is an int array!
        batch = np.expand_dims(batch, axis=0)
        batch = preprocess_input(batch)
        features = self.model.predict(batch)
        return features[0] / LA.norm(features[0])

    def save_extracted_feat_as_image(self, extracted, save_dir):
        """Plot the feature vector and save it as <save_dir>/extracted.png."""
        plt.plot(extracted)
        plt.savefig(os.path.join(save_dir, 'extracted.png'))
| [
"keras.applications.vgg16.VGG16",
"matplotlib.pyplot.plot",
"os.path.join",
"numpy.asarray",
"numpy.zeros",
"keras.applications.vgg16.preprocess_input",
"numpy.expand_dims",
"numpy.linalg.norm"
] | [((687, 840), 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': 'self.weight', 'input_shape': '(self.input_shape[0], self.input_shape[1], self.input_shape[2])', 'pooling': 'self.pooling', 'include_top': '(True)'}), '(weights=self.weight, input_shape=(self.input_shape[0], self.\n input_shape[1], self.input_shape[2]), pooling=self.pooling, include_top\n =True)\n', (692, 840), False, 'from keras.applications.vgg16 import VGG16\n'), ((1771, 1790), 'matplotlib.pyplot.plot', 'plt.plot', (['extracted'], {}), '(extracted)\n', (1779, 1790), True, 'import matplotlib.pyplot as plt\n'), ((963, 989), 'numpy.zeros', 'np.zeros', (['(1, 224, 224, 3)'], {}), '((1, 224, 224, 3))\n', (971, 989), True, 'import numpy as np\n'), ((1811, 1850), 'os.path.join', 'os.path.join', (['save_dir', '"""extracted.png"""'], {}), "(save_dir, 'extracted.png')\n", (1823, 1850), False, 'import os\n'), ((1239, 1274), 'numpy.asarray', 'np.asarray', (['image'], {'dtype': 'np.float64'}), '(image, dtype=np.float64)\n', (1249, 1274), True, 'import numpy as np\n'), ((1329, 1356), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1343, 1356), True, 'import numpy as np\n'), ((1375, 1396), 'keras.applications.vgg16.preprocess_input', 'preprocess_input', (['img'], {}), '(img)\n', (1391, 1396), False, 'from keras.applications.vgg16 import preprocess_input\n'), ((1472, 1488), 'numpy.linalg.norm', 'LA.norm', (['feat[0]'], {}), '(feat[0])\n', (1479, 1488), True, 'from numpy import linalg as LA\n')] |
import os
import time
import argparse
from datetime import datetime
import subprocess
import pdb
import math
import numpy as np
import pybullet as p
import pickle
import matplotlib.pyplot as plt
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from gym.spaces import Box, Dict
import torch
import torch.nn as nn
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork
import ray
from ray import tune
from ray.tune.logger import DEFAULT_LOGGERS
from ray.tune import register_env
from ray.rllib.agents import ppo
from ray.rllib.agents.ppo import PPOTrainer, PPOTFPolicy
from ray.rllib.examples.policy.random_policy import RandomPolicy
from ray.rllib.utils.test_utils import check_learning_achieved
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models import ModelCatalog
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.env.multi_agent_env import ENV_STATE
from gym_pybullet_drones.envs.BaseAviary import DroneModel, Physics
from gym_pybullet_drones.envs.multi_agent_rl.FlockAviary import FlockAviary
from gym_pybullet_drones.envs.multi_agent_rl.LeaderFollowerAviary import LeaderFollowerAviary
from gym_pybullet_drones.envs.multi_agent_rl.MeetupAviary import MeetupAviary
from gym_pybullet_drones.envs.single_agent_rl.BaseSingleAgentAviary import ActionType, ObservationType
from gym_pybullet_drones.utils.Logger import Logger
import shared_constants
# Length of a single agent's own observation vector; used below as the Box
# shape for the action model's input. None until configured by the experiment
# entry point — TODO confirm where it is set.
OWN_OBS_VEC_SIZE = None
# Length of the flattened action vector; used below when encoding opponent
# actions. Also None until configured — TODO confirm where it is set.
ACTION_VEC_SIZE = None
############################################################
class CustomTorchCentralizedCriticModel(TorchModelV2, nn.Module):
    """Centralized-critic model for a two-agent setting.

    Observations are dicts with keys 'own_obs', 'opponent_obs' and
    'opponent_action'.  Actions are computed from 'own_obs' alone
    (decentralized execution), while the value function consumes the full
    flattened observation — own plus opponent information — for
    centralized training.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs, model_config, name)
        nn.Module.__init__(self)
        # Policy head: only sees the agent's own observation vector.
        own_obs_space = Box(low=-1, high=1, shape=(OWN_OBS_VEC_SIZE, ))
        self.action_model = FullyConnectedNetwork(own_obs_space,
                                                    action_space,
                                                    num_outputs,
                                                    model_config,
                                                    name + "_action")
        # Value head: sees the complete (flattened) dict observation.
        self.value_model = FullyConnectedNetwork(obs_space,
                                                   action_space,
                                                   1,
                                                   model_config,
                                                   name + "_vf")
        self._value_in = None  # inputs cached by forward() for value_function()

    def forward(self, input_dict, state, seq_lens):
        # Stash the flat observation so value_function() can reuse it.
        self._value_in = (input_dict["obs_flat"], state, seq_lens)
        return self.action_model({"obs": input_dict["obs"]["own_obs"]}, state, seq_lens)

    def value_function(self):
        obs_flat, state, seq_lens = self._value_in
        value_out, _ = self.value_model({"obs": obs_flat}, state, seq_lens)
        return torch.reshape(value_out, [-1])
############################################################
class FillInActions(DefaultCallbacks):
    """Callback writing the opponent's actions into each agent's
    postprocessed observation batch (needed by the centralized critic)."""

    def on_postprocess_trajectory(self, worker, episode, agent_id, policy_id, policies, postprocessed_batch, original_batches, **kwargs):
        # Two-agent setting: the opponent is simply the agent we are not.
        opponent_id = 0 if agent_id == 1 else 1
        _, opponent_batch = original_batches[opponent_id]
        # Encode every opponent action into a flat vector.
        encoder = ModelCatalog.get_preprocessor_for_space(
            Box(-np.inf, np.inf, (ACTION_VEC_SIZE,), np.float32)  # Unbounded
        )
        encoded = np.array([encoder.transform(a) for a in opponent_batch[SampleBatch.ACTIONS]])
        # Overwrite the trailing 'opponent_action' slots of the observations.
        obs_batch = postprocessed_batch[SampleBatch.CUR_OBS]
        obs_batch[:, -ACTION_VEC_SIZE:] = encoded
############################################################
def central_critic_observer(agent_obs, **kw):
    """Rewrite a two-agent observation dict so each agent also sees its
    opponent's observation.

    The 'opponent_action' slot is zero-filled here and later overwritten
    by the FillInActions callback.
    """
    def _entry(own, opponent):
        return {
            "own_obs": agent_obs[own],
            "opponent_obs": agent_obs[opponent],
            "opponent_action": np.zeros(ACTION_VEC_SIZE),  # Filled in by FillInActions
        }
    return {0: _entry(0, 1), 1: _entry(1, 0)}
############################################################
if __name__ == "__main__":

    #### Parse the (optional) command-line arguments ###########
    parser = argparse.ArgumentParser(description='Multi-agent reinforcement learning experiments script')
    parser.add_argument('--num_drones', default=2, type=int, help='Number of drones (default: 2)', metavar='')
    parser.add_argument('--env', default='flock', type=str, choices=['leaderfollower', 'flock', 'meetup'], help='Help (default: ..)', metavar='')
    parser.add_argument('--obs', default='kin', type=ObservationType, help='Help (default: ..)', metavar='')
    parser.add_argument('--act', default='one_d_rpm', type=ActionType, help='Help (default: ..)', metavar='')
    parser.add_argument('--algo', default='cc', type=str, choices=['cc'], help='Help (default: ..)', metavar='')
    parser.add_argument('--workers', default=0, type=int, help='Help (default: ..)', metavar='')
    ARGS = parser.parse_args()

    #### Build the save directory name #########################
    script_dir = os.path.dirname(os.path.abspath(__file__))
    timestamp = datetime.now().strftime("%m.%d.%Y_%H.%M.%S")
    run_tag = '-'.join([ARGS.env, str(ARGS.num_drones), ARGS.algo,
                        ARGS.obs.value, ARGS.act.value, timestamp])
    filename = script_dir + '/results/save-' + run_tag
    if not os.path.exists(filename):
        os.makedirs(filename+'/')

    #### Record the current git commit hash ####################
    git_commit = subprocess.check_output(["git", "describe", "--tags"]).strip()
    with open(filename+'/git_commit.txt', 'w+') as f:
        f.write(str(git_commit))

    #### Observation / action vector sizes #####################
    if ARGS.obs == ObservationType.KIN:
        OWN_OBS_VEC_SIZE = 12
    elif ARGS.obs == ObservationType.RGB:
        print("[ERROR] ObservationType.RGB for multi-agent systems not yet implemented")
        exit()
    else:
        print("[ERROR] unknown ObservationType")
        exit()

    if ARGS.act in (ActionType.ONE_D_RPM, ActionType.ONE_D_DYN, ActionType.ONE_D_PID):
        ACTION_VEC_SIZE = 1
    elif ARGS.act in (ActionType.RPM, ActionType.DYN):
        ACTION_VEC_SIZE = 4
    elif ARGS.act == ActionType.PID:
        ACTION_VEC_SIZE = 3
    else:
        print("[ERROR] unknown ActionType")
        exit()

    #### Initialize Ray Tune ###################################
    ray.shutdown()
    ray.init(ignore_reinit_error=True)

    #### Register the custom centralized critic model ##########
    ModelCatalog.register_custom_model("cc_model", CustomTorchCentralizedCriticModel)

    #### Register the environment ##############################
    temp_env_name = "this-aviary-v0"
    env_classes = {'flock': FlockAviary,
                   'leaderfollower': LeaderFollowerAviary,
                   'meetup': MeetupAviary}
    if ARGS.env not in env_classes:
        print("[ERROR] environment not yet implemented")
        exit()
    EnvClass = env_classes[ARGS.env]
    register_env(temp_env_name, lambda _: EnvClass(num_drones=ARGS.num_drones,
                                                   aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS,
                                                   obs=ARGS.obs,
                                                   act=ARGS.act
                                                   )
                 )

    #### Throwaway env used only to read the act/obs spaces ####
    temp_env = EnvClass(num_drones=ARGS.num_drones,
                        aggregate_phy_steps=shared_constants.AGGR_PHY_STEPS,
                        obs=ARGS.obs,
                        act=ARGS.act
                        )
    observer_space = Dict({
        "own_obs": temp_env.observation_space[0],
        "opponent_obs": temp_env.observation_space[0],
        "opponent_action": temp_env.action_space[0],
    })
    action_space = temp_env.action_space[0]

    #### Set up the trainer's config ###########################
    config = ppo.DEFAULT_CONFIG.copy()  # For the default config, see github.com/ray-project/ray/blob/master/rllib/agents/trainer.py
    config = {
        "env": temp_env_name,
        "num_workers": 0 + ARGS.workers,
        "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),  # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0
        "batch_mode": "complete_episodes",
        "callbacks": FillInActions,
        "framework": "torch",
    }

    #### Set up the model parameters of the trainer's config ###
    config["model"] = {
        "custom_model": "cc_model",
    }

    #### Set up the multiagent params of the trainer's config ##
    config["multiagent"] = {
        "policies": {
            "pol0": (None, observer_space, action_space, {"agent_id": 0,}),
            "pol1": (None, observer_space, action_space, {"agent_id": 1,}),
        },
        "policy_mapping_fn": lambda x: "pol0" if x == 0 else "pol1",  # Function mapping agent ids to policy ids
        "observation_fn": central_critic_observer,  # See rllib/evaluation/observation_function.py for more info
    }

    #### Ray Tune stopping conditions ##########################
    stop = {
        "timesteps_total": 100,  # 8000,
        # "episode_reward_mean": 0,
        # "training_iteration": 0,
    }

    #### Train #################################################
    results = tune.run(
        "PPO",
        stop=stop,
        config=config,
        verbose=True,
        checkpoint_at_end=True,
        local_dir=filename,
    )
    # check_learning_achieved(results, 1.0)

    #### Save the path of the best checkpoint ##################
    best_trial = results.get_best_trial('episode_reward_mean', mode='max')
    checkpoints = results.get_trial_checkpoints_paths(trial=best_trial,
                                                      metric='episode_reward_mean')
    with open(filename+'/checkpoint.txt', 'w+') as f:
        f.write(checkpoints[0][0])

    #### Shut down Ray #########################################
    ray.shutdown()
| [
"ray.rllib.agents.ppo.DEFAULT_CONFIG.copy",
"ray.rllib.models.torch.fcnet.FullyConnectedNetwork",
"gym_pybullet_drones.envs.multi_agent_rl.MeetupAviary.MeetupAviary",
"ray.init",
"torch.nn.Module.__init__",
"os.path.exists",
"argparse.ArgumentParser",
"ray.rllib.models.torch.torch_modelv2.TorchModelV2... | [((5268, 5365), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Multi-agent reinforcement learning experiments script"""'}), "(description=\n 'Multi-agent reinforcement learning experiments script')\n", (5291, 5365), False, 'import argparse\n'), ((7880, 7894), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (7892, 7894), False, 'import ray\n'), ((7899, 7933), 'ray.init', 'ray.init', ([], {'ignore_reinit_error': '(True)'}), '(ignore_reinit_error=True)\n', (7907, 7933), False, 'import ray\n'), ((8004, 8089), 'ray.rllib.models.ModelCatalog.register_custom_model', 'ModelCatalog.register_custom_model', (['"""cc_model"""', 'CustomTorchCentralizedCriticModel'], {}), "('cc_model',\n CustomTorchCentralizedCriticModel)\n", (8038, 8089), False, 'from ray.rllib.models import ModelCatalog\n'), ((10800, 10945), 'gym.spaces.Dict', 'Dict', (["{'own_obs': temp_env.observation_space[0], 'opponent_obs': temp_env.\n observation_space[0], 'opponent_action': temp_env.action_space[0]}"], {}), "({'own_obs': temp_env.observation_space[0], 'opponent_obs': temp_env.\n observation_space[0], 'opponent_action': temp_env.action_space[0]})\n", (10804, 10945), False, 'from gym.spaces import Box, Dict\n'), ((11095, 11120), 'ray.rllib.agents.ppo.DEFAULT_CONFIG.copy', 'ppo.DEFAULT_CONFIG.copy', ([], {}), '()\n', (11118, 11120), False, 'from ray.rllib.agents import ppo\n'), ((12456, 12560), 'ray.tune.run', 'tune.run', (['"""PPO"""'], {'stop': 'stop', 'config': 'config', 'verbose': '(True)', 'checkpoint_at_end': '(True)', 'local_dir': 'filename'}), "('PPO', stop=stop, config=config, verbose=True, checkpoint_at_end=\n True, local_dir=filename)\n", (12464, 12560), False, 'from ray import tune\n'), ((13305, 13319), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (13317, 13319), False, 'import ray\n'), ((2328, 2417), 'ray.rllib.models.torch.torch_modelv2.TorchModelV2.__init__', 'TorchModelV2.__init__', (['self', 
'obs_space', 'action_space', 'num_outputs', 'model_config', 'name'], {}), '(self, obs_space, action_space, num_outputs,\n model_config, name)\n', (2349, 2417), False, 'from ray.rllib.models.torch.torch_modelv2 import TorchModelV2\n'), ((2422, 2446), 'torch.nn.Module.__init__', 'nn.Module.__init__', (['self'], {}), '(self)\n', (2440, 2446), True, 'import torch.nn as nn\n'), ((2935, 3012), 'ray.rllib.models.torch.fcnet.FullyConnectedNetwork', 'FullyConnectedNetwork', (['obs_space', 'action_space', '(1)', 'model_config', "(name + '_vf')"], {}), "(obs_space, action_space, 1, model_config, name + '_vf')\n", (2956, 3012), False, 'from ray.rllib.models.torch.fcnet import FullyConnectedNetwork\n'), ((3703, 3733), 'torch.reshape', 'torch.reshape', (['value_out', '[-1]'], {}), '(value_out, [-1])\n', (3716, 3733), False, 'import torch\n'), ((6754, 6778), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (6768, 6778), False, 'import os\n'), ((6788, 6815), 'os.makedirs', 'os.makedirs', (["(filename + '/')"], {}), "(filename + '/')\n", (6799, 6815), False, 'import os\n'), ((9801, 9926), 'gym_pybullet_drones.envs.multi_agent_rl.FlockAviary.FlockAviary', 'FlockAviary', ([], {'num_drones': 'ARGS.num_drones', 'aggregate_phy_steps': 'shared_constants.AGGR_PHY_STEPS', 'obs': 'ARGS.obs', 'act': 'ARGS.act'}), '(num_drones=ARGS.num_drones, aggregate_phy_steps=\n shared_constants.AGGR_PHY_STEPS, obs=ARGS.obs, act=ARGS.act)\n', (9812, 9926), False, 'from gym_pybullet_drones.envs.multi_agent_rl.FlockAviary import FlockAviary\n'), ((2548, 2594), 'gym.spaces.Box', 'Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(OWN_OBS_VEC_SIZE,)'}), '(low=-1, high=1, shape=(OWN_OBS_VEC_SIZE,))\n', (2551, 2594), False, 'from gym.spaces import Box, Dict\n'), ((4211, 4263), 'gym.spaces.Box', 'Box', (['(-np.inf)', 'np.inf', '(ACTION_VEC_SIZE,)', 'np.float32'], {}), '(-np.inf, np.inf, (ACTION_VEC_SIZE,), np.float32)\n', (4214, 4263), False, 'from gym.spaces import Box, Dict\n'), 
((4816, 4841), 'numpy.zeros', 'np.zeros', (['ACTION_VEC_SIZE'], {}), '(ACTION_VEC_SIZE)\n', (4824, 4841), True, 'import numpy as np\n'), ((5007, 5032), 'numpy.zeros', 'np.zeros', (['ACTION_VEC_SIZE'], {}), '(ACTION_VEC_SIZE)\n', (5015, 5032), True, 'import numpy as np\n'), ((6897, 6951), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'describe', '--tags']"], {}), "(['git', 'describe', '--tags'])\n", (6920, 6951), False, 'import subprocess\n'), ((10105, 10239), 'gym_pybullet_drones.envs.multi_agent_rl.LeaderFollowerAviary.LeaderFollowerAviary', 'LeaderFollowerAviary', ([], {'num_drones': 'ARGS.num_drones', 'aggregate_phy_steps': 'shared_constants.AGGR_PHY_STEPS', 'obs': 'ARGS.obs', 'act': 'ARGS.act'}), '(num_drones=ARGS.num_drones, aggregate_phy_steps=\n shared_constants.AGGR_PHY_STEPS, obs=ARGS.obs, act=ARGS.act)\n', (10125, 10239), False, 'from gym_pybullet_drones.envs.multi_agent_rl.LeaderFollowerAviary import LeaderFollowerAviary\n'), ((11324, 11361), 'os.environ.get', 'os.environ.get', (['"""RLLIB_NUM_GPUS"""', '"""0"""'], {}), "('RLLIB_NUM_GPUS', '0')\n", (11338, 11361), False, 'import os\n'), ((6698, 6712), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6710, 6712), False, 'from datetime import datetime\n'), ((8263, 8388), 'gym_pybullet_drones.envs.multi_agent_rl.FlockAviary.FlockAviary', 'FlockAviary', ([], {'num_drones': 'ARGS.num_drones', 'aggregate_phy_steps': 'shared_constants.AGGR_PHY_STEPS', 'obs': 'ARGS.obs', 'act': 'ARGS.act'}), '(num_drones=ARGS.num_drones, aggregate_phy_steps=\n shared_constants.AGGR_PHY_STEPS, obs=ARGS.obs, act=ARGS.act)\n', (8274, 8388), False, 'from gym_pybullet_drones.envs.multi_agent_rl.FlockAviary import FlockAviary\n'), ((10446, 10572), 'gym_pybullet_drones.envs.multi_agent_rl.MeetupAviary.MeetupAviary', 'MeetupAviary', ([], {'num_drones': 'ARGS.num_drones', 'aggregate_phy_steps': 'shared_constants.AGGR_PHY_STEPS', 'obs': 'ARGS.obs', 'act': 'ARGS.act'}), '(num_drones=ARGS.num_drones, 
aggregate_phy_steps=\n shared_constants.AGGR_PHY_STEPS, obs=ARGS.obs, act=ARGS.act)\n', (10458, 10572), False, 'from gym_pybullet_drones.envs.multi_agent_rl.MeetupAviary import MeetupAviary\n'), ((8725, 8859), 'gym_pybullet_drones.envs.multi_agent_rl.LeaderFollowerAviary.LeaderFollowerAviary', 'LeaderFollowerAviary', ([], {'num_drones': 'ARGS.num_drones', 'aggregate_phy_steps': 'shared_constants.AGGR_PHY_STEPS', 'obs': 'ARGS.obs', 'act': 'ARGS.act'}), '(num_drones=ARGS.num_drones, aggregate_phy_steps=\n shared_constants.AGGR_PHY_STEPS, obs=ARGS.obs, act=ARGS.act)\n', (8745, 8859), False, 'from gym_pybullet_drones.envs.multi_agent_rl.LeaderFollowerAviary import LeaderFollowerAviary\n'), ((9224, 9350), 'gym_pybullet_drones.envs.multi_agent_rl.MeetupAviary.MeetupAviary', 'MeetupAviary', ([], {'num_drones': 'ARGS.num_drones', 'aggregate_phy_steps': 'shared_constants.AGGR_PHY_STEPS', 'obs': 'ARGS.obs', 'act': 'ARGS.act'}), '(num_drones=ARGS.num_drones, aggregate_phy_steps=\n shared_constants.AGGR_PHY_STEPS, obs=ARGS.obs, act=ARGS.act)\n', (9236, 9350), False, 'from gym_pybullet_drones.envs.multi_agent_rl.MeetupAviary import MeetupAviary\n'), ((6564, 6589), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (6579, 6589), False, 'import os\n')] |
# coding: utf-8
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
from .pretrainedmodels import inceptionresnetv2
def l2norm(input, p=2.0, dim=1, eps=1e-12):
    """L2-normalize ``input`` along dimension ``dim``.

    Note: despite the name, this does NOT return the norm itself — it
    returns ``input`` divided by its p-norm, so e.g. for a 2-D tensor
    every row ends up with unit length.

    Parameters
    ----------
    input : torch.Tensor
        Tensor to normalize.
    p : float
        Order of the norm (2.0 gives the Euclidean norm).
    dim : int
        Dimension along which to normalize.
    eps : float
        Lower bound on the norm, avoiding division by zero
        (an all-zero slice stays all-zero).

    Returns
    -------
    torch.Tensor
        Tensor of the same shape as ``input`` with unit-norm slices.
    """
    # keepdim=True lets the norm broadcast against `input`; the previous
    # trailing `.expand_as(input)` was a redundant no-op and is dropped.
    norm = input.norm(p, dim, keepdim=True).clamp(min=eps)
    return input / norm
def xavier_weight(tensor):
    """Fill ``tensor`` in place with Xavier-style Gaussian noise.

    The standard deviation sqrt(6) / sqrt(fan_in + fan_out) is computed
    from the first two dimensions of ``tensor``; the filled tensor is
    returned (``normal_`` mutates in place).
    """
    fan_in, fan_out = tensor.size(0), tensor.size(1)
    std = np.sqrt(6.) / np.sqrt(fan_in + fan_out)
    return tensor.normal_(0, std)
class ImageEncoder(nn.Module):
    """Image feature extractor built on InceptionResNetV2.

    The pretrained network's final child module (the classifier head) is
    dropped, so the forward pass yields the penultimate feature
    representation.  The backbone's preprocessing constants (mean, std,
    input size) are exposed as attributes for callers.
    """

    def __init__(self):
        super(ImageEncoder, self).__init__()
        self.register_buffer('device_id', torch.IntTensor(1))
        backbone = inceptionresnetv2(1000)
        # Keep every child module except the last one (the classifier).
        layers = list(backbone.children())[:-1]
        self.encoder = nn.Sequential(*layers)
        self.mean = backbone.mean
        self.std = backbone.std
        self.input_size = backbone.input_size

    def forward(self, x):
        return self.encoder(x)
class ImgSenRanking(torch.nn.Module):  # WORD-level variant
    """Joint image/sentence embedding model.

    Sentences (sequences of 300-d word vectors) are encoded by a GRU and
    the final hidden state is projected into a shared space; image
    features get a linear projection into the same space.  Both outputs
    are L2-normalized, so cosine similarity reduces to a dot product.
    """

    def __init__(self, dim_image, sent_dim, hid_dim):
        super(ImgSenRanking, self).__init__()
        self.register_buffer('device_id', torch.IntTensor(1))
        self.linear_img = nn.Linear(dim_image, hid_dim)
        self.txt_encoder = nn.GRU(300, 512, batch_first=True)
        self.linear_sent = nn.Linear(sent_dim, hid_dim)
        self.init_weights()

    def init_weights(self):
        """Xavier-initialize both projection layers and zero their biases."""
        xavier_weight(self.linear_img.weight.data)
        xavier_weight(self.linear_sent.weight.data)
        for layer in (self.linear_img, self.linear_sent):
            layer.bias.data.fill_(0)

    def forward(self, txt, img):
        img_vec = self.linear_img(img)
        # nn.GRU returns (outputs, h_n); the final hidden state h_n is the
        # sentence representation.
        _, hidden = self.txt_encoder(txt)
        sent_vec = self.linear_sent(hidden.squeeze(0))
        return l2norm(sent_vec), l2norm(img_vec)
# class ImgSenRanking(torch.nn.Module): # CHAR
# def __init__(self, dim_image, sent_dim, hid_dim):
# super(ImgSenRanking, self).__init__()
# self.register_buffer('device_id', torch.IntTensor(1))
# self.linear_img = torch.nn.Linear(dim_image, hid_dim)
# self.linear_sent = torch.nn.Linear(sent_dim, hid_dim)
# self.init_weights()
# def init_weights(self):
# xavier_weight(self.linear_img.weight.data)
# xavier_weight(self.linear_sent.weight.data)
# self.linear_img.bias.data.fill_(0)
# self.linear_sent.bias.data.fill_(0)
# def forward(self, sent, img):
# img_vec = self.linear_img(img)
# sent_vec = self.linear_sent(sent)
# return l2norm(sent_vec), l2norm(img_vec) | [
"torch.IntTensor",
"numpy.sqrt",
"torch.nn.Linear",
"torch.nn.GRU"
] | [((481, 493), 'numpy.sqrt', 'np.sqrt', (['(6.0)'], {}), '(6.0)\n', (488, 493), True, 'import numpy as np\n'), ((495, 514), 'numpy.sqrt', 'np.sqrt', (['(nin + nout)'], {}), '(nin + nout)\n', (502, 514), True, 'import numpy as np\n'), ((1290, 1319), 'torch.nn.Linear', 'nn.Linear', (['dim_image', 'hid_dim'], {}), '(dim_image, hid_dim)\n', (1299, 1319), True, 'import torch.nn as nn\n'), ((1347, 1381), 'torch.nn.GRU', 'nn.GRU', (['(300)', '(512)'], {'batch_first': '(True)'}), '(300, 512, batch_first=True)\n', (1353, 1381), True, 'import torch.nn as nn\n'), ((1409, 1437), 'torch.nn.Linear', 'nn.Linear', (['sent_dim', 'hid_dim'], {}), '(sent_dim, hid_dim)\n', (1418, 1437), True, 'import torch.nn as nn\n'), ((691, 709), 'torch.IntTensor', 'torch.IntTensor', (['(1)'], {}), '(1)\n', (706, 709), False, 'import torch\n'), ((1243, 1261), 'torch.IntTensor', 'torch.IntTensor', (['(1)'], {}), '(1)\n', (1258, 1261), False, 'import torch\n')] |
from __future__ import absolute_import
from builtins import str
from builtins import range
from anuga.coordinate_transforms.geo_reference import Geo_reference, DEFAULT_ZONE
from anuga.geometry.polygon import point_in_polygon, populate_polygon
from anuga.utilities.numerical_tools import ensure_numeric
import numpy as num
from anuga.geometry.polygon import inside_polygon
from anuga.geometry.polygon import polylist2points_verts
import anuga.utilities.log as log
import datetime
# This is due to pmesh being a package and a module and
# the current dir being unknown
try:
from anuga.pmesh.mesh import Mesh
except ImportError:
from .mesh import Mesh
# Python 2.7 Hack
try:
from exceptions import Exception
except:
pass
class PolygonError(Exception):
    """Raised when an interior polygon is invalid or lies outside the
    bounding polygon."""
class SegmentError(Exception):
    """Raised when a boundary tag references a segment index that is out
    of bounds for the bounding polygon."""
def create_mesh_from_regions(bounding_polygon,
                             boundary_tags,
                             maximum_triangle_area=None,
                             filename=None,
                             interior_regions=None,
                             interior_holes=None,
                             hole_tags=None,
                             poly_geo_reference=None,
                             mesh_geo_reference=None,
                             breaklines=None,
                             regionPtArea=None,
                             minimum_triangle_angle=28.0,
                             fail_if_polygons_outside=True,
                             use_cache=False,
                             verbose=True):
    """Create mesh from bounding polygons, and resolutions.

    bounding_polygon is a list of points in Eastings and Northings,
    relative to the poly_geo_reference.

    Boundary tags is a dictionary of symbolic tags. For every tag there
    is a list of indices referring to segments associated with that tag.
    If a segment is omitted an Exception will be raised.

    maximum_triangle_area is the maximal area per triangle
    for the bounding polygon, excluding the interior regions.

    Interior_regions is a list of tuples consisting of (polygon,
    resolution) for each region to be separately refined. Do not have
    polygon lines cross or be on-top of each other. Also do not have
    polygon close to each other.

    NOTE: If an interior_region is outside the bounding_polygon it should
    throw an error.

    Interior_holes is a list of polygons for each hole.
    hole_tags is an optional list of boundary tags for the holes, see
    boundary_tags parameter.

    This function does not allow segments to share points - use underlying
    pmesh functionality for that.

    poly_geo_reference is the geo_reference of the bounding polygon and
    the interior polygons.
    If none, assume absolute. Please pass one though, since absolute
    references have a zone.

    mesh_geo_reference is the geo_reference of the mesh to be created.
    If none is given one will be automatically generated. It will use
    the lower left hand corner of bounding_polygon (absolute)
    as the x and y values for the geo_ref.

    breaklines is a list of polygons. These lines will be preserved by the
    triangulation algorithm - useful for coastlines, walls, etc.
    The polygons are not closed.

    regionPtArea is a list of user-specified point-based regions with max area.

    Returns the mesh instance if no filename is given.

    Note, interior regions should be fully nested, as overlaps may cause
    unintended resolutions.

    fail_if_polygons_outside: If True (the default) Exception is thrown
    where interior polygons fall outside bounding polygon. If False, these
    will be ignored and execution continued.
    """

    # Optional timing/diagnostic output before the real work starts.
    if verbose:
        log.resource_usage_timing(log.logging.CRITICAL, "start_")
    if verbose:
        log.timingInfo("maximum_triangle_area, " + str(maximum_triangle_area))
    if verbose:
        log.timingInfo("minimum_triangle_angle, " +
                       str(minimum_triangle_angle))
    if verbose:
        log.timingInfo("startMesh, '%s'" % log.CurrentDateTime())

    # Build arguments and keyword arguments for use with caching or apply.
    # NOTE: the exact structure of `args`/`kwargs` doubles as the cache key
    # below, so do not reorganize it without considering cache invalidation.
    args = (bounding_polygon,
            boundary_tags)
    kwargs = {'maximum_triangle_area': maximum_triangle_area,
              'filename': filename,
              'interior_regions': interior_regions,
              'interior_holes': interior_holes,
              'hole_tags': hole_tags,
              'poly_geo_reference': poly_geo_reference,
              'mesh_geo_reference': mesh_geo_reference,
              'minimum_triangle_angle': minimum_triangle_angle,
              'fail_if_polygons_outside': fail_if_polygons_outside,
              'breaklines': breaklines,
              'verbose': verbose,
              'regionPtArea': regionPtArea}  # FIXME (Ole): Should be bypassed one day. See ticket:14

    # Call underlying engine with or without caching
    if use_cache is True:
        try:
            from anuga.caching import cache
        except:
            # NOTE(review): the two fragments below concatenate without a
            # space ("...modulecould not...") — runtime string left
            # unchanged here, but worth fixing.
            msg = 'Caching was requested, but caching module' +\
                  'could not be imported'
            raise Exception(msg)

        m = cache(_create_mesh_from_regions,
                  args, kwargs,
                  verbose=verbose,
                  compression=False)
    else:
        m = _create_mesh_from_regions(*args, **kwargs)

    return m
def _create_mesh_from_regions(bounding_polygon,
                              boundary_tags,
                              maximum_triangle_area=None,
                              filename=None,
                              interior_regions=None,
                              interior_holes=None,
                              hole_tags=None,
                              poly_geo_reference=None,
                              mesh_geo_reference=None,
                              minimum_triangle_angle=28.0,
                              fail_if_polygons_outside=True,
                              breaklines=None,
                              verbose=True,
                              regionPtArea=None):
    """_create_mesh_from_regions - internal function.

    Does the actual mesh construction (validation, geo-referencing,
    region/hole/breakline insertion) without any caching.

    See create_mesh_from_regions for documentation.
    """

    # check the segment indexes - throw an error if they are out of bounds
    if boundary_tags is not None:
        max_points = len(bounding_polygon)
        for key in list(boundary_tags.keys()):
            if len([x for x in boundary_tags[key] if x > max_points-1]) >= 1:
                msg = 'Boundary tag %s has segment out of bounds. '\
                      % (str(key))
                msg += 'Number of points in bounding polygon = %d' % max_points
                raise SegmentError(msg)

        # Warn (do not fail) about segments that were given no tag at all.
        for i in range(max_points):
            found = False
            for tag in boundary_tags:
                if i in boundary_tags[tag]:
                    found = True
            if found is False:
                msg = 'Segment %d was not assigned a boundary_tag.' % i
                msg += 'Default tag "exterior" will be assigned to missing segment'
                #raise Exception(msg)
                # Fixme: Use proper Python warning
                if verbose:
                    log.critical('WARNING: %s' % msg)

    # In addition I reckon the polygons could be of class Geospatial_data
    # (DSG) If polygons were classes caching would break in places.

    # Simple check: coerce to a numeric Nx2 array.
    bounding_polygon = ensure_numeric(bounding_polygon, float)
    msg = 'Bounding polygon must be a list of points or an Nx2 array'
    assert len(bounding_polygon.shape) == 2, msg
    assert bounding_polygon.shape[1] == 2, msg

    #
    if interior_regions is not None:

        # Test that all the interior polygons are inside the
        # bounding_poly and throw out those that aren't fully
        # included. #Note, Both poly's have the same geo_ref,
        # therefore don't take into account # geo_ref

        polygons_inside_boundary = []
        for interior_polygon, res in interior_regions:
            indices = inside_polygon(interior_polygon, bounding_polygon,
                                     closed=True, verbose=False)
            if len(indices) != len(interior_polygon):
                msg = 'Interior polygon %s is not fully inside'\
                      % (str(interior_polygon))
                msg += ' bounding polygon: %s.' % (str(bounding_polygon))

                if fail_if_polygons_outside is True:
                    raise PolygonError(msg)
                else:
                    msg += ' I will ignore it.'
                    log.critical(msg)

            else:
                polygons_inside_boundary.append([interior_polygon, res])

        # Record only those that were fully contained
        interior_regions = polygons_inside_boundary

    # the following segment of code could be used to Test that all the
    # interior polygons are inside the bounding_poly... however it might need
    # to be change a bit
    #
    #count = 0
    # for i in range(len(interior_regions)):
    #    region = interior_regions[i]
    #    interior_polygon = region[0]
    #    if len(inside_polygon(interior_polygon, bounding_polygon,
    #                   closed = True, verbose = False)) <> len(interior_polygon):
    #        print 'WARNING: interior polygon %d is outside bounding polygon' %(i)
    #        count += 1
    # if count == 0:
    #    print 'interior regions OK'
    # else:
    #    print 'check out your interior polygons'
    #    print 'check %s in production directory' %figname
    #    import sys; sys.exit()

    if interior_holes is not None:
        # Test that all the interior polygons are inside the bounding_poly
        for interior_polygon in interior_holes:

            # Test that we have a polygon (at least 3 (x, y) points)
            if len(num.array(interior_polygon).flat) < 6:
                msg = 'Interior hole polygon %s has too few (<3) points.\n' \
                    % (str(interior_polygon))
                msg = msg + \
                    '(Insure that you have specified a LIST of interior hole polygons)'
                raise PolygonError(msg)

            indices = inside_polygon(interior_polygon, bounding_polygon,
                                     closed=True, verbose=False)
            if len(indices) != len(interior_polygon):
                msg = 'Interior polygon %s is outside bounding polygon: %s'\
                      % (str(interior_polygon), str(bounding_polygon))
                raise PolygonError(msg)

    # Resolve geo referencing
    if mesh_geo_reference is None:
        # Use the lower-left corner of the bounding polygon as the origin.
        xllcorner = min(bounding_polygon[:, 0])
        yllcorner = min(bounding_polygon[:, 1])
        #
        if poly_geo_reference is None:
            zone = DEFAULT_ZONE
        else:
            zone = poly_geo_reference.get_zone()
            [(xllcorner, yllcorner)] = poly_geo_reference.get_absolute(
                [(xllcorner, yllcorner)])
        # create a geo_ref, based on the llc of the bounding_polygon
        mesh_geo_reference = Geo_reference(xllcorner=xllcorner,
                                           yllcorner=yllcorner,
                                           zone=zone)

    m = Mesh(geo_reference=mesh_geo_reference)

    # build a list of discrete segments from the breakline polygons
    if breaklines is not None:
        points, verts = polylist2points_verts(breaklines)
        m.add_points_and_segments(points, verts)

    # Do bounding polygon
    m.add_region_from_polygon(bounding_polygon,
                              segment_tags=boundary_tags,
                              geo_reference=poly_geo_reference)

    # Find one point inside region automatically
    if interior_regions is not None:
        excluded_polygons = []
        for polygon, res in interior_regions:
            excluded_polygons.append(polygon)
    else:
        excluded_polygons = None
    # NOTE(review): excluded_polygons is built but never used below —
    # candidate for removal.

    # Convert bounding poly to absolute values
    # this sort of thing can be fixed with the geo_points class
    if poly_geo_reference is not None:
        bounding_polygon_absolute = \
            poly_geo_reference.get_absolute(bounding_polygon)
    else:
        bounding_polygon_absolute = bounding_polygon

    inner_point = point_in_polygon(bounding_polygon_absolute)
    inner = m.add_region(inner_point[0], inner_point[1])
    inner.setMaxArea(maximum_triangle_area)

    # Do interior regions
    # if interior_regions is not None:
    #    for polygon, res in interior_regions:
    #        m.add_region_from_polygon(polygon,
    #                                 geo_reference=poly_geo_reference)
    #        # convert bounding poly to absolute values
    #        if poly_geo_reference is not None:
    #            polygon_absolute = \
    #                poly_geo_reference.get_absolute(polygon)
    #        else:
    #            polygon_absolute = polygon
    #        inner_point = point_in_polygon(polygon_absolute)
    #        region = m.add_region(inner_point[0], inner_point[1])
    #        region.setMaxArea(res)

    if interior_regions is not None:
        for polygon, res in interior_regions:
            m.add_region_from_polygon(polygon,
                                      max_triangle_area=res,
                                      geo_reference=poly_geo_reference)

    # Do interior holes
    if interior_holes is not None:
        for n, polygon in enumerate(interior_holes):
            try:
                tags = hole_tags[n]
            except:
                # No (or too few) hole tags supplied: use an empty tag dict.
                tags = {}
            m.add_hole_from_polygon(polygon,
                                    segment_tags=tags,
                                    geo_reference=poly_geo_reference)

    # 22/04/2014
    # Add user-specified point-based regions with max area
    if(regionPtArea is not None):
        for i in range(len(regionPtArea)):
            inner = m.add_region(regionPtArea[i][0], regionPtArea[i][1])
            inner.setMaxArea(regionPtArea[i][2])

    # NOTE (Ole): This was moved here as it is annoying if mesh is always
    # stored irrespective of whether the computation
    # was cached or not. This caused Domain to
    # recompute as it has meshfile as a dependency

    # Decide whether to store this mesh or return it
    if filename is None:
        return m
    else:
        if verbose:
            log.critical("Generating mesh to file '%s'" % filename)

        m.generate_mesh(minimum_triangle_angle=minimum_triangle_angle,
                        verbose=verbose)
        m.export_mesh_file(filename)
        return m
| [
"anuga.caching.cache",
"anuga.utilities.numerical_tools.ensure_numeric",
"anuga.coordinate_transforms.geo_reference.Geo_reference",
"builtins.str",
"anuga.pmesh.mesh.Mesh",
"exceptions.Exception",
"anuga.geometry.polygon.point_in_polygon",
"builtins.range",
"anuga.utilities.log.resource_usage_timing... | [((7508, 7547), 'anuga.utilities.numerical_tools.ensure_numeric', 'ensure_numeric', (['bounding_polygon', 'float'], {}), '(bounding_polygon, float)\n', (7522, 7547), False, 'from anuga.utilities.numerical_tools import ensure_numeric\n'), ((11197, 11235), 'anuga.pmesh.mesh.Mesh', 'Mesh', ([], {'geo_reference': 'mesh_geo_reference'}), '(geo_reference=mesh_geo_reference)\n', (11201, 11235), False, 'from anuga.pmesh.mesh import Mesh\n'), ((12226, 12269), 'anuga.geometry.polygon.point_in_polygon', 'point_in_polygon', (['bounding_polygon_absolute'], {}), '(bounding_polygon_absolute)\n', (12242, 12269), False, 'from anuga.geometry.polygon import point_in_polygon, populate_polygon\n'), ((3782, 3839), 'anuga.utilities.log.resource_usage_timing', 'log.resource_usage_timing', (['log.logging.CRITICAL', '"""start_"""'], {}), "(log.logging.CRITICAL, 'start_')\n", (3807, 3839), True, 'import anuga.utilities.log as log\n'), ((5234, 5321), 'anuga.caching.cache', 'cache', (['_create_mesh_from_regions', 'args', 'kwargs'], {'verbose': 'verbose', 'compression': '(False)'}), '(_create_mesh_from_regions, args, kwargs, verbose=verbose, compression\n =False)\n', (5239, 5321), False, 'from anuga.caching import cache\n'), ((6804, 6821), 'builtins.range', 'range', (['max_points'], {}), '(max_points)\n', (6809, 6821), False, 'from builtins import range\n'), ((11035, 11101), 'anuga.coordinate_transforms.geo_reference.Geo_reference', 'Geo_reference', ([], {'xllcorner': 'xllcorner', 'yllcorner': 'yllcorner', 'zone': 'zone'}), '(xllcorner=xllcorner, yllcorner=yllcorner, zone=zone)\n', (11048, 11101), False, 'from anuga.coordinate_transforms.geo_reference import Geo_reference, DEFAULT_ZONE\n'), ((11360, 11393), 'anuga.geometry.polygon.polylist2points_verts', 'polylist2points_verts', (['breaklines'], {}), '(breaklines)\n', (11381, 11393), False, 'from anuga.geometry.polygon import polylist2points_verts\n'), ((8115, 8193), 
'anuga.geometry.polygon.inside_polygon', 'inside_polygon', (['interior_polygon', 'bounding_polygon'], {'closed': '(True)', 'verbose': '(False)'}), '(interior_polygon, bounding_polygon, closed=True, verbose=False)\n', (8129, 8193), False, 'from anuga.geometry.polygon import inside_polygon\n'), ((10158, 10236), 'anuga.geometry.polygon.inside_polygon', 'inside_polygon', (['interior_polygon', 'bounding_polygon'], {'closed': '(True)', 'verbose': '(False)'}), '(interior_polygon, bounding_polygon, closed=True, verbose=False)\n', (10172, 10236), False, 'from anuga.geometry.polygon import inside_polygon\n'), ((14323, 14378), 'anuga.utilities.log.critical', 'log.critical', (['("Generating mesh to file \'%s\'" % filename)'], {}), '("Generating mesh to file \'%s\'" % filename)\n', (14335, 14378), True, 'import anuga.utilities.log as log\n'), ((3907, 3933), 'builtins.str', 'str', (['maximum_triangle_area'], {}), '(maximum_triangle_area)\n', (3910, 3933), False, 'from builtins import str\n'), ((4026, 4053), 'builtins.str', 'str', (['minimum_triangle_angle'], {}), '(minimum_triangle_angle)\n', (4029, 4053), False, 'from builtins import str\n'), ((4114, 4135), 'anuga.utilities.log.CurrentDateTime', 'log.CurrentDateTime', ([], {}), '()\n', (4133, 4135), True, 'import anuga.utilities.log as log\n'), ((5206, 5220), 'exceptions.Exception', 'Exception', (['msg'], {}), '(msg)\n', (5215, 5220), False, 'from exceptions import Exception\n'), ((6656, 6664), 'builtins.str', 'str', (['key'], {}), '(key)\n', (6659, 6664), False, 'from builtins import str\n'), ((7288, 7321), 'anuga.utilities.log.critical', 'log.critical', (["('WARNING: %s' % msg)"], {}), "('WARNING: %s' % msg)\n", (7300, 7321), True, 'import anuga.utilities.log as log\n'), ((8376, 8397), 'builtins.str', 'str', (['interior_polygon'], {}), '(interior_polygon)\n', (8379, 8397), False, 'from builtins import str\n'), ((8450, 8471), 'builtins.str', 'str', (['bounding_polygon'], {}), '(bounding_polygon)\n', (8453, 8471), False, 'from 
builtins import str\n'), ((8661, 8678), 'anuga.utilities.log.critical', 'log.critical', (['msg'], {}), '(msg)\n', (8673, 8678), True, 'import anuga.utilities.log as log\n'), ((9954, 9975), 'builtins.str', 'str', (['interior_polygon'], {}), '(interior_polygon)\n', (9957, 9975), False, 'from builtins import str\n'), ((9814, 9841), 'numpy.array', 'num.array', (['interior_polygon'], {}), '(interior_polygon)\n', (9823, 9841), True, 'import numpy as num\n'), ((10431, 10452), 'builtins.str', 'str', (['interior_polygon'], {}), '(interior_polygon)\n', (10434, 10452), False, 'from builtins import str\n'), ((10454, 10475), 'builtins.str', 'str', (['bounding_polygon'], {}), '(bounding_polygon)\n', (10457, 10475), False, 'from builtins import str\n')] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import NamedTuple, Optional, Iterable, Dict, Any
import logging
import numpy as np
import pytorch_lightning as pl
import torch.nn as nn
from gluonts.core.component import validated
from gluonts.dataset.common import Dataset
from gluonts.itertools import Cached
from gluonts.model.estimator import Estimator
from gluonts.torch.model.predictor import PyTorchPredictor
from gluonts.transform import Transformation
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class TrainOutput(NamedTuple):
    """Bundle of artifacts produced by ``PyTorchLightningEstimator.train_model``."""
    transformation: Transformation  # entry-wise dataset transformation used at train and predict time
    trained_net: nn.Module          # network re-loaded from the best checkpoint after fitting
    trainer: pl.Trainer             # the Lightning trainer that performed the fit
    predictor: PyTorchPredictor     # predictor wrapping the trained network
class PyTorchLightningEstimator(Estimator):
    """
    An `Estimator` type with utilities for creating PyTorch-Lightning-based models.
    To extend this class, one needs to implement five methods:
    `create_transformation`, `create_lightning_module`, `create_predictor`,
    `create_training_data_loader`, and `create_validation_data_loader`.
    """
    @validated()
    def __init__(
        self,
        trainer_kwargs: Dict[str, Any],
        lead_time: int = 0,
    ) -> None:
        """
        Parameters
        ----------
        trainer_kwargs
            Keyword arguments forwarded verbatim to ``pl.Trainer`` in
            ``train_model`` (a checkpoint callback is prepended to any
            ``callbacks`` entry there).
        lead_time
            Forwarded to the base ``Estimator``.
        """
        super().__init__(lead_time=lead_time)
        self.trainer_kwargs = trainer_kwargs
    def create_transformation(self) -> Transformation:
        """
        Create and return the transformation needed for training and inference.
        Returns
        -------
        Transformation
            The transformation that will be applied entry-wise to datasets,
            at training and inference time.
        """
        raise NotImplementedError
    def create_lightning_module(self) -> pl.LightningModule:
        """
        Create and return the network used for training (i.e., computing the
        loss).
        Returns
        -------
        pl.LightningModule
            The network that computes the loss given input data.
        """
        raise NotImplementedError
    def create_predictor(
        self,
        transformation: Transformation,
        network: nn.Module,
    ) -> PyTorchPredictor:
        """
        Create and return a predictor object.
        Returns
        -------
        PyTorchPredictor
            A predictor wrapping a `nn.Module` used for inference.
        """
        raise NotImplementedError
    def create_training_data_loader(
        self, data: Dataset, network: nn.Module, **kwargs
    ) -> Iterable:
        # Build the iterable of training batches passed to ``trainer.fit``.
        raise NotImplementedError
    def create_validation_data_loader(
        self, data: Dataset, network: nn.Module, **kwargs
    ) -> Iterable:
        # Build the iterable of validation batches (used only when
        # validation data is supplied to ``train_model``).
        raise NotImplementedError
    def train_model(
        self,
        training_data: Dataset,
        validation_data: Optional[Dataset] = None,
        num_workers: int = 0,
        shuffle_buffer_length: Optional[int] = None,
        cache_data: bool = False,
        **kwargs,
    ) -> TrainOutput:
        """
        Fit the model and return all training artifacts.

        Parameters
        ----------
        training_data
            Dataset used for gradient updates.
        validation_data
            Optional dataset; when given, checkpointing monitors
            ``val_loss`` instead of ``train_loss``.
        num_workers, shuffle_buffer_length
            Forwarded to the training data loader.
        cache_data
            If True, wrap the transformed datasets in ``Cached`` so the
            transformation is applied only once.
        **kwargs
            Currently accepted but not used by this method.
        """
        transformation = self.create_transformation()
        transformed_training_data = transformation.apply(
            training_data, is_train=True
        )
        training_network = self.create_lightning_module()
        training_data_loader = self.create_training_data_loader(
            transformed_training_data
            if not cache_data
            else Cached(transformed_training_data),
            training_network,
            num_workers=num_workers,
            shuffle_buffer_length=shuffle_buffer_length,
        )
        validation_data_loader = None
        if validation_data is not None:
            transformed_validation_data = transformation.apply(
                validation_data, is_train=True
            )
            validation_data_loader = self.create_validation_data_loader(
                transformed_validation_data
                if not cache_data
                else Cached(transformed_validation_data),
                training_network,
            )
        # Checkpoint on validation loss when validation data is available,
        # otherwise fall back to training loss.
        monitor = "train_loss" if validation_data is None else "val_loss"
        checkpoint = pl.callbacks.ModelCheckpoint(
            monitor=monitor, mode="min", verbose=True
        )
        # Prepend the checkpoint callback to any user-supplied callbacks
        # without mutating self.trainer_kwargs.
        custom_callbacks = self.trainer_kwargs.get("callbacks", [])
        callbacks = [checkpoint] + custom_callbacks
        trainer_kwargs = {**self.trainer_kwargs, "callbacks": callbacks}
        trainer = pl.Trainer(**trainer_kwargs)
        trainer.fit(
            model=training_network,
            train_dataloaders=training_data_loader,
            val_dataloaders=validation_data_loader,
        )
        logger.info(f"Loading best model from {checkpoint.best_model_path}")
        # NOTE(review): ``load_from_checkpoint`` is a classmethod in
        # Lightning; calling it on the instance returns a *new* module
        # rather than mutating ``training_network`` — confirm intended.
        best_model = training_network.load_from_checkpoint(
            checkpoint.best_model_path
        )
        return TrainOutput(
            transformation=transformation,
            trained_net=best_model,
            trainer=trainer,
            predictor=self.create_predictor(transformation, best_model),
        )
    @staticmethod
    def _worker_init_fn(worker_id):
        # Derive a distinct numpy seed per DataLoader worker from the
        # parent's RNG state so workers don't share random streams.
        # (Not referenced elsewhere in this file's visible code.)
        np.random.seed(np.random.get_state()[1][0] + worker_id)
    def train(
        self,
        training_data: Dataset,
        validation_data: Optional[Dataset] = None,
        num_workers: int = 0,
        shuffle_buffer_length: Optional[int] = None,
        cache_data: bool = False,
        **kwargs,
    ) -> PyTorchPredictor:
        """Train the model and return only the resulting predictor."""
        return self.train_model(
            training_data,
            validation_data,
            num_workers=num_workers,
            shuffle_buffer_length=shuffle_buffer_length,
            cache_data=cache_data,
            **kwargs,
        ).predictor
| [
"logging.getLogger",
"pytorch_lightning.callbacks.ModelCheckpoint",
"numpy.random.get_state",
"gluonts.itertools.Cached",
"pytorch_lightning.Trainer",
"gluonts.core.component.validated"
] | [((1011, 1038), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1028, 1038), False, 'import logging\n'), ((1555, 1566), 'gluonts.core.component.validated', 'validated', ([], {}), '()\n', (1564, 1566), False, 'from gluonts.core.component import validated\n'), ((4507, 4578), 'pytorch_lightning.callbacks.ModelCheckpoint', 'pl.callbacks.ModelCheckpoint', ([], {'monitor': 'monitor', 'mode': '"""min"""', 'verbose': '(True)'}), "(monitor=monitor, mode='min', verbose=True)\n", (4535, 4578), True, 'import pytorch_lightning as pl\n'), ((4813, 4841), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {}), '(**trainer_kwargs)\n', (4823, 4841), True, 'import pytorch_lightning as pl\n'), ((3779, 3812), 'gluonts.itertools.Cached', 'Cached', (['transformed_training_data'], {}), '(transformed_training_data)\n', (3785, 3812), False, 'from gluonts.itertools import Cached\n'), ((4326, 4361), 'gluonts.itertools.Cached', 'Cached', (['transformed_validation_data'], {}), '(transformed_validation_data)\n', (4332, 4361), False, 'from gluonts.itertools import Cached\n'), ((5499, 5520), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (5518, 5520), True, 'import numpy as np\n')] |
from sandbox.crazyflie.src.gcg.envs.GibsonEnv.env_modalities import CameraRobotEnv, BaseRobotEnv
from sandbox.crazyflie.src.gcg.envs.GibsonEnv.env_bases import *
from sandbox.crazyflie.src.gcg.envs.GibsonEnv.robot_locomotors import Quadrotor3
from transforms3d import quaternions
import os
import numpy as np
import sys
import pybullet as p
from gibson.core.physics.scene_stadium import SinglePlayerStadiumScene
import pybullet_data
import cv2
import random
from gcg.envs.env_spec import EnvSpec
from collections import OrderedDict
from gcg.envs.spaces.box import Box
from gcg.envs.spaces.discrete import Discrete
from termcolor import colored
# Flag retained from the original codebase; no obstacle penalty is
# currently applied in _rewards (obstacle_penalty stays 0).
CALC_OBSTACLE_PENALTY = 1

# Chase-camera parameters passed to CameraRobotEnv to track the robot.
tracking_camera = {
    'yaw': 20,
    'z_offset': 0.5,
    'distance': 1,
    'pitch': -20
}

# Alternate camera preset; not referenced in this file's visible code.
tracking_camera_top = {
    'yaw': 20,  # demo: living room, stairs
    'z_offset': 0.5,
    'distance': 1,
    'pitch': -20
}
class DroneNavigateEnv(CameraRobotEnv):
    """Gibson camera-robot environment where a quadrotor (Quadrotor3)
    navigates a building scene toward a target pose.

    The reward is progress toward the target plus a proximity bonus;
    collision/energy terms are computed but currently excluded (see
    ``_rewards``).
    """
    def __init__(self, config, gpu_count=0):
        #self.config = self.parse_config(config)
        self.config = config
        CameraRobotEnv.__init__(self, self.config, gpu_count,
                                scene_type="building",
                                tracking_camera=tracking_camera)
        self.robot_introduce(Quadrotor3(self.config, env=self))
        self.scene_introduce()
        # "gui" mode enables on-screen rendering.
        self.gui = self.config["mode"] == "gui"
        self.total_reward = 0
        self.total_frame = 0
    def add_text(self, img):
        # Overlay pose, potential, and FPS telemetry onto the rendered frame.
        font = cv2.FONT_HERSHEY_SIMPLEX
        x,y,z = self.robot.body_xyz
        r,p,ya = self.robot.body_rpy
        cv2.putText(img, 'x:{0:.4f} y:{1:.4f} z:{2:.4f}'.format(x,y,z), (10, 20), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.putText(img, 'ro:{0:.4f} pth:{1:.4f} ya:{2:.4f}'.format(r,p,ya), (10, 40), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.putText(img, 'potential:{0:.4f}'.format(self.potential), (10, 60), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        # NOTE(review): self.fps is presumably maintained by the base env — confirm.
        cv2.putText(img, 'fps:{0:.4f}'.format(self.fps), (10, 80), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        return img
    def _rewards(self, action=None, debugmode=False):
        a = action
        # Progress = change in potential (robot-computed) since last step.
        potential_old = self.potential
        self.potential = self.robot.calc_potential()
        progress = float(self.potential - potential_old)
        feet_collision_cost = 0.0
        for i, f in enumerate(
                self.robot.feet):  # TODO: Maybe calculating feet contacts could be done within the robot code
            # print(f.contact_list())
            contact_ids = set((x[2], x[4]) for x in f.contact_list())
            # print("CONTACT OF '%d' WITH %d" % (contact_ids, ",".join(contact_names)) )
            if (self.ground_ids & contact_ids):
                # see Issue 63: https://github.com/openai/roboschool/issues/63
                # feet_collision_cost += self.foot_collision_cost
                self.robot.feet_contact[i] = 1.0
            else:
                self.robot.feet_contact[i] = 0.0
        # print(self.robot.feet_contact)
        #electricity_cost = self.electricity_cost * float(np.abs(a*self.robot.joint_speeds).mean()) # let's assume we
        electricity_cost = self.stall_torque_cost * float(np.square(a).mean())
        debugmode = 0
        # Base-link contact points above z = 0.15 are treated as wall hits.
        wall_contact = [pt for pt in self.robot.parts['base_link'].contact_list() if pt[6][2] > 0.15]
        wall_collision_cost = self.wall_collision_cost * len(wall_contact)
        joints_at_limit_cost = float(self.joints_at_limit_cost * self.robot.joints_at_limit)
        # Bonus for being within 2 units of the target.
        close_to_goal = 0
        if self.robot.dist_to_target() < 2:
            close_to_goal = 0.5
        obstacle_penalty = 0
        # NOTE(review): debugmode is reassigned to 0 several times here,
        # so the debug prints below are unreachable dead code.
        debugmode = 0
        debugmode = 0
        if (debugmode):
            print("Wall contact points", len(wall_contact))
            print("Collision cost", wall_collision_cost)
            print("electricity_cost", electricity_cost)
            print("close to goal", close_to_goal)
        #print("progress")
        #print(progress)
        #print("electricity_cost")
        #print(electricity_cost)
        #print("joints_at_limit_cost")
        #print(joints_at_limit_cost)
        #print("feet_collision_cost")
        #print(feet_collision_cost)
        # NOTE(review): wall_collision_cost / electricity_cost /
        # joints_at_limit_cost are computed above but excluded from the
        # returned reward list — confirm this is intentional.
        rewards = [
            #alive,
            progress,
            #wall_collision_cost,
            close_to_goal,
            obstacle_penalty
            #electricity_cost,
            #joints_at_limit_cost,
            #feet_collision_cost
        ]
        return rewards
    def _termination(self, debugmode=False):
        # Episode ends after 250 frames or if the drone falls below z = 0.
        done = self.nframe > 250 or self.robot.get_position()[2] < 0
        return done
    def _reset(self):
        # Reset bookkeeping and delegate the actual reset to the base env.
        self.total_frame = 0
        self.total_reward = 0
        obs = CameraRobotEnv._reset(self)
        return obs
class GcgDroneNavigateEnv(DroneNavigateEnv):
    """GCG wrapper around DroneNavigateEnv exposing the spec interface
    (EnvSpec, Box/Discrete spaces) expected by the GCG training code.

    Observations are a cropped grayscale camera image plus a binary
    collision flag; the single action dimension is a yaw command.
    """
    def __init__(self, params={}, gpu_count=0):
        # NOTE(review): mutable default ``params={}`` is shared across
        # calls; callers should always pass their own dict.
        DroneNavigateEnv.__init__(self, params, gpu_count)
        self._obs_shape = params['obs_shape']
        self._yaw_limits = params['yaw_limits']
        self._horizon = params['horizon']
        self._model_id = params['model_id']
        self._setup_spec()
        # Image observations must be grayscale or RGB.
        assert (self.observation_im_space.shape[-1] == 1 or self.observation_im_space.shape[-1] == 3)
        self.spec = EnvSpec(self.observation_im_space,self.action_space,self.action_selection_space,self.observation_vec_spec,self.action_spec,self.action_selection_spec,self.goal_spec)
    @property
    def horizon(self):
        # Maximum episode length, configured via params['horizon'].
        return self._horizon
    def _setup_spec(self):
        # Build the action/observation spec objects consumed by EnvSpec.
        self.action_spec = OrderedDict()
        self.action_selection_spec = OrderedDict()
        self.observation_vec_spec = OrderedDict()
        self.goal_spec = OrderedDict()
        # Full yaw range of the action space...
        self.action_spec['yaw'] = Box(low=-180, high=180)
        self.action_space = Box(low=np.array([self.action_spec['yaw'].low[0]]),
                               high=np.array([self.action_spec['yaw'].high[0]]))
        # ...and the narrower range the policy is allowed to select from.
        self.action_selection_spec['yaw'] = Box(low=self._yaw_limits[0], high=self._yaw_limits[1])
        self.action_selection_space = Box(low = np.array([self.action_selection_spec['yaw'].low[0]]), high = np.array([self.action_selection_spec['yaw'].high[0]]))
        # Selection range must lie inside the full action range.
        assert (np.logical_and(self.action_selection_space.low >= self.action_space.low,
                               self.action_selection_space.high <= self.action_space.high).all())
        self.observation_im_space = Box(low=0, high=255, shape=self._obs_shape)
        self.observation_vec_spec['coll'] = Discrete(1)
    def step(self, a):
        observations, reward, _, env_info_internal = DroneNavigateEnv._step(self, a)
        # Episode termination is driven by collision detection rather than
        # the base env's done flag.
        done = self.get_collision()
        filtered_obs = self.get_filtered_observation(observations)
        env_info = dict(x=env_info_internal["x"], y=env_info_internal["y"], yaw=env_info_internal["yaw"], height=env_info_internal["height"], speed=env_info_internal["speed"], model_id=self._model_id)
        return filtered_obs, np.array([]), reward, done, env_info
    def reset(self, offline=False, keep_rosbag=True):
        observations = DroneNavigateEnv._reset(self)
        filtered_obs = self.get_filtered_observation_reset(observations)
        return filtered_obs, np.array([])
    def ros_is_good(self, print=False):
        # NOTE(review): the ``print`` parameter shadows the builtin
        # print(); kept for interface compatibility. Always reports OK.
        return True
    def get_collision(self):
        # Collision = any base-link contact, or roll/pitch beyond 0.5 rad.
        collision = (len(self.robot.parts['base_link'].contact_list()) > 0) or (abs(self.robot.get_orientation_eulerian()[0]) > 0.5) or (abs(self.robot.get_orientation_eulerian()[1]) > 0.5)
        if collision:
            print("\n")
            print(colored("COLLISION!!!!!", "green"))
            print("\n")
            print(colored("COLLISION!!!!!", "red"))
            print("\n")
            print(colored("COLLISION!!!!!", "yellow"))
            print("\n")
        return collision
    def get_filtered_observation(self, observations):
        # Resize to 96x96, crop rows 12..83 (-> 72x96), convert to gray;
        # second element is the current collision flag.
        image = observations['rgb_filled']
        image_resized = cv2.cvtColor(cv2.resize(image, (96, 96))[12:84], cv2.COLOR_BGR2GRAY)
        cv2.imshow('image', image_resized)
        cv2.waitKey(5)
        return (image_resized, np.array([int(self.get_collision())]))
    def get_filtered_observation_reset(self, observations):
        # Same processing as get_filtered_observation but the collision
        # flag is forced to 0 at episode start.
        image = observations['rgb_filled']
        image_resized = cv2.cvtColor(cv2.resize(image, (96, 96))[12:84], cv2.COLOR_BGR2GRAY)
        cv2.imshow('image', image_resized)
        cv2.waitKey(5)
        return (image_resized, np.array([0]))
| [
"collections.OrderedDict",
"termcolor.colored",
"sandbox.crazyflie.src.gcg.envs.GibsonEnv.robot_locomotors.Quadrotor3",
"numpy.logical_and",
"sandbox.crazyflie.src.gcg.envs.GibsonEnv.env_modalities.CameraRobotEnv._reset",
"gcg.envs.env_spec.EnvSpec",
"sandbox.crazyflie.src.gcg.envs.GibsonEnv.env_modalit... | [((1117, 1230), 'sandbox.crazyflie.src.gcg.envs.GibsonEnv.env_modalities.CameraRobotEnv.__init__', 'CameraRobotEnv.__init__', (['self', 'self.config', 'gpu_count'], {'scene_type': '"""building"""', 'tracking_camera': 'tracking_camera'}), "(self, self.config, gpu_count, scene_type='building',\n tracking_camera=tracking_camera)\n", (1140, 1230), False, 'from sandbox.crazyflie.src.gcg.envs.GibsonEnv.env_modalities import CameraRobotEnv, BaseRobotEnv\n'), ((4849, 4876), 'sandbox.crazyflie.src.gcg.envs.GibsonEnv.env_modalities.CameraRobotEnv._reset', 'CameraRobotEnv._reset', (['self'], {}), '(self)\n', (4870, 4876), False, 'from sandbox.crazyflie.src.gcg.envs.GibsonEnv.env_modalities import CameraRobotEnv, BaseRobotEnv\n'), ((5381, 5561), 'gcg.envs.env_spec.EnvSpec', 'EnvSpec', (['self.observation_im_space', 'self.action_space', 'self.action_selection_space', 'self.observation_vec_spec', 'self.action_spec', 'self.action_selection_spec', 'self.goal_spec'], {}), '(self.observation_im_space, self.action_space, self.\n action_selection_space, self.observation_vec_spec, self.action_spec,\n self.action_selection_spec, self.goal_spec)\n', (5388, 5561), False, 'from gcg.envs.env_spec import EnvSpec\n'), ((5674, 5687), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5685, 5687), False, 'from collections import OrderedDict\n'), ((5725, 5738), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5736, 5738), False, 'from collections import OrderedDict\n'), ((5775, 5788), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5786, 5788), False, 'from collections import OrderedDict\n'), ((5814, 5827), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5825, 5827), False, 'from collections import OrderedDict\n'), ((5863, 5886), 'gcg.envs.spaces.box.Box', 'Box', ([], {'low': '(-180)', 'high': '(180)'}), '(low=-180, high=180)\n', (5866, 5886), False, 'from gcg.envs.spaces.box import 
Box\n'), ((6095, 6149), 'gcg.envs.spaces.box.Box', 'Box', ([], {'low': 'self._yaw_limits[0]', 'high': 'self._yaw_limits[1]'}), '(low=self._yaw_limits[0], high=self._yaw_limits[1])\n', (6098, 6149), False, 'from gcg.envs.spaces.box import Box\n'), ((6539, 6582), 'gcg.envs.spaces.box.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'self._obs_shape'}), '(low=0, high=255, shape=self._obs_shape)\n', (6542, 6582), False, 'from gcg.envs.spaces.box import Box\n'), ((6627, 6638), 'gcg.envs.spaces.discrete.Discrete', 'Discrete', (['(1)'], {}), '(1)\n', (6635, 6638), False, 'from gcg.envs.spaces.discrete import Discrete\n'), ((8128, 8162), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image_resized'], {}), "('image', image_resized)\n", (8138, 8162), False, 'import cv2\n'), ((8171, 8185), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (8182, 8185), False, 'import cv2\n'), ((8461, 8495), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image_resized'], {}), "('image', image_resized)\n", (8471, 8495), False, 'import cv2\n'), ((8504, 8518), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (8515, 8518), False, 'import cv2\n'), ((1322, 1355), 'sandbox.crazyflie.src.gcg.envs.GibsonEnv.robot_locomotors.Quadrotor3', 'Quadrotor3', (['self.config'], {'env': 'self'}), '(self.config, env=self)\n', (1332, 1355), False, 'from sandbox.crazyflie.src.gcg.envs.GibsonEnv.robot_locomotors import Quadrotor3\n'), ((7081, 7093), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7089, 7093), True, 'import numpy as np\n'), ((7330, 7342), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7338, 7342), True, 'import numpy as np\n'), ((8550, 8563), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (8558, 8563), True, 'import numpy as np\n'), ((5924, 5966), 'numpy.array', 'np.array', (["[self.action_spec['yaw'].low[0]]"], {}), "([self.action_spec['yaw'].low[0]])\n", (5932, 5966), True, 'import numpy as np\n'), ((6005, 6048), 'numpy.array', 'np.array', 
(["[self.action_spec['yaw'].high[0]]"], {}), "([self.action_spec['yaw'].high[0]])\n", (6013, 6048), True, 'import numpy as np\n'), ((6199, 6251), 'numpy.array', 'np.array', (["[self.action_selection_spec['yaw'].low[0]]"], {}), "([self.action_selection_spec['yaw'].low[0]])\n", (6207, 6251), True, 'import numpy as np\n'), ((6260, 6313), 'numpy.array', 'np.array', (["[self.action_selection_spec['yaw'].high[0]]"], {}), "([self.action_selection_spec['yaw'].high[0]])\n", (6268, 6313), True, 'import numpy as np\n'), ((6331, 6468), 'numpy.logical_and', 'np.logical_and', (['(self.action_selection_space.low >= self.action_space.low)', '(self.action_selection_space.high <= self.action_space.high)'], {}), '(self.action_selection_space.low >= self.action_space.low, \n self.action_selection_space.high <= self.action_space.high)\n', (6345, 6468), True, 'import numpy as np\n'), ((7689, 7723), 'termcolor.colored', 'colored', (['"""COLLISION!!!!!"""', '"""green"""'], {}), "('COLLISION!!!!!', 'green')\n", (7696, 7723), False, 'from termcolor import colored\n'), ((7767, 7799), 'termcolor.colored', 'colored', (['"""COLLISION!!!!!"""', '"""red"""'], {}), "('COLLISION!!!!!', 'red')\n", (7774, 7799), False, 'from termcolor import colored\n'), ((7843, 7878), 'termcolor.colored', 'colored', (['"""COLLISION!!!!!"""', '"""yellow"""'], {}), "('COLLISION!!!!!', 'yellow')\n", (7850, 7878), False, 'from termcolor import colored\n'), ((8064, 8091), 'cv2.resize', 'cv2.resize', (['image', '(96, 96)'], {}), '(image, (96, 96))\n', (8074, 8091), False, 'import cv2\n'), ((8397, 8424), 'cv2.resize', 'cv2.resize', (['image', '(96, 96)'], {}), '(image, (96, 96))\n', (8407, 8424), False, 'import cv2\n'), ((3287, 3299), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (3296, 3299), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.gridspec as gridspec
import matplotlib.patches as mpatches
from scipy.optimize import minimize
import time
def sim_run(options, MPC):
    """Run a closed-loop model-predictive-control simulation and animate it.

    Each step applies the first (pedal, steering) pair of the optimized
    input sequence to the plant, shifts the rest of the sequence to
    warm-start the next solve, and records states for the animation.

    Parameters
    ----------
    options : dict
        'FIG_SIZE' -> [width, height] of the figure in inches,
        'OBSTACLES' -> bool, draw the obstacle circle from the MPC object.
    MPC : type
        Class exposing ``horizon``, ``dt``, ``reference1``, ``reference2``,
        ``cost_function(u, args)``, ``plant_model(state, dt, pedal,
        steering)`` and, when OBSTACLES is set, ``x_obs`` / ``y_obs``.
    """
    # Fix: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for interval timing.
    start = time.perf_counter()

    # Simulator Options
    FIG_SIZE = options['FIG_SIZE'] # [Width, Height]
    OBSTACLES = options['OBSTACLES']

    mpc = MPC() # initialize cost function and model

    num_inputs = 2 # num of control inputs
    u = np.zeros(mpc.horizon*num_inputs)
    bounds = []

    # Set bounds for inputs bounded optimization: one (pedal, steering)
    # pair per horizon step.
    for i in range(mpc.horizon):
        bounds += [[-1, 1]] # boundaries for pedal, i.e., -1 <= pedal <= 1
        bounds += [[-0.8, 0.8]] # boundary for steering, i.e., -0.8 <= steering <= 0.8

    ref_1 = mpc.reference1
    ref_2 = mpc.reference2
    ref = ref_1

    state_i = np.array([[0,0,0,0]])
    u_i = np.array([[0,0]])
    sim_total = 250
    predict_info = [state_i]

    for i in range(1,sim_total+1):
        # Warm start: drop the input pair just applied and duplicate the
        # last pair at the end of the sequence.
        u = np.delete(u,0)
        u = np.delete(u,0)
        u = np.append(u, u[-2])
        u = np.append(u, u[-2])
        start_time = time.time()

        # Non-linear optimization.
        u_solution = minimize(mpc.cost_function, u, (state_i[-1], ref),
                              method='SLSQP',
                              bounds=bounds,
                              tol = 1e-5)
        print('Step ' + str(i) + ' of ' + str(sim_total) + ' Time ' + str(round(time.time() - start_time,5)))
        u = u_solution.x
        y = mpc.plant_model(state_i[-1], mpc.dt, u[0], u[1])
        # Change the goal point from ref1 to ref2 after 130 iters.
        # (Fix: identity test `is not None` instead of `!= None`.)
        if (i > 130 and ref_2 is not None):
            ref = ref_2
        # Roll the model forward over the horizon to visualize the plan.
        predicted_state = np.array([y])
        for j in range(1, mpc.horizon):
            predicted = mpc.plant_model(predicted_state[-1], mpc.dt, u[2*j], u[2*j+1])
            predicted_state = np.append(predicted_state, np.array([predicted]), axis=0)
        predict_info += [predicted_state]
        state_i = np.append(state_i, np.array([y]), axis=0)
        u_i = np.append(u_i, np.array([(u[0], u[1])]), axis=0)

    ###################
    # SIMULATOR DISPLAY

    # Total Figure
    fig = plt.figure(figsize=(FIG_SIZE[0], FIG_SIZE[1]))
    gs = gridspec.GridSpec(8,8)

    # Elevator plot settings.
    ax = fig.add_subplot(gs[:8, :8])
    plt.xlim(-3, 17)
    ax.set_ylim([-3, 17])
    plt.xticks(np.arange(0,11, step=2))
    plt.yticks(np.arange(0,11, step=2))
    plt.title('MPC 2D')

    # Time display.
    time_text = ax.text(6, 0.5, '', fontsize=15)

    # Main plot info.
    car_width = 1.0
    patch_car = mpatches.Rectangle((0, 0), car_width, 2.5, fc='k', fill=False)
    patch_goal = mpatches.Rectangle((0, 0), car_width, 2.5, fc='b',
                                    ls='dashdot', fill=False)
    ax.add_patch(patch_car)
    ax.add_patch(patch_goal)
    predict, = ax.plot([], [], 'r--', linewidth = 1)

    # Car steering and throttle position gauges.
    telem = [3,14]
    patch_wheel = mpatches.Circle((telem[0]-3, telem[1]), 2.2)
    ax.add_patch(patch_wheel)
    wheel_1, = ax.plot([], [], 'k', linewidth = 3)
    wheel_2, = ax.plot([], [], 'k', linewidth = 3)
    wheel_3, = ax.plot([], [], 'k', linewidth = 3)
    throttle_outline, = ax.plot([telem[0], telem[0]], [telem[1]-2, telem[1]+2],
                                'b', linewidth = 20, alpha = 0.4)
    throttle, = ax.plot([], [], 'k', linewidth = 20)
    brake_outline, = ax.plot([telem[0]+3, telem[0]+3], [telem[1]-2, telem[1]+2],
                             'b', linewidth = 20, alpha = 0.2)
    brake, = ax.plot([], [], 'k', linewidth = 20)
    throttle_text = ax.text(telem[0], telem[1]-3, 'Forward', fontsize = 15,
                            horizontalalignment='center')
    brake_text = ax.text(telem[0]+3, telem[1]-3, 'Reverse', fontsize = 15,
                         horizontalalignment='center')

    # Obstacles
    if OBSTACLES:
        patch_obs = mpatches.Circle((mpc.x_obs, mpc.y_obs),0.5)
        ax.add_patch(patch_obs)

    # Shift xy, centered on rear of car to rear left corner of car.
    def car_patch_pos(x, y, psi):
        x_new = x - np.sin(psi)*(car_width/2)
        y_new = y + np.cos(psi)*(car_width/2)
        return [x_new, y_new]

    # Draw three spokes of the steering-wheel gauge at the given angle.
    def steering_wheel(wheel_angle):
        wheel_1.set_data([telem[0]-3, telem[0]-3+np.cos(wheel_angle)*2],
                         [telem[1], telem[1]+np.sin(wheel_angle)*2])
        wheel_2.set_data([telem[0]-3, telem[0]-3-np.cos(wheel_angle)*2],
                         [telem[1], telem[1]-np.sin(wheel_angle)*2])
        wheel_3.set_data([telem[0]-3, telem[0]-3+np.sin(wheel_angle)*2],
                         [telem[1], telem[1]-np.cos(wheel_angle)*2])

    def update_plot(num):
        # Car: place the rectangle at the rear-left corner and rotate it
        # to the heading (matplotlib patch angles are in degrees).
        # (Fix: removed leftover per-frame debug print of the heading.)
        patch_car.set_xy(car_patch_pos(state_i[num,0], state_i[num,1], state_i[num,2]))
        patch_car.angle = np.rad2deg(state_i[num,2])-90
        # Telemetry gauges (wheel angle exaggerated 2x for visibility).
        steering_wheel(u_i[num,1]*2)
        throttle.set_data([telem[0],telem[0]],
                          [telem[1]-2, telem[1]-2+max(0,u_i[num,0]/5*4)])
        brake.set_data([telem[0]+3, telem[0]+3],
                       [telem[1]-2, telem[1]-2+max(0,-u_i[num,0]/5*4)])
        # Goal: show ref_1 until step 130, then ref_2 (when provided).
        if (num <= 130 or ref_2 is None):
            patch_goal.set_xy(car_patch_pos(ref_1[0],ref_1[1],ref_1[2]))
            patch_goal.angle = np.rad2deg(ref_1[2])-90
        else:
            patch_goal.set_xy(car_patch_pos(ref_2[0],ref_2[1],ref_2[2]))
            patch_goal.angle = np.rad2deg(ref_2[2])-90
        # Predicted horizon trajectory.
        predict.set_data(predict_info[num][:,0],predict_info[num][:,1])
        return patch_car, time_text

    print("Compute Time: ", round(time.perf_counter() - start, 3), "seconds.")

    # Animation.
    car_ani = animation.FuncAnimation(fig, update_plot, frames=range(1,len(state_i)), interval=100, repeat=True, blit=False)
    #car_ani.save('mpc-video.mp4')

    plt.show()
| [
"matplotlib.patches.Rectangle",
"time.clock",
"numpy.delete",
"scipy.optimize.minimize",
"numpy.append",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.rad2deg",... | [((258, 270), 'time.clock', 'time.clock', ([], {}), '()\n', (268, 270), False, 'import time\n'), ((490, 524), 'numpy.zeros', 'np.zeros', (['(mpc.horizon * num_inputs)'], {}), '(mpc.horizon * num_inputs)\n', (498, 524), True, 'import numpy as np\n'), ((872, 896), 'numpy.array', 'np.array', (['[[0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0]])\n', (880, 896), True, 'import numpy as np\n'), ((904, 922), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (912, 922), True, 'import numpy as np\n'), ((2235, 2281), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(FIG_SIZE[0], FIG_SIZE[1])'}), '(figsize=(FIG_SIZE[0], FIG_SIZE[1]))\n', (2245, 2281), True, 'import matplotlib.pyplot as plt\n'), ((2291, 2314), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(8)', '(8)'], {}), '(8, 8)\n', (2308, 2314), True, 'import matplotlib.gridspec as gridspec\n'), ((2387, 2403), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-3)', '(17)'], {}), '(-3, 17)\n', (2395, 2403), True, 'import matplotlib.pyplot as plt\n'), ((2514, 2533), 'matplotlib.pyplot.title', 'plt.title', (['"""MPC 2D"""'], {}), "('MPC 2D')\n", (2523, 2533), True, 'import matplotlib.pyplot as plt\n'), ((2663, 2725), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(0, 0)', 'car_width', '(2.5)'], {'fc': '"""k"""', 'fill': '(False)'}), "((0, 0), car_width, 2.5, fc='k', fill=False)\n", (2681, 2725), True, 'import matplotlib.patches as mpatches\n'), ((2743, 2819), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(0, 0)', 'car_width', '(2.5)'], {'fc': '"""b"""', 'ls': '"""dashdot"""', 'fill': '(False)'}), "((0, 0), car_width, 2.5, fc='b', ls='dashdot', fill=False)\n", (2761, 2819), True, 'import matplotlib.patches as mpatches\n'), ((3047, 3093), 'matplotlib.patches.Circle', 'mpatches.Circle', (['(telem[0] - 3, telem[1])', '(2.2)'], {}), '((telem[0] - 3, telem[1]), 2.2)\n', (3062, 3093), True, 'import matplotlib.patches as mpatches\n'), ((6088, 6098), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (6096, 6098), True, 'import matplotlib.pyplot as plt\n'), ((1019, 1034), 'numpy.delete', 'np.delete', (['u', '(0)'], {}), '(u, 0)\n', (1028, 1034), True, 'import numpy as np\n'), ((1046, 1061), 'numpy.delete', 'np.delete', (['u', '(0)'], {}), '(u, 0)\n', (1055, 1061), True, 'import numpy as np\n'), ((1073, 1092), 'numpy.append', 'np.append', (['u', 'u[-2]'], {}), '(u, u[-2])\n', (1082, 1092), True, 'import numpy as np\n'), ((1105, 1124), 'numpy.append', 'np.append', (['u', 'u[-2]'], {}), '(u, u[-2])\n', (1114, 1124), True, 'import numpy as np\n'), ((1146, 1157), 'time.time', 'time.time', ([], {}), '()\n', (1155, 1157), False, 'import time\n'), ((1215, 1312), 'scipy.optimize.minimize', 'minimize', (['mpc.cost_function', 'u', '(state_i[-1], ref)'], {'method': '"""SLSQP"""', 'bounds': 'bounds', 'tol': '(1e-05)'}), "(mpc.cost_function, u, (state_i[-1], ref), method='SLSQP', bounds=\n bounds, tol=1e-05)\n", (1223, 1312), False, 'from scipy.optimize import minimize\n'), ((1761, 1774), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (1769, 1774), True, 'import numpy as np\n'), ((2445, 2469), 'numpy.arange', 'np.arange', (['(0)', '(11)'], {'step': '(2)'}), '(0, 11, step=2)\n', (2454, 2469), True, 'import numpy as np\n'), ((2485, 2509), 'numpy.arange', 'np.arange', (['(0)', '(11)'], {'step': '(2)'}), '(0, 11, step=2)\n', (2494, 2509), True, 'import numpy as np\n'), ((3981, 4025), 'matplotlib.patches.Circle', 'mpatches.Circle', (['(mpc.x_obs, mpc.y_obs)', '(0.5)'], {}), '((mpc.x_obs, mpc.y_obs), 0.5)\n', (3996, 4025), True, 'import matplotlib.patches as mpatches\n'), ((2069, 2082), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (2077, 2082), True, 'import numpy as np\n'), ((2121, 2145), 'numpy.array', 'np.array', (['[(u[0], u[1])]'], {}), '([(u[0], u[1])])\n', (2129, 2145), True, 'import numpy as np\n'), ((4924, 4951), 'numpy.rad2deg', 'np.rad2deg', (['state_i[num, 2]'], {}), '(state_i[num, 2])\n', (4934, 4951), True, 'import numpy as 
np\n'), ((4989, 5016), 'numpy.rad2deg', 'np.rad2deg', (['state_i[num, 2]'], {}), '(state_i[num, 2])\n', (4999, 5016), True, 'import numpy as np\n'), ((1959, 1980), 'numpy.array', 'np.array', (['[predicted]'], {}), '([predicted])\n', (1967, 1980), True, 'import numpy as np\n'), ((4202, 4213), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (4208, 4213), True, 'import numpy as np\n'), ((4248, 4259), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (4254, 4259), True, 'import numpy as np\n'), ((5458, 5478), 'numpy.rad2deg', 'np.rad2deg', (['ref_1[2]'], {}), '(ref_1[2])\n', (5468, 5478), True, 'import numpy as np\n'), ((5600, 5620), 'numpy.rad2deg', 'np.rad2deg', (['ref_2[2]'], {}), '(ref_2[2])\n', (5610, 5620), True, 'import numpy as np\n'), ((5868, 5880), 'time.clock', 'time.clock', ([], {}), '()\n', (5878, 5880), False, 'import time\n'), ((4391, 4410), 'numpy.cos', 'np.cos', (['wheel_angle'], {}), '(wheel_angle)\n', (4397, 4410), True, 'import numpy as np\n'), ((4460, 4479), 'numpy.sin', 'np.sin', (['wheel_angle'], {}), '(wheel_angle)\n', (4466, 4479), True, 'import numpy as np\n'), ((4533, 4552), 'numpy.cos', 'np.cos', (['wheel_angle'], {}), '(wheel_angle)\n', (4539, 4552), True, 'import numpy as np\n'), ((4602, 4621), 'numpy.sin', 'np.sin', (['wheel_angle'], {}), '(wheel_angle)\n', (4608, 4621), True, 'import numpy as np\n'), ((4675, 4694), 'numpy.sin', 'np.sin', (['wheel_angle'], {}), '(wheel_angle)\n', (4681, 4694), True, 'import numpy as np\n'), ((4744, 4763), 'numpy.cos', 'np.cos', (['wheel_angle'], {}), '(wheel_angle)\n', (4750, 4763), True, 'import numpy as np\n'), ((1487, 1498), 'time.time', 'time.time', ([], {}), '()\n', (1496, 1498), False, 'import time\n')] |
import xml.dom.minidom as MD
# import csv
# import pandas
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
# DQN hyperparameters.
Batch_Size = 32            # minibatch size sampled from replay memory
LR = 0.01                  # Adam learning rate
GAMMA = 0.9                # discount factor for bootstrap targets
EPSILON = 0.9              # probability of taking the greedy action
TARGET_REPLACE_ITER = 100  # sync target net with eval net every N learn() calls
MEMORY_SIZE = 700          # replay buffer capacity (rows)
STATES_DIMENTION = 4       # state vector length (sic: "dimension")
ACTIONS_DIMENTION = 4      # number of discrete actions
class Net(torch.nn.Module):
    """Two-layer MLP mapping a state vector to per-action Q-values.

    Architecture: Linear(n_states -> 50) -> ReLU -> Linear(50 -> n_actions).
    Both layers' weights are initialised from N(0, 0.2), matching the
    original implementation.
    """
    def __init__(self, n_states=None, n_actions=None):
        """
        Parameters
        ----------
        n_states : int, optional
            Input (state) dimensionality. Defaults to the module-level
            STATES_DIMENTION, so ``Net()`` behaves exactly as before.
        n_actions : int, optional
            Output (action) dimensionality. Defaults to ACTIONS_DIMENTION.
        """
        super(Net, self).__init__()
        # Resolve defaults lazily so the module-level constants are only
        # required when the caller does not pass explicit sizes.
        if n_states is None:
            n_states = STATES_DIMENTION
        if n_actions is None:
            n_actions = ACTIONS_DIMENTION
        self.input = torch.nn.Linear(n_states, 50)
        self.input.weight.data.normal_(0, 0.2)
        self.output = torch.nn.Linear(50, n_actions)
        self.output.weight.data.normal_(0, 0.2)
    def forward(self, x):
        """Return Q-values (one per action) for the state batch ``x``."""
        x = self.input(x)
        x = F.relu(x)
        actions_value = self.output(x)
        return actions_value
class DQN(object):
    """Deep Q-Network agent with a target network and replay memory.

    ``evalueNet`` is the online network trained on every ``learn()``
    call; ``targetNet`` is a periodically-synced copy used to compute
    the bootstrap targets.
    """
    def __init__(self):
        self.evalueNet = Net()   # online network, updated every learn()
        self.targetNet = Net()   # target network, synced every TARGET_REPLACE_ITER learn() calls
        self.log = None          # only referenced by commented-out debug code
        self.learnCounter = 0    # number of learn() calls (drives target sync)
        self.memoryCounter = 0   # total transitions stored (drives ring-buffer index)
        # Each memory row packs [s (STATES_DIMENTION), a, r, s' (STATES_DIMENTION)].
        self.memory = np.zeros((MEMORY_SIZE, STATES_DIMENTION * 2 +2))
        self.optimizer = torch.optim.Adam(self.evalueNet.parameters(), lr = LR)
        self.lossFunction = nn.MSELoss()
    def choose_action(self,x):
        """Epsilon-greedy action selection for state vector ``x``.

        NOTE(review): the greedy branch returns a length-1 numpy array
        while the exploration branch returns a plain int — callers must
        accept both forms.
        """
        # Add a batch dimension: (STATES_DIMENTION,) -> (1, STATES_DIMENTION).
        x = Variable(torch.unsqueeze(torch.FloatTensor(x),0))
        if np.random.uniform() < EPSILON :
            # Exploit: argmax over the online net's Q-values.
            actionsValue = self.evalueNet.forward(x)
            # print(actionsValue)
            action = torch.max(actionsValue,1)[1].data.numpy()
            # self.log.debug(action)
        else:
            # Explore: uniform random action.
            action = np.random.randint(0,ACTIONS_DIMENTION)
        return action
    def record_transition(self,s,a,r,next_s):
        """Store one (s, a, r, s') transition, overwriting the oldest row when full."""
        transition = np.hstack((s,[a,r],next_s))
        # Ring-buffer index.
        i = self.memoryCounter % MEMORY_SIZE
        self.memory[i, :] = transition
        self.memoryCounter += 1
    def learn(self):
        """Sample a minibatch from replay memory and take one TD-learning step."""
        # Periodically copy the online weights into the target network.
        if self.learnCounter % TARGET_REPLACE_ITER == 0:
            self.targetNet.load_state_dict(self.evalueNet.state_dict())
        self.learnCounter +=1
        # NOTE(review): samples uniformly over the whole buffer even
        # before it is filled (rows may still be all-zero) — confirm.
        sampleIndex = np.random.choice(MEMORY_SIZE,Batch_Size)
        sampleMemory = self.memory[sampleIndex, :]
        # Unpack the rows back into (s, a, r, s') batches.
        sample_s = Variable(torch.FloatTensor(sampleMemory[:,:STATES_DIMENTION]))
        sample_a = Variable(torch.LongTensor(sampleMemory[:, STATES_DIMENTION:STATES_DIMENTION+1].astype(int)))
        sample_r = Variable(torch.FloatTensor(sampleMemory[:,STATES_DIMENTION+1:STATES_DIMENTION+2]))
        sample_next_s = Variable(torch.FloatTensor(sampleMemory[:, -STATES_DIMENTION:]))
        # Q(s, a) under the online net for the actions actually taken.
        q_value = self.evalueNet(sample_s).gather(1,sample_a)
        # Bootstrap target uses the target net; detach() blocks gradients.
        q_next = self.targetNet(sample_next_s).detach()
        q_target = sample_r + GAMMA * q_next.max(1)[0].view(Batch_Size,1)
        loss = self.lossFunction(q_value, q_target)
        # print(loss)
        # loss_value = float(loss)
        # if loss_value!= None:
        #     with open('/Users/sunhaoran/Desktop/analysis.csv', 'a') as csvfile:
        #         writer = csv.writer(csvfile)
        #         writer.writerow([loss_value])
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
| [
"numpy.hstack",
"numpy.random.choice",
"torch.max",
"torch.nn.MSELoss",
"numpy.zeros",
"numpy.random.uniform",
"numpy.random.randint",
"torch.nn.functional.relu",
"torch.nn.Linear",
"torch.FloatTensor"
] | [((445, 482), 'torch.nn.Linear', 'torch.nn.Linear', (['STATES_DIMENTION', '(50)'], {}), '(STATES_DIMENTION, 50)\n', (460, 482), False, 'import torch\n'), ((552, 590), 'torch.nn.Linear', 'torch.nn.Linear', (['(50)', 'ACTIONS_DIMENTION'], {}), '(50, ACTIONS_DIMENTION)\n', (567, 590), False, 'import torch\n'), ((702, 711), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (708, 711), True, 'import torch.nn.functional as F\n'), ((997, 1046), 'numpy.zeros', 'np.zeros', (['(MEMORY_SIZE, STATES_DIMENTION * 2 + 2)'], {}), '((MEMORY_SIZE, STATES_DIMENTION * 2 + 2))\n', (1005, 1046), True, 'import numpy as np\n'), ((1154, 1166), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1164, 1166), True, 'import torch.nn as nn\n'), ((1665, 1695), 'numpy.hstack', 'np.hstack', (['(s, [a, r], next_s)'], {}), '((s, [a, r], next_s))\n', (1674, 1695), True, 'import numpy as np\n'), ((2014, 2055), 'numpy.random.choice', 'np.random.choice', (['MEMORY_SIZE', 'Batch_Size'], {}), '(MEMORY_SIZE, Batch_Size)\n', (2030, 2055), True, 'import numpy as np\n'), ((1276, 1295), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1293, 1295), True, 'import numpy as np\n'), ((1530, 1569), 'numpy.random.randint', 'np.random.randint', (['(0)', 'ACTIONS_DIMENTION'], {}), '(0, ACTIONS_DIMENTION)\n', (1547, 1569), True, 'import numpy as np\n'), ((2135, 2188), 'torch.FloatTensor', 'torch.FloatTensor', (['sampleMemory[:, :STATES_DIMENTION]'], {}), '(sampleMemory[:, :STATES_DIMENTION])\n', (2152, 2188), False, 'import torch\n'), ((2329, 2406), 'torch.FloatTensor', 'torch.FloatTensor', (['sampleMemory[:, STATES_DIMENTION + 1:STATES_DIMENTION + 2]'], {}), '(sampleMemory[:, STATES_DIMENTION + 1:STATES_DIMENTION + 2])\n', (2346, 2406), False, 'import torch\n'), ((2436, 2490), 'torch.FloatTensor', 'torch.FloatTensor', (['sampleMemory[:, -STATES_DIMENTION:]'], {}), '(sampleMemory[:, -STATES_DIMENTION:])\n', (2453, 2490), False, 'import torch\n'), ((1240, 1260), 'torch.FloatTensor', 
'torch.FloatTensor', (['x'], {}), '(x)\n', (1257, 1260), False, 'import torch\n'), ((1416, 1442), 'torch.max', 'torch.max', (['actionsValue', '(1)'], {}), '(actionsValue, 1)\n', (1425, 1442), False, 'import torch\n')] |
import os
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from dataset import TestDataset
from models import ModelBuilder, SegmentationModule
from utils import colorEncode
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy
from constants import REQ_FILE_NAME, RES_FILE_NAME, DATASET_CONFIG, colors
import io
import base64
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
# Resolve all relative resource paths against the process working directory.
cwd = os.getcwd()
# Rebind the imported `colors` palette (from constants) as a uint8 array so
# it can be consumed by colorEncode / PIL.
colors = np.array(colors).astype(np.uint8)
def save_img(img, pred):
    """Binarize the class map to "sky vs. everything else" and write it to
    RES_FILE_NAME in the working directory.

    Note: ``pred`` is modified in place; ``img`` is currently unused.
    """
    # Keep only class 2 (sky): everything else -> 0, sky -> 1.
    pred[pred != 2] = 0
    pred[pred == 2] = 1
    mask = colorEncode(np.int32(pred), colors).astype(np.uint8)
    out_path = os.path.join(cwd, RES_FILE_NAME)
    Image.fromarray(mask).save(out_path)
def test(segmentation_module, loader, gpu):
    """Run multi-scale inference over every batch in ``loader`` and write
    each resulting sky mask to disk via save_img.

    Softmax scores are averaged across the resized copies of each image
    (one per entry in DATASET_CONFIG["imgSizes"]) before taking the argmax.
    """
    segmentation_module.eval()
    for batch_data in loader:
        batch_data = batch_data[0]
        # Target size = original image resolution (H, W).
        segSize = (batch_data['img_ori'].shape[0],
                   batch_data['img_ori'].shape[1])
        img_resized_list = batch_data['img_data']
        with torch.no_grad():
            scores = torch.zeros(1, DATASET_CONFIG["num_class"], segSize[0], segSize[1])
            scores = async_copy_to(scores, gpu)
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                # Keep only the tensors the model consumes.
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, gpu)
                # forward pass
                pred_tmp = segmentation_module(feed_dict, segSize=segSize)
                # Average predictions over all inference scales.
                scores = scores + pred_tmp / len(DATASET_CONFIG["imgSizes"])
            # Per-pixel class = argmax over the class dimension.
            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())
        save_img(batch_data['img_ori'], pred)
def loadModel(model):
    """Build the encoder/decoder pair described by the ``model`` config dict
    and wrap them in a CUDA-resident SegmentationModule ready for inference.
    """
    encoder = ModelBuilder.build_encoder(
        arch=model["encoder_arch"],
        fc_dim=model["fc_dim"],
        weights=os.path.join(cwd, model["encoder_weights"]),
    )
    decoder = ModelBuilder.build_decoder(
        arch=model["decoder_arch"],
        fc_dim=model["fc_dim"],
        num_class=model["num_class"],
        weights=os.path.join(cwd, model["decoder_weights"]),
        use_softmax=True,  # inference mode: emit class probabilities
    )
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)
    seg_module.cuda()
    return seg_module
def getLoader(imgs):
    """Wrap a list of image paths in the TestDataset preprocessing pipeline
    and return a one-sample-per-batch DataLoader over it.
    """
    records = [{'fpath_img': path} for path in imgs]
    dataset = TestDataset(records, DATASET_CONFIG)
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True,
    )
def generateSky(img, model):
    """Run sky segmentation for a single image file on GPU 0; the result is
    written to disk by test()/save_img()."""
    loader = getLoader([os.path.join(cwd, img)])
    gpu = 0
    torch.cuda.set_device(gpu)
    test(model, loader, gpu)
def angle_of_elevation(mask_path):
image = Image.open(os.path.join(cwd, mask_path))
img_array = np.array(image)[:,:,0]
image_height = img_array.shape[0]
lowest = float('inf')
for col in img_array.T:
try:
index = list(col)[::-1].index(255)
if(index < lowest):
lowest = index
except ValueError:
continue
angle = (lowest / image_height) * 90
if angle != "inf":
angle = round(angle, 4)
return str(angle)
if __name__=="__main__":
import time
from constants import HRNET_MODEL as MODEL
model = loadModel(MODEL)
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'image'
@app.route('/handle_data', methods=['POST'])
@cross_origin()
def handle_data():
print("Request received, processing...")
req_file = request.files['image']
req_file.save(REQ_FILE_NAME)
start = time.time()
generateSky(REQ_FILE_NAME, model)
end = time.time()
print("Time taken:", end-start)
img = Image.open(RES_FILE_NAME, mode='r')
img_byte_arr = io.BytesIO()
img.save(img_byte_arr, format='PNG')
encoded_img = base64.encodebytes(img_byte_arr.getvalue()).decode('ascii')
return jsonify({'img': encoded_img, 'elevation': angle_of_elevation(RES_FILE_NAME)})
app.secret_key = 'mysecret'
app.debug = True
app.run(host='0.0.0.0', port="9002")
# app.run(host='0.0.0.0', port='9002', ssl_context='adhoc')
| [
"flask_cors.CORS",
"flask.Flask",
"numpy.int32",
"io.BytesIO",
"flask_cors.cross_origin",
"torch.max",
"numpy.array",
"dataset.TestDataset",
"models.SegmentationModule",
"utils.colorEncode",
"torch.nn.NLLLoss",
"time.time",
"torch.cuda.set_device",
"PIL.Image.fromarray",
"PIL.Image.open"... | [((481, 492), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (490, 492), False, 'import os\n'), ((621, 635), 'numpy.int32', 'np.int32', (['pred'], {}), '(pred)\n', (629, 635), True, 'import numpy as np\n'), ((2279, 2306), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (2289, 2306), True, 'import torch.nn as nn\n'), ((2334, 2384), 'models.SegmentationModule', 'SegmentationModule', (['net_encoder', 'net_decoder', 'crit'], {}), '(net_encoder, net_decoder, crit)\n', (2352, 2384), False, 'from models import ModelBuilder, SegmentationModule\n'), ((2547, 2580), 'dataset.TestDataset', 'TestDataset', (['imgs', 'DATASET_CONFIG'], {}), '(imgs, DATASET_CONFIG)\n', (2558, 2580), False, 'from dataset import TestDataset\n'), ((2594, 2729), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'collate_fn': 'user_scattered_collate', 'num_workers': '(5)', 'drop_last': '(True)'}), '(dataset, batch_size=1, shuffle=False,\n collate_fn=user_scattered_collate, num_workers=5, drop_last=True)\n', (2621, 2729), False, 'import torch\n'), ((2912, 2938), 'torch.cuda.set_device', 'torch.cuda.set_device', (['gpu'], {}), '(gpu)\n', (2933, 2938), False, 'import torch\n'), ((3615, 3630), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (3620, 3630), False, 'from flask import Flask, request, jsonify\n'), ((3642, 3651), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (3646, 3651), False, 'from flask_cors import CORS, cross_origin\n'), ((3748, 3762), 'flask_cors.cross_origin', 'cross_origin', ([], {}), '()\n', (3760, 3762), False, 'from flask_cors import CORS, cross_origin\n'), ((502, 518), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (510, 518), True, 'import numpy as np\n'), ((727, 759), 'os.path.join', 'os.path.join', (['cwd', 'RES_FILE_NAME'], {}), '(cwd, RES_FILE_NAME)\n', (739, 759), False, 'import os\n'), ((2844, 2866), 'os.path.join', 
'os.path.join', (['cwd', 'img'], {}), '(cwd, img)\n', (2856, 2866), False, 'import os\n'), ((3031, 3059), 'os.path.join', 'os.path.join', (['cwd', 'mask_path'], {}), '(cwd, mask_path)\n', (3043, 3059), False, 'import os\n'), ((3077, 3092), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3085, 3092), True, 'import numpy as np\n'), ((3931, 3942), 'time.time', 'time.time', ([], {}), '()\n', (3940, 3942), False, 'import time\n'), ((3999, 4010), 'time.time', 'time.time', ([], {}), '()\n', (4008, 4010), False, 'import time\n'), ((4066, 4101), 'PIL.Image.open', 'Image.open', (['RES_FILE_NAME'], {'mode': '"""r"""'}), "(RES_FILE_NAME, mode='r')\n", (4076, 4101), False, 'from PIL import Image\n'), ((4125, 4137), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (4135, 4137), False, 'import io\n'), ((650, 675), 'utils.colorEncode', 'colorEncode', (['pred', 'colors'], {}), '(pred, colors)\n', (661, 675), False, 'from utils import colorEncode\n'), ((697, 721), 'PIL.Image.fromarray', 'Image.fromarray', (['res_img'], {}), '(res_img)\n', (712, 721), False, 'from PIL import Image\n'), ((1069, 1084), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1082, 1084), False, 'import torch\n'), ((1107, 1174), 'torch.zeros', 'torch.zeros', (['(1)', "DATASET_CONFIG['num_class']", 'segSize[0]', 'segSize[1]'], {}), "(1, DATASET_CONFIG['num_class'], segSize[0], segSize[1])\n", (1118, 1174), False, 'import torch\n'), ((1196, 1222), 'lib.nn.async_copy_to', 'async_copy_to', (['scores', 'gpu'], {}), '(scores, gpu)\n', (1209, 1222), False, 'from lib.nn import user_scattered_collate, async_copy_to\n'), ((1699, 1723), 'torch.max', 'torch.max', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (1708, 1723), False, 'import torch\n'), ((1974, 2017), 'os.path.join', 'os.path.join', (['cwd', "model['encoder_weights']"], {}), "(cwd, model['encoder_weights'])\n", (1986, 2017), False, 'import os\n'), ((2196, 2239), 'os.path.join', 'os.path.join', (['cwd', "model['decoder_weights']"], {}), "(cwd, 
model['decoder_weights'])\n", (2208, 2239), False, 'import os\n'), ((1462, 1491), 'lib.nn.async_copy_to', 'async_copy_to', (['feed_dict', 'gpu'], {}), '(feed_dict, gpu)\n', (1475, 1491), False, 'from lib.nn import user_scattered_collate, async_copy_to\n')] |
# Copyright (c) 2021 Cisco Systems, Inc. and its affiliates
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
# import matplotlib.pyplot as plt
import numpy as np
import PIL
import torch
from monai.apps import download_and_extract
from monai.networks.nets import DenseNet121
from monai.transforms import (
Activations, AddChannel, AsDiscrete, Compose, LoadImage, ScaleIntensity,
ToTensor
)
from monai.utils import set_determinism
from sklearn.metrics import classification_report
from ....channel_manager import ChannelManager
# monai mednist example from
# https://github.com/Project-MONAI/tutorials/blob/master/2d_classification/mednist_tutorial.ipynb
class MedNISTDataset(torch.utils.data.Dataset):
    """Torch dataset pairing MedNIST image paths with integer class labels.

    Images are loaded lazily: ``transforms`` is applied to the stored file
    path only when an item is requested.
    """

    def __init__(self, image_files, labels, transforms):
        self.image_files = image_files  # list of image file paths
        self.labels = labels            # parallel list of class indices
        self.transforms = transforms    # callable applied to each path

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, index):
        path = self.image_files[index]
        return self.transforms(path), self.labels[index]
class Aggregator(object):
    """Federated-learning aggregator for the MONAI MedNIST example.

    Broadcasts the global DenseNet121 weights to every trainer on
    'param-channel', collects their locally trained weights, merges them by
    sample-count-weighted federated averaging, and evaluates the merged
    model on a held-out MedNIST test split after each round.
    """
    def __init__(self, config_file: str, rounds=1):
        self.cm = ChannelManager()
        # NOTE(review): calling the manager instance appears to load the
        # backend config -- confirm against ChannelManager.__call__.
        self.cm(config_file)
        self.cm.join('param-channel')
        self._rounds = rounds
    def prepare(self):
        """Download/prepare the MedNIST data, build the test loader, and
        construct the initial (untrained) global model on CPU."""
        root_dir = '/tmp'
        resource = "https://www.dropbox.com/s/5wwskxctvcxiuea/MedNIST.tar.gz?dl=1"
        md5 = "0bc7306e7427e00ad1c5526a6677552d"
        compressed_file = os.path.join(root_dir, "MedNIST.tar.gz")
        data_dir = os.path.join(root_dir, "MedNIST")
        # Download once; later runs reuse the extracted archive.
        if not os.path.exists(data_dir):
            download_and_extract(resource, compressed_file, root_dir, md5)
        set_determinism(seed=0)
        # Class label = sub-directory name under the MedNIST root.
        class_names = sorted(
            x for x in os.listdir(data_dir)
            if os.path.isdir(os.path.join(data_dir, x))
        )
        num_class = len(class_names)
        image_files = [
            [
                os.path.join(data_dir, class_names[i], x)
                for x in os.listdir(os.path.join(data_dir, class_names[i]))
            ] for i in range(num_class)
        ]
        num_each = [len(image_files[i]) for i in range(num_class)]
        image_files_list = []
        image_class = []
        for i in range(num_class):
            image_files_list.extend(image_files[i])
            image_class.extend([i] * num_each[i])
        num_total = len(image_class)
        image_width, image_height = PIL.Image.open(image_files_list[0]).size
        print(f"Total image count: {num_total}")
        print(f"Image dimensions: {image_width} x {image_height}")
        print(f"Label names: {class_names}")
        print(f"Label counts: {num_each}")
        # plt.subplots(3, 3, figsize=(8, 8))
        # for i, k in enumerate(np.random.randint(num_total, size=9)):
        #     im = PIL.Image.open(image_files_list[k])
        #     arr = np.array(im)
        #     plt.subplot(3, 3, i + 1)
        #     plt.xlabel(class_names[image_class[k]])
        #     plt.imshow(arr, cmap="gray", vmin=0, vmax=255)
        # plt.tight_layout()
        # plt.show()
        # Random 80/10/10 train/val/test split; only the test slice is used
        # here (training happens on the remote trainers).
        val_frac = 0.1
        test_frac = 0.1
        length = len(image_files_list)
        indices = np.arange(length)
        np.random.shuffle(indices)
        test_split = int(test_frac * length)
        val_split = int(val_frac * length) + test_split
        test_indices = indices[:test_split]
        val_indices = indices[test_split:val_split]
        train_indices = indices[val_split:]
        train_x = [image_files_list[i] for i in train_indices]
        val_x = [image_files_list[i] for i in val_indices]
        test_x = [image_files_list[i] for i in test_indices]
        test_y = [image_class[i] for i in test_indices]
        print(
            f"Training count: {len(train_x)}, Validation count: "
            f"{len(val_x)}, Test count: {len(test_x)}"
        )
        val_transforms = Compose(
            [
                LoadImage(image_only=True),
                AddChannel(),
                ScaleIntensity(),
                ToTensor()
            ]
        )
        act = Activations(softmax=True)
        to_onehot = AsDiscrete(to_onehot=True, n_classes=num_class)
        test_ds = MedNISTDataset(test_x, test_y, val_transforms)
        test_loader = torch.utils.data.DataLoader(
            test_ds, batch_size=300, num_workers=10
        )
        device = torch.device("cpu")
        model = DenseNet121(
            spatial_dims=2, in_channels=1, out_channels=num_class
        ).to(device)
        self._model = model
        self._device = device
        self._test_loader = test_loader
        self._class_names = class_names
        # act / to_onehot are stored for post-processing but unused below.
        self.act = act
        self.to_onehot = to_onehot
    def test(self):
        """Evaluate the current global model on the held-out test split and
        print a per-class classification report."""
        self._model.eval()
        y_true = []
        y_pred = []
        with torch.no_grad():
            for test_data in self._test_loader:
                test_images = test_data[0].to(self._device)
                test_labels = test_data[1].to(self._device)
                pred = self._model(test_images).argmax(dim=1)
                for i in range(len(pred)):
                    y_true.append(test_labels[i].item())
                    y_pred.append(pred[i].item())
        print(
            classification_report(
                y_true, y_pred, target_names=self._class_names, digits=4
            )
        )
    def run(self):
        """Drive the federated rounds: broadcast, collect, average, evaluate."""
        self.prepare()
        channel = self.cm.get('param-channel')
        i = 0
        while i < self._rounds:
            print(f'>>> round {i+1}')
            # send out global model parameters to trainers
            for end in channel.ends():
                channel.send(end, self._model.state_dict())
            # TODO: lines below need to be abstracted for different
            # frontends (e.g., keras, pytorch, etc)
            total = 0
            state_array = []
            # receive local model parameters from trainers
            for end in channel.ends():
                msg = channel.recv(end)
                if not msg:
                    print('no data received')
                    continue
                state_dict = msg[0]
                count = msg[1]
                total += count
                state_array.append((state_dict, count))
                print(f'got {end}\'s parameters trained with {count} samples')
            # Nothing arrived this round: wait briefly and retry the same
            # round rather than advancing i.
            if len(state_array) == 0 or total == 0:
                print('no local model parameters are obtained')
                time.sleep(1)
                continue
            # Federated averaging weighted by each trainer's sample count.
            count = state_array[0][1]
            rate = count / total
            global_state = state_array[0][0]
            for k, v in global_state.items():
                global_state[k] = v * rate
            for state_dict, count in state_array[1:]:
                rate = count / total
                for k in state_dict.keys():
                    global_state[k] += state_dict[k] * rate
            self._model.load_state_dict(global_state)
            self.test()
            i += 1
# example cmd: python3 -m flame.examples.mednist.aggregator.main --rounds 3
# run the above command in flame/lib/python folder
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='')
    parser.add_argument(
        '--rounds', type=int, default=1, help='number of training rounds'
    )
    args = parser.parse_args()
    # Config path is relative to the repo root (see the comment above).
    aggregator = Aggregator(
        'flame/examples/mednist/aggregator/config.json',
        args.rounds,
    )
    aggregator.run()
| [
"sklearn.metrics.classification_report",
"time.sleep",
"numpy.arange",
"monai.transforms.LoadImage",
"monai.utils.set_determinism",
"os.path.exists",
"monai.transforms.ScaleIntensity",
"os.listdir",
"argparse.ArgumentParser",
"monai.transforms.AddChannel",
"monai.transforms.ToTensor",
"monai.a... | [((7769, 7808), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (7792, 7808), False, 'import argparse\n'), ((2013, 2053), 'os.path.join', 'os.path.join', (['root_dir', '"""MedNIST.tar.gz"""'], {}), "(root_dir, 'MedNIST.tar.gz')\n", (2025, 2053), False, 'import os\n'), ((2073, 2106), 'os.path.join', 'os.path.join', (['root_dir', '"""MedNIST"""'], {}), "(root_dir, 'MedNIST')\n", (2085, 2106), False, 'import os\n'), ((2232, 2255), 'monai.utils.set_determinism', 'set_determinism', ([], {'seed': '(0)'}), '(seed=0)\n', (2247, 2255), False, 'from monai.utils import set_determinism\n'), ((3748, 3765), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (3757, 3765), True, 'import numpy as np\n'), ((3774, 3800), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (3791, 3800), True, 'import numpy as np\n'), ((4653, 4678), 'monai.transforms.Activations', 'Activations', ([], {'softmax': '(True)'}), '(softmax=True)\n', (4664, 4678), False, 'from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, LoadImage, ScaleIntensity, ToTensor\n'), ((4699, 4746), 'monai.transforms.AsDiscrete', 'AsDiscrete', ([], {'to_onehot': '(True)', 'n_classes': 'num_class'}), '(to_onehot=True, n_classes=num_class)\n', (4709, 4746), False, 'from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, LoadImage, ScaleIntensity, ToTensor\n'), ((4835, 4903), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_ds'], {'batch_size': '(300)', 'num_workers': '(10)'}), '(test_ds, batch_size=300, num_workers=10)\n', (4862, 4903), False, 'import torch\n'), ((4944, 4963), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4956, 4963), False, 'import torch\n'), ((2122, 2146), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (2136, 2146), False, 'import os\n'), ((2160, 2222), 'monai.apps.download_and_extract', 
'download_and_extract', (['resource', 'compressed_file', 'root_dir', 'md5'], {}), '(resource, compressed_file, root_dir, md5)\n', (2180, 2222), False, 'from monai.apps import download_and_extract\n'), ((2988, 3023), 'PIL.Image.open', 'PIL.Image.open', (['image_files_list[0]'], {}), '(image_files_list[0])\n', (3002, 3023), False, 'import PIL\n'), ((5379, 5394), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5392, 5394), False, 'import torch\n'), ((5805, 5884), 'sklearn.metrics.classification_report', 'classification_report', (['y_true', 'y_pred'], {'target_names': 'self._class_names', 'digits': '(4)'}), '(y_true, y_pred, target_names=self._class_names, digits=4)\n', (5826, 5884), False, 'from sklearn.metrics import classification_report\n'), ((2488, 2529), 'os.path.join', 'os.path.join', (['data_dir', 'class_names[i]', 'x'], {}), '(data_dir, class_names[i], x)\n', (2500, 2529), False, 'import os\n'), ((4495, 4521), 'monai.transforms.LoadImage', 'LoadImage', ([], {'image_only': '(True)'}), '(image_only=True)\n', (4504, 4521), False, 'from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, LoadImage, ScaleIntensity, ToTensor\n'), ((4539, 4551), 'monai.transforms.AddChannel', 'AddChannel', ([], {}), '()\n', (4549, 4551), False, 'from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, LoadImage, ScaleIntensity, ToTensor\n'), ((4569, 4585), 'monai.transforms.ScaleIntensity', 'ScaleIntensity', ([], {}), '()\n', (4583, 4585), False, 'from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, LoadImage, ScaleIntensity, ToTensor\n'), ((4603, 4613), 'monai.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (4611, 4613), False, 'from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, LoadImage, ScaleIntensity, ToTensor\n'), ((4980, 5046), 'monai.networks.nets.DenseNet121', 'DenseNet121', ([], {'spatial_dims': '(2)', 'in_channels': '(1)', 'out_channels': 'num_class'}), '(spatial_dims=2, 
in_channels=1, out_channels=num_class)\n', (4991, 5046), False, 'from monai.networks.nets import DenseNet121\n'), ((7038, 7051), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7048, 7051), False, 'import time\n'), ((2310, 2330), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (2320, 2330), False, 'import os\n'), ((2360, 2385), 'os.path.join', 'os.path.join', (['data_dir', 'x'], {}), '(data_dir, x)\n', (2372, 2385), False, 'import os\n'), ((2566, 2604), 'os.path.join', 'os.path.join', (['data_dir', 'class_names[i]'], {}), '(data_dir, class_names[i])\n', (2578, 2604), False, 'import os\n')] |
'''
CIS 419/519 project: Using decision tree ensembles to infer the pathological
cause of age-related neurodegenerative changes based on clinical assessment
nadfahors: <NAME>, <NAME>, & <NAME>
This file contains code for preparing NACC data for analysis, including:
* synthesis of pathology data to create pathology class outcomes
* dropping uninformative variables from predictor set
* identifying and merging/resolving redundant clusters of variables
* identifying missing data codes and replacing with NaNs as appropriate
* creating change variables from longitudinal data
* imputation of missing data
* categorizing retained variables as interval/ratio, ordinal, or nominal
* creation of dummy variables for nominal variables
* standardizing interval/ratio and ordinal variables
* creating date variables, then converting these to useful ages or intervals
* quadratic expansion for interval/ratio variables?
'''
# Module imports
import pandas as pd
import numpy as np
import datetime
# Read in full dataset. Warning: this is about 340 MB.
# Both CSVs are expected in the current working directory.
fulldf = pd.read_csv('investigator_nacc48.csv')
# List of Uniform Data Set (UDS) values that will serve as potential
# predictors. Those with a "False" next to them will be excluded after data
# preparation; those with a True will be kept.
xvar = pd.read_csv('xvar.csv')
# Variables from the NACC neuropathology table that will be used to group
# individuals by pathology class:
# 1) Alzheimer's disease (AD);
# 2) frontotemporal lobar degeneration due to tauopathy (FTLD-tau)
# 3) frontotemporal lobar degeneration due to TDP-43 (FTLD-TDP)
# 4) Lewy body disease due to alpha synuclein (including Lewy body dementia and Parkinson's disease)
# 5) vascular disease
# Path classes: AD (ABC criteria); FTLD-tau; FTLD-TDP, including ALS; Lewy body disease (are PD patients captured here?); vascular
# NOTE(review): np.array on this mixed str/int/bool literal coerces every
# element to a string, so the resulting 'Keep' column holds the strings
# '0'/'False'/'True', not booleans -- downstream filtering must compare
# against strings (or cast first). Confirm before using npvar.Keep.
npvar = pd.DataFrame(np.array(["NPPMIH",0, # Postmortem interval--keep in as a potential confound variable?
"NPFIX",0,
"NPFIXX",0,
"NPWBRWT",0,
"NPWBRF",0,
"NACCBRNN",0,
"NPGRCCA",0,
"NPGRLA",0,
"NPGRHA",0,
"NPGRSNH",0,
"NPGRLCH",0,
"NACCAVAS",0,
"NPTAN",False,
"NPTANX",False,
"NPABAN",False,
"NPABANX",False,
"NPASAN",False,
"NPASANX",False,
"NPTDPAN",False,
"NPTDPANX",False,
"NPHISMB",False,
"NPHISG",False,
"NPHISSS",False,
"NPHIST",False,
"NPHISO",False,
"NPHISOX",False,
"NPTHAL",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCBRAA",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCNEUR",False,# Use for ABC scoring to create ordinal measure of AD change
"NPADNC",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCDIFF",False,
"NACCVASC",False,# Vasc presence/absence
"NACCAMY",False,
"NPLINF",False,
"NPLAC",False,
"NPINF",False,# Derived variable summarizing several assessments of infarcts and lacunes
"NPINF1A",False,
"NPINF1B",False,
"NPINF1D",False,
"NPINF1F",False,
"NPINF2A",False,
"NPINF2B",False,
"NPINF2D",False,
"NPINF2F",False,
"NPINF3A",False,
"NPINF3B",False,
"NPINF3D",False,
"NPINF3F",False,
"NPINF4A",False,
"NPINF4B",False,
"NPINF4D",False,
"NPINF4F",False,
"NACCINF",False,
"NPHEM",False,
"NPHEMO",False,
"NPHEMO1",False,
"NPHEMO2",False,
"NPHEMO3",False,
"NPMICRO",False,
"NPOLD",False,
"NPOLD1",False,
"NPOLD2",False,
"NPOLD3",False,
"NPOLD4",False,
"NACCMICR",False,# Derived variable for microinfarcts
"NPOLDD",False,
"NPOLDD1",False,
"NPOLDD2",False,
"NPOLDD3",False,
"NPOLDD4",False,
"NACCHEM",False,# Derived variables for microbleeds and hemorrhages
"NACCARTE",False,
"NPWMR",False,
"NPPATH",False,# Other ischemic/vascular pathology
"NACCNEC",False,
"NPPATH2",False,
"NPPATH3",False,
"NPPATH4",False,
"NPPATH5",False,
"NPPATH6",False,
"NPPATH7",False,
"NPPATH8",False,
"NPPATH9",False,
"NPPATH10",False,
"NPPATH11",False,
"NPPATHO",False,
"NPPATHOX",False,
"NPART",False,
"NPOANG",False,
"NACCLEWY",False,# Note that limbic/transitional and amygdala-predominant are not differentiated
"NPLBOD",False,# But here they are differentiated!
"NPNLOSS",False,
"NPHIPSCL",False,
"NPSCL",False,
"NPFTDTAU",False,# FTLD-tau
"NACCPICK",False,# FTLD-tau
"NPFTDT2",False,# FTLD-tau
"NACCCBD",False,# FTLD-tau
"NACCPROG",False,# FTLD-tau
"NPFTDT5",False,# FTLD-tau
"NPFTDT6",False,# FTLD-tau
"NPFTDT7",False,# FTLD-tau
"NPFTDT8",False,# This is FTLD-tau but associated with ALS/parkinsonism--wut?
"NPFTDT9",False,# tangle-dominant disease--is this PART? Maybe exclude cases who have this as only path type.
"NPFTDT10",False,# FTLD-tau: other 3R+4R tauopathy. What is this if not AD? Maybe exclude. How many cases?
"NPFRONT",False,# FTLD-tau
"NPTAU",False,# FTLD-tau
"NPFTD",False,# FTLD-TDP
"NPFTDTDP",False,# FTLD-TDP
"NPALSMND",False,# FTLD-TDP (but exclude FUS and SOD1)
"NPOFTD",False,
"NPOFTD1",False,
"NPOFTD2",False,
"NPOFTD3",False,
"NPOFTD4",False,
"NPOFTD5",False,
"NPFTDNO",False,
"NPFTDSPC",False,
"NPTDPA",False,# In second pass, use anatomical distribution to stage
"NPTDPB",False,# In second pass, use anatomical distribution to stage
"NPTDPC",False,# In second pass, use anatomical distribution to stage
"NPTDPD",False,# In second pass, use anatomical distribution to stage
"NPTDPE",False,# In second pass, use anatomical distribution to stage
"NPPDXA",False,# Exclude?
"NPPDXB",False,# Exclude
"NACCPRIO",False,# Exclude
"NPPDXD",False,# Exclude
"NPPDXE",False,
"NPPDXF",False,
"NPPDXG",False,
"NPPDXH",False,
"NPPDXI",False,
"NPPDXJ",False,
"NPPDXK",False,
"NPPDXL",False,
"NPPDXM",False,
"NPPDXN",False,
"NACCDOWN",False,
"NACCOTHP",False,# Survey for exclusion criteria
"NACCWRI1",False,# Survey for exclusion criteria
"NACCWRI2",False,# Survey for exclusion criteria
"NACCWRI3",False,# Survey for exclusion criteria
"NACCBNKF",False,
"NPBNKB",False,
"NACCFORM",False,
"NACCPARA",False,
"NACCCSFP",False,
"NPBNKF",False,
"NPFAUT",False,
"NPFAUT1",False,
"NPFAUT2",False,
"NPFAUT3",False,
"NPFAUT4",False,
"NACCINT",False,
"NPNIT",False,
"NPCERAD",False,# What sort of variable?
"NPADRDA",False,
"NPOCRIT",False,
"NPVOTH",False,
"NPLEWYCS",False,
"NPGENE",True,# Family history--include in predictors?
"NPFHSPEC",False,# Code as dummy variables if useful.
"NPCHROM",False,# Exclusion factor? Genetic/chromosomal abnormalities
"NPPNORM",False,# Check all the following variables for redundancy with the ones above.
"NPCNORM",False,
"NPPADP",False,
"NPCADP",False,
"NPPAD",False,
"NPCAD",False,
"NPPLEWY",False,
"NPCLEWY",False,
"NPPVASC",False,
"NPCVASC",False,
"NPPFTLD",False,
"NPCFTLD",False,
"NPPHIPP",False,
"NPCHIPP",False,
"NPPPRION",False,
"NPCPRION",False,
"NPPOTH1",False,
"NPCOTH1",False,
"NPOTH1X",False,
"NPPOTH2",False,
"NPCOTH2",False,
"NPOTH2X",False,
"NPPOTH3",False,
"NPCOTH3",False,
"NPOTH3X",0]).reshape((-1,2)))
npvar.columns = ['Variable','Keep']
## Case selection process.
# Include only those with autopsy data.
aut = fulldf[fulldf.NACCAUTP == 1]
# Free the full dataframe immediately to keep peak memory down.
del fulldf
def table(a, b, df=None):
    """Print (and return) a cross-tabulation of two columns.

    Parameters
    ----------
    a, b : str
        Column names to cross-tabulate (rows = ``a``, columns = ``b``).
    df : pandas.DataFrame, optional
        Frame to tabulate. Defaults to the module-level autopsy subset
        ``aut``, preserving the original ``table(a, b)`` call signature.

    Returns
    -------
    pandas.DataFrame
        The crosstab (NaN categories kept, 'All' margins included), so
        callers can inspect it programmatically as well as read the
        printed output.
    """
    if df is None:
        df = aut  # module-level autopsy dataframe defined above
    ct = pd.crosstab(df[a], df[b], dropna=False, margins=True)
    print(ct)
    return ct
# Exclude for Down's, Huntington's, and other conditions.
aut = aut.loc[aut.DOWNS != 1]
aut = aut.loc[aut.HUNT != 1]
aut = aut.loc[aut.PRION != 1]
# NOTE(review): isin([1,2,3]) presumably drops rows where the condition was
# coded present (primary/contributing/non-contributing) -- confirm against
# the NACC data dictionary.
aut = aut.loc[~aut.MSAIF.isin([1,2,3])]
aut = aut.loc[~aut.NEOPIF.isin([1,2,3])]
aut = aut.loc[~aut.SCHIZOIF.isin([1,2,3])]
# Reset to a clean 0..n-1 index after row filtering.
aut.index = list(range(aut.shape[0]))
# How many unique IDs?
# For now, keep in follow-up visits to increase our training data.
uids = aut.NACCID[~aut.NACCID.duplicated()]
#aut = aut[~aut.NACCID.duplicated()]
## Coding of pathology class outcomes.
# Create binary variables for the presence of each pathology class of interest.
# Code Alzheimer's disease pathology based on NPADNC, which implements
# ABC scoring based on Montine et al. (2012).
aut = aut.assign(ADPath = 0)
aut.loc[aut.NPADNC.isin((2,3)),'ADPath'] = 1
aut.loc[aut.NPPAD == 1,'ADPath'] = 1
# The following two commands make the ADPath variable false if the AD path
# diagnosis is as contributing, not as primary.
aut.loc[aut.NPPAD == 2,'ADPath'] = 0
aut.loc[aut.NPCAD == 1,'ADPath'] = 0
# Clear ADPath when another pathology was the primary diagnosis.
aut.loc[aut.NPPVASC == 1,'ADPath'] = 0
aut.loc[aut.NPPLEWY == 1,'ADPath'] = 0
aut.loc[aut.NPPFTLD == 1,'ADPath'] = 0
# Several variables pertain to FTLD tauopathies.
aut = aut.assign(TauPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTDTAU == 1,'TauPath'] = 1
aut.loc[aut.NACCPICK == 1,'TauPath'] = 1
aut.loc[aut.NACCCBD == 1,'TauPath'] = 1
aut.loc[aut.NACCPROG == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT2 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT5 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT6 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT7 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT9 == 1,'TauPath'] = 1
aut.loc[aut.NPFRONT == 1,'TauPath'] = 1
aut.loc[aut.NPTAU == 1,'TauPath'] = 1
# AD takes precedence over FTLD-tau; contributing-only FTLD is cleared.
aut.loc[aut.ADPath == 1, 'TauPath'] = 0
aut.loc[aut.NPCFTLD == 1, 'TauPath'] = 0
# Code Lewy body disease based on NPLBOD variable. Do not include amygdala-
# predominant, brainstem-predominant, or olfactory-only cases.
# See Toledo et al. (2016, Acta Neuropathol) and Irwin et al. (2018, Nat Rev
# Neuro).
aut = aut.assign(LBPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPLBOD.isin((2,3)),'LBPath'] = 1
aut.loc[aut.NPPLEWY == 1,'LBPath'] = 1
aut.loc[aut.NPPLEWY == 2,'LBPath'] = 0
aut.loc[aut.NPCLEWY == 1,'LBPath'] = 0
aut.loc[aut.ADPath == 1 & (aut.NPPLEWY != 1), 'LBPath'] = 0
aut.loc[aut.TauPath == 1 & (aut.NPPLEWY != 1),'LBPath'] = 0
# Code TDP-43 pathology based on NPFTDTDP and NPALSMND, excluding FUS and SOD1
# cases.
aut = aut.assign(TDPPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTD == 1,'TDPPath'] = 1
aut.loc[aut.NPFTDTDP == 1,'TDPPath'] = 1
aut.loc[aut.NPALSMND == 1,'TDPPath'] = 1
aut.loc[aut.ADPath == 1, 'TDPPath'] = 0
aut.loc[aut.LBPath == 1, 'TDPPath'] = 0
aut.loc[aut.TauPath == 1, 'TDPPath'] = 0
# Code vascular disease based on relevant derived variables:
aut = aut.assign(VPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPINF == 1,'VPath'] = 1
aut.loc[aut.NACCMICR == 1,'VPath'] = 1
aut.loc[aut.NACCHEM == 1,'VPath'] = 1
aut.loc[aut.NPPATH == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 2,'VPath'] = 0
aut.loc[aut.NPCVASC == 1,'VPath'] = 0
aut.loc[aut.ADPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[aut.LBPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[aut.NPPFTLD == 1 & (aut.NPPVASC != 1),'VPath'] = 0
aut.loc[aut.TDPPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[aut.TauPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut = aut.assign(Class = aut.ADPath)
aut.loc[aut.TauPath == 1,'Class'] = 2
aut.loc[aut.TDPPath == 1,'Class'] = 3
aut.loc[aut.LBPath == 1,'Class'] = 4
aut.loc[aut.VPath == 1,'Class'] = 5
aut = aut.loc[aut.Class != 0]
aut.index = list(range(aut.shape[0]))
## Predictor variable preparation: one-hot-encoding, date/age/interval operations,
# consolidating redundant variables, consolidating free-text variables.
aut = aut.assign(DOB = aut.BIRTHYR)
aut = aut.assign(DOD = aut.NACCYOD)
aut = aut.assign(VISITDATE = aut.VISITYR)
for i in range(aut.shape[0]):
aut.loc[i,'DOB'] = datetime.datetime.strptime('-'.join([str(aut.BIRTHYR.loc[i]),str(aut.BIRTHMO.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'DOD'] = datetime.datetime.strptime('-'.join([str(aut.NACCYOD.loc[i]),str(aut.NACCMOD.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'VISITDATE'] = datetime.datetime.strptime('-'.join([str(aut.VISITYR.loc[i]),str(aut.VISITMO.loc[i]),str(aut.VISITDAY.loc[i])]),'%Y-%m-%d')
# Some time/interval variables
aut = aut.assign(SinceQUITSMOK = aut.NACCAGE - aut.QUITSMOK) # Years since quitting smoking
aut = aut.assign(AgeStroke = aut.NACCSTYR - aut.BIRTHYR)
aut = aut.assign(AgeTIA = aut.NACCTIYR - aut.BIRTHYR)
aut = aut.assign(AgePD = aut.PDYR - aut.BIRTHYR)
aut = aut.assign(AgePDOTHR = aut.PDOTHRYR - aut.BIRTHYR)
aut = aut.assign(AgeTBI = aut.TBIYEAR - aut.BIRTHYR)
aut = aut.assign(Duration = aut.NACCAGE - aut.DECAGE)
# Hispanic origin
aut.HISPORX = aut.HISPORX.str.lower()
aut.loc[aut.HISPORX == 'spanish','HISPORX'] = 'spain'
# Race. RACESECX and RACETERX have too few values to be useful.
aut.RACEX = aut.RACEX.str.lower().str.replace(' ','').str.replace('-','')
aut.loc[aut.RACEX.isin(['hispanic','puerto rican']),'RACEX'] = 'latino'
aut.loc[aut.RACEX.isin(['guam - chamorro']),'RACEX'] = 'chamorro'
aut.loc[aut.RACEX.isin(['multi racial']),'RACEX'] = 'multiracial'
# Other language. But actually, let's just drop this and code as English/non-English.
#aut.PRIMLANX = aut.PRIMLANX.str.lower().str.replace(' ','').str.replace('-','')
# Drug list. First get a list of all the unique drug names, then code as dummy variables.
# Update as of 04/01/2020: drugs alone are going to be a huge amount of work.
# For now, just rely on the NACC derived variables for diabetes meds, cardiac drugs, etc.
drugcols = ['DRUG' + str(i) for i in range(1,41)]
drugs = aut[drugcols].stack()
# Several varieties of insulin--important to distinguish?
# drop "*not-codable"
# drop "diphtheria/hepb/pertussis,acel/polio/tetanus"
drugs = drugs.unique()
drugs = [eachdrug.lower() for eachdrug in drugs.tolist()]
drugs = pd.Series(drugs)
drug_corrections = [("multivitamin with minerals","multivitamin"),
("multivitamin, prenatal","multivitamin"),
("omega 3-6-9","omega369"),
("omega-3","omega3"),
("vitamin-d","vitamin d"),
("acetyl-l-carnitine","acetyl l carnitine"),
("levodopa","levadopa"),
("pro-stat","prostat"),
("alpha-d-galactosidase","alpha d galactosidase"),
("indium pentetate in-111","indium pentetate in111"),
("fludeoxyglucose f-18","fludeoxyglucose f18"),
("calcium with vitamins d and k", "calcium-vitamin d-vitamin k"),
("aloe vera topical", "aloe vera"),
("ammonium lactate topical", "ammonium lactate")]
for i in range(len(drug_corrections)):
oldval = drug_corrections[i][0]
newval = drug_corrections[i][1]
drugs = drugs.str.replace(pat = oldval, repl = newval)
drugs = drugs.loc[drugs != "*not codable*"]
drugs = drugs.loc[drugs != "diphtheria/hepb/pertussis,acel/polio/tetanus"]
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('-')])
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('/')])
drugs.sort()
## Combining redundant variables. Often this reflects a change in form or
# variable name between UDS version 2 & 3.
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 0),'CVPACE'] = 0
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 1),'CVPACE'] = 1
xvar.loc[xvar.Variable == 'CVPACDEF','Keep'] = False
# Combine TBIBRIEF and TRAUMBRF.
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([0])),'TBIBRIEF'] = 0
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([1,2])),'TBIBRIEF'] = 1
xvar.loc[xvar.Variable == 'TRAUMBRF','Keep'] = False
# More data cleaning
aut.ABRUPT = aut.ABRUPT.replace(to_replace = 2, value = 1)
aut.FOCLSYM = aut.FOCLSYM.replace(to_replace = 2, value = 1)
aut.FOCLSIGN = aut.FOCLSIGN.replace(to_replace = 2, value = 1)
# Convert language to a binary variable (English/non-English)
aut = aut.assign(English = 0)
aut.loc[aut.PRIMLANG == 1,'English'] = 1
xvar.loc[xvar.Variable == 'PRIMLANG','Keep'] = False
# Some dummy coding
vv = xvar.Variable.loc[(xvar.Keep) & (xvar.Comments == "Dummy coding for (95,96,97,98)")]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([95,96,97,98]),v + '_couldnt'] = 1
vv = xvar.Variable.loc[xvar.Comments == "Dummy coding for (995,996,997,998)"]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([995,996,997,998]),v + '_couldnt'] = 1
# Drop all columns where xvar.Keep == False.
aut2 = aut
xvar.loc[xvar.Variable == 'NACCID','Keep'] = True
xvar.loc[xvar.Variable == 'NACCID','Type'] = "ID"
xvar.loc[xvar.Variable == 'VISITDATE','Keep'] = True
xvar.loc[xvar.Variable == 'VISITDATE','Type'] = "ID"
aut = aut.drop(columns = xvar.Variable[~xvar.Keep])
# Fill with NA values
xvar = xvar.loc[xvar.Keep]
xvar.index = range(xvar.shape[0])
for i in range(xvar.shape[0]):
if not xvar.NaNValues.isna()[i]:
v = xvar.Variable[i]
badval = eval(xvar.NaNValues[i])
#print(v,badval)
if isinstance(badval,int):
badval = [badval]
aut[v].mask(aut[v].isin(badval),inplace = True)
# Get rid of variables with very few meaningful observations.
valcounts = aut.describe().iloc[0]
aut = aut.drop(columns = valcounts.loc[valcounts < 100].index)
#aut = aut[valcounts.loc[valcounts >= 100].index]
# Find correlated variables and drop.
ac = aut.corr()
acs = ac.unstack(level = 0)
acs = acs.loc[abs(acs)>0.8]
acsind = list(acs.index)
diagnames = [ind for ind in acsind if ind[0] == ind[1]]
acs = acs.drop(labels=diagnames)
acs = pd.DataFrame(acs)
acs.columns = ['r']
acs['v1'] = acs.index
acs[['v1','v2']] = pd.DataFrame(acs['v1'].tolist(),index = acs.index)
y = aut.Class
X = aut.drop(columns = npvar.Variable.loc[npvar.Variable.isin(aut.columns)])
X = X.drop(columns = ['Class','ADPath','TauPath','TDPPath','LBPath','VPath'])
xd = X.describe().iloc[0]
# Impute numeric variables with the mean.
from sklearn.impute import SimpleImputer
numvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Numeric"])
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
imp_mean.fit(X[numvar])
Xnumimp = imp_mean.transform(X[numvar])
Xnumimp = pd.DataFrame(Xnumimp)
Xnumimp.columns = X[numvar].columns
# Impute ordinal variables with the median.
ordvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Ordinal"])
imp_med = SimpleImputer(missing_values=np.nan, strategy='median')
imp_med.fit(X[ordvar])
Xordimp = imp_med.transform(X[ordvar])
Xordimp = pd.DataFrame(Xordimp)
Xordimp.columns = X[ordvar].columns
# Impute boolean variables with zero.
boolvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Boolean"])
boolenc = SimpleImputer(missing_values = np.nan, strategy = 'constant',
fill_value = 0)
boolenc.fit(X[boolvar])
Xbool = boolenc.transform(X[boolvar])
Xbool = pd.DataFrame(Xbool)
Xbool.columns = X[boolvar].columns
# One-hot encoding for nominal (not boolean, ordinal, or numeric) variables.
from sklearn.preprocessing import OneHotEncoder
nomvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Nominal"])
enc = OneHotEncoder(handle_unknown='ignore',sparse = False)
Xfull = X[nomvar].fillna(value = 0)
enc.fit(Xfull)
Xohe = enc.transform(Xfull)
Xohe = pd.DataFrame(Xohe)
Xohe.columns = enc.get_feature_names(Xfull.columns)
# Put it all together
X = X.drop(columns = boolvar)
X = X.drop(columns = numvar)
X = X.drop(columns = ordvar)
X = pd.concat([X,Xbool,Xnumimp,Xordimp,Xohe],axis = 1)
X = X.drop(columns = nomvar)
# Create 80/20 split between data for training and final testing.
# Do data split stratified by pathology class.
from sklearn.model_selection import train_test_split
classy = aut[['Class','SEX','EDUC']]
classy = classy.assign(HighEd = classy.EDUC > 12)
classy = classy.drop(columns = ['EDUC'])
classy = classy.assign(MasterClass = classy.astype(str).apply(lambda x: '_'.join(x),axis = 1))
uclass = np.unique(classy.MasterClass)
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=666, stratify=classy.MasterClass)
# Create a further split within the training dataset for CV and for validation.
classy2 = classy.iloc[X_train.index]
X_cv, X_val, y_cv, y_val = train_test_split( X_train, y_train, test_size=0.25, random_state=666, stratify=classy2.MasterClass)
X_cv.index = range(X_cv.shape[0])
y_cv.index = range(y_cv.shape[0])
X_val.index = range(X_val.shape[0])
y_val.index = range(y_val.shape[0])
X_test.index = range(X_test.shape[0])
y_test.index = range(y_test.shape[0])
#import pickle
#PIK = "nacc_train.pkl"
#data = [X_cv,y_cv,X_val,y_val]
#with open(PIK, "wb") as f:
# pickle.dump(data, f)
#with open(PIK, "rb") as f:
# pickle_list = pickle.load(f)
# Now load in classifier & classified data to do error analyses.
import pickle
pik = "weovr_classifier_og_data.pickle"
with open(pik, "rb") as f:
pickle_list = pickle.load(f)
# Here are the contents of the pickle:
#data = [weovr_clf, X_train, X_test, y_train, y_test, OG_X, OG_y, OG_weovr_pred]
wovr = pickle_list[0]
X_aug_train = pickle_list[1]
X_aug_val = pickle_list[2]
y_aug_train = pickle_list[3]
y_aug_val = pickle_list[4]
pikX = pd.DataFrame(pickle_list[5])
feat = pd.read_csv("selected_features.csv")
feat = list(feat.columns)
pikX.columns = feat
piky = pd.DataFrame(pickle_list[6])
wovr_pred = pd.Series(pickle_list[7])
#tmptrain = pd.read_csv("X_cv.csv")
#tmptest = pd.read_csv("X_val.csv")
#tmp = pd.concat([tmptrain,tmptest], axis = 0)
OG_X = pd.concat([X_cv, X_val], axis = 0)
OG_X['WOVR'] = wovr_pred
OG_y = pd.DataFrame(pd.concat([y_cv, y_val], axis = 0))
OG_y += -1
OG_y.columns = ["Class"]
OG_y.index = OG_X.index
#Xy = pd.concat([OG_X, OG_y], axis = 1)
addcol = [*['NACCID','VISITDATE','Class','ADPath','TauPath','TDPPath','LBPath','VPath'], *npvar.Variable.to_list()]
Xy = OG_X.merge(right = aut[addcol], how='inner', on=['NACCID','VISITDATE'],
indicator='Merge', validate="1:1")
Xy.Class = Xy.Class - 1
#Xy['WOVR'] = wovr_pred
from sklearn.metrics import confusion_matrix
confusion_matrix(Xy.Class, Xy.WOVR, normalize=None)
# Code some additional neuropath measures.
Xy['Braak03'] = np.ceil(Xy.NACCBRAA/2)
Xy.loc[Xy.Braak03 > 3,'Braak03'] = np.nan
thal = [0, 1, 2, 3, 4, 5,-4, 8, 9]
ascore = [0, 1, 1, 2, 3, 3, np.nan, np.nan, np.nan]
adict = dict(zip(thal,ascore))
Xy['Ascore'] = [adict[a] for a in Xy['NPTHAL']]
Xy['Bscore'] = np.ceil(Xy.NACCBRAA/2)
Xy['Cscore'] = Xy.NACCNEUR
Xy.loc[Xy['Cscore'].isin([8,9]), 'Cscore'] = np.nan
Xy['ABC'] = 0
Xy.loc[(Xy['Ascore'] == 1) & (Xy['Cscore'] < 2),'ABC'] = 1
Xy.loc[(Xy['Ascore'] > 0) & (Xy['Bscore'] < 2),'ABC'] = 1
Xy.loc[(Xy['Ascore'] == 1) & (Xy['Bscore'] > 1) & (Xy['Cscore'] > 1) ,'ABC'] = 2
Xy.loc[(Xy['Ascore'] > 1) & (Xy['Bscore'] > 1),'ABC'] = 2
Xy.loc[(Xy['Ascore'] == 3) & (Xy['Bscore'] == 3) & (Xy['Cscore'] > 1) ,'ABC'] = 3
# AD false alarms: people with primary non-AD pathology who were called AD.
print("Distribution of ABC scores for primary non-AD cases who were classified as AD:")
adfa = Xy.loc[(Xy.WOVR == 0) & (Xy.Class != 0),:]
adfatab = pd.crosstab(adfa['Class'],adfa['ABC'])
adfatab.index = ['Tau', 'TDP', 'LB', 'Vasc']
adfatab.to_latex('adfatab.tex')
# Non-AD false alarms: people with primary AD pathology who were called non-AD.
print("Distribution of ABC scores for primary AD cases who were classified as non-AD:")
nadfa = Xy.loc[(Xy.WOVR != 0) & (Xy.Class == 0),:]
pd.crosstab(nadfa['Class'],nadfa['ABC'])
nadfa.loc[nadfa.NPFTDTAU == 1,'TauPath'] = 1
nadfa.loc[nadfa.NACCPICK == 1,'TauPath'] = 1
nadfa.loc[nadfa.NACCCBD == 1,'TauPath'] = 1
nadfa.loc[nadfa.NACCPROG == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFTDT2 == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFTDT5 == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFTDT6 == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFTDT7 == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFTDT9 == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFRONT == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPTAU == 1,'TauPath'] = 1
# Code Lewy body disease based on NPLBOD variable. Do not include amygdala-
# predominant, brainstem-predominant, or olfactory-only cases.
# See Toledo et al. (2016, Acta Neuropathol) and Irwin et al. (2018, Nat Rev
# Neuro).
nadfa.loc[nadfa.NPLBOD.isin((2,3)),'LBPath'] = 1
nadfa.loc[nadfa.NPPLEWY == 1,'LBPath'] = 1
nadfa.loc[nadfa.NPPLEWY == 2,'LBPath'] = 0
# Code TDP-43 pathology based on NPFTDTDP and NPALSMND, excluding FUS and SOD1
# cases.
nadfa.loc[nadfa.NPFTD == 1,'TDPPath'] = 1
nadfa.loc[nadfa.NPFTDTDP == 1,'TDPPath'] = 1
nadfa.loc[nadfa.NPALSMND == 1,'TDPPath'] = 1
# Code vascular disease based on relevant derived variables:
nadfa.loc[nadfa.NPINF == 1,'VPath'] = 1
nadfa.loc[nadfa.NACCMICR == 1,'VPath'] = 1
nadfa.loc[nadfa.NACCHEM == 1,'VPath'] = 1
nadfa.loc[nadfa.NPPATH == 1,'VPath'] = 1
nadfa.loc[nadfa.NPPVASC == 1,'VPath'] = 1
nadfatab = pd.DataFrame(np.stack([ nadfa.TauPath.value_counts(),
nadfa.TDPPath.value_counts(),
nadfa.LBPath.value_counts(),
nadfa.VPath.value_counts() ]))
nadfatab.index = ['Tau','TDP','LB','Vasc']
nadfatab.columns = ['No','Yes']
nadfatab.to_latex('nadfatab.tex')
# Non-AD false alarms: people with primary AD pathology who were called non-AD.
print("Presence of vascular pathology in cases misclassified as primarily vascular:")
vfa = Xy.loc[(Xy.WOVR == 4) & (Xy.Class != 4),:]
vfa['NPINF'] = vfa['NPINF'].replace(to_replace = [-4,8,9], value = np.nan)
vfa['NACCMICR'] = vfa['NACCMICR'].replace(to_replace = [-4,8,9], value = np.nan)
vfa['NACCHEM'] = vfa['NACCHEM'].replace(to_replace = [-4,8,9], value = np.nan)
vfa['NACCMICR'] = vfa['NACCMICR'].replace(to_replace = [-4,8,9], value = np.nan)
vfa['NPPATH'] = vfa['NPPATH'].replace(to_replace = [-4,8,9], value = np.nan)
vfa['NPPVASC'] = vfa['NPPVASC'].replace(to_replace = [2], value = 0)
vfa['NPPVASC'] = vfa['NPPVASC'].replace(to_replace = [-4,8,9], value = np.nan)
vfa.loc[vfa.NPINF == 1,'VPath'] = 1
vfa.loc[vfa.NACCMICR == 1,'VPath'] = 1
vfa.loc[vfa.NACCHEM == 1,'VPath'] = 1
vfa.loc[vfa.NPPATH == 1,'VPath'] = 1
vfa.loc[vfa.NPPVASC == 1,'VPath'] = 1
vfatab = pd.DataFrame(np.stack([ vfa.NPPVASC.value_counts(),
vfa.NPINF.value_counts(),
vfa.NACCMICR.value_counts(),
vfa.NACCHEM.value_counts(),
vfa.NPPATH.value_counts() ]))
vfatab.index = ['Primary vascular','Old infarcts', 'Microinfarcts','Hemorrhages','Other']
vfatab.columns = ['No','Yes']
vfatab.to_latex('vfatab.tex')
| [
"pandas.Series",
"numpy.ceil",
"numpy.unique",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"pandas.crosstab",
"pickle.load",
"numpy.array",
"sklearn.impute.SimpleImputer",
"pandas.DataFrame",
"pandas.concat",
"sklearn.metrics.confusio... | [((1110, 1148), 'pandas.read_csv', 'pd.read_csv', (['"""investigator_nacc48.csv"""'], {}), "('investigator_nacc48.csv')\n", (1121, 1148), True, 'import pandas as pd\n'), ((1349, 1372), 'pandas.read_csv', 'pd.read_csv', (['"""xvar.csv"""'], {}), "('xvar.csv')\n", (1360, 1372), True, 'import pandas as pd\n'), ((13754, 13770), 'pandas.Series', 'pd.Series', (['drugs'], {}), '(drugs)\n', (13763, 13770), True, 'import pandas as pd\n'), ((17304, 17321), 'pandas.DataFrame', 'pd.DataFrame', (['acs'], {}), '(acs)\n', (17316, 17321), True, 'import pandas as pd\n'), ((17800, 17853), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""mean"""'}), "(missing_values=np.nan, strategy='mean')\n", (17813, 17853), False, 'from sklearn.impute import SimpleImputer\n'), ((17928, 17949), 'pandas.DataFrame', 'pd.DataFrame', (['Xnumimp'], {}), '(Xnumimp)\n', (17940, 17949), True, 'import pandas as pd\n'), ((18116, 18171), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""median"""'}), "(missing_values=np.nan, strategy='median')\n", (18129, 18171), False, 'from sklearn.impute import SimpleImputer\n'), ((18244, 18265), 'pandas.DataFrame', 'pd.DataFrame', (['Xordimp'], {}), '(Xordimp)\n', (18256, 18265), True, 'import pandas as pd\n'), ((18427, 18498), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""constant"""', 'fill_value': '(0)'}), "(missing_values=np.nan, strategy='constant', fill_value=0)\n", (18440, 18498), False, 'from sklearn.impute import SimpleImputer\n'), ((18579, 18598), 'pandas.DataFrame', 'pd.DataFrame', (['Xbool'], {}), '(Xbool)\n', (18591, 18598), True, 'import pandas as pd\n'), ((18841, 18893), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""', 'sparse': '(False)'}), "(handle_unknown='ignore', sparse=False)\n", (18854, 18893), False, 'from 
sklearn.preprocessing import OneHotEncoder\n'), ((18981, 18999), 'pandas.DataFrame', 'pd.DataFrame', (['Xohe'], {}), '(Xohe)\n', (18993, 18999), True, 'import pandas as pd\n'), ((19167, 19220), 'pandas.concat', 'pd.concat', (['[X, Xbool, Xnumimp, Xordimp, Xohe]'], {'axis': '(1)'}), '([X, Xbool, Xnumimp, Xordimp, Xohe], axis=1)\n', (19176, 19220), True, 'import pandas as pd\n'), ((19646, 19675), 'numpy.unique', 'np.unique', (['classy.MasterClass'], {}), '(classy.MasterClass)\n', (19655, 19675), True, 'import numpy as np\n'), ((19711, 19800), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(666)', 'stratify': 'classy.MasterClass'}), '(X, y, test_size=0.2, random_state=666, stratify=classy.\n MasterClass)\n', (19727, 19800), False, 'from sklearn.model_selection import train_test_split\n'), ((19942, 20044), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'test_size': '(0.25)', 'random_state': '(666)', 'stratify': 'classy2.MasterClass'}), '(X_train, y_train, test_size=0.25, random_state=666,\n stratify=classy2.MasterClass)\n', (19958, 20044), False, 'from sklearn.model_selection import train_test_split\n'), ((20889, 20917), 'pandas.DataFrame', 'pd.DataFrame', (['pickle_list[5]'], {}), '(pickle_list[5])\n', (20901, 20917), True, 'import pandas as pd\n'), ((20925, 20961), 'pandas.read_csv', 'pd.read_csv', (['"""selected_features.csv"""'], {}), "('selected_features.csv')\n", (20936, 20961), True, 'import pandas as pd\n'), ((21015, 21043), 'pandas.DataFrame', 'pd.DataFrame', (['pickle_list[6]'], {}), '(pickle_list[6])\n', (21027, 21043), True, 'import pandas as pd\n'), ((21057, 21082), 'pandas.Series', 'pd.Series', (['pickle_list[7]'], {}), '(pickle_list[7])\n', (21066, 21082), True, 'import pandas as pd\n'), ((21210, 21242), 'pandas.concat', 'pd.concat', (['[X_cv, X_val]'], {'axis': '(0)'}), '([X_cv, X_val], axis=0)\n', (21219, 21242), True, 'import pandas as 
pd\n'), ((21752, 21803), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Xy.Class', 'Xy.WOVR'], {'normalize': 'None'}), '(Xy.Class, Xy.WOVR, normalize=None)\n', (21768, 21803), False, 'from sklearn.metrics import confusion_matrix\n'), ((21864, 21888), 'numpy.ceil', 'np.ceil', (['(Xy.NACCBRAA / 2)'], {}), '(Xy.NACCBRAA / 2)\n', (21871, 21888), True, 'import numpy as np\n'), ((22110, 22134), 'numpy.ceil', 'np.ceil', (['(Xy.NACCBRAA / 2)'], {}), '(Xy.NACCBRAA / 2)\n', (22117, 22134), True, 'import numpy as np\n'), ((22789, 22828), 'pandas.crosstab', 'pd.crosstab', (["adfa['Class']", "adfa['ABC']"], {}), "(adfa['Class'], adfa['ABC'])\n", (22800, 22828), True, 'import pandas as pd\n'), ((23125, 23166), 'pandas.crosstab', 'pd.crosstab', (["nadfa['Class']", "nadfa['ABC']"], {}), "(nadfa['Class'], nadfa['ABC'])\n", (23136, 23166), True, 'import pandas as pd\n'), ((20612, 20626), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (20623, 20626), False, 'import pickle\n'), ((21290, 21322), 'pandas.concat', 'pd.concat', (['[y_cv, y_val]'], {'axis': '(0)'}), '([y_cv, y_val], axis=0)\n', (21299, 21322), True, 'import pandas as pd\n'), ((7682, 7737), 'pandas.crosstab', 'pd.crosstab', (['aut[a]', 'aut[b]'], {'dropna': '(False)', 'margins': '(True)'}), '(aut[a], aut[b], dropna=False, margins=True)\n', (7693, 7737), True, 'import pandas as pd\n'), ((1934, 5433), 'numpy.array', 'np.array', (["['NPPMIH', 0, 'NPFIX', 0, 'NPFIXX', 0, 'NPWBRWT', 0, 'NPWBRF', 0,\n 'NACCBRNN', 0, 'NPGRCCA', 0, 'NPGRLA', 0, 'NPGRHA', 0, 'NPGRSNH', 0,\n 'NPGRLCH', 0, 'NACCAVAS', 0, 'NPTAN', False, 'NPTANX', False, 'NPABAN',\n False, 'NPABANX', False, 'NPASAN', False, 'NPASANX', False, 'NPTDPAN', \n False, 'NPTDPANX', False, 'NPHISMB', False, 'NPHISG', False, 'NPHISSS',\n False, 'NPHIST', False, 'NPHISO', False, 'NPHISOX', False, 'NPTHAL', \n False, 'NACCBRAA', False, 'NACCNEUR', False, 'NPADNC', False,\n 'NACCDIFF', False, 'NACCVASC', False, 'NACCAMY', False, 'NPLINF', False,\n 'NPLAC', False, 
'NPINF', False, 'NPINF1A', False, 'NPINF1B', False,\n 'NPINF1D', False, 'NPINF1F', False, 'NPINF2A', False, 'NPINF2B', False,\n 'NPINF2D', False, 'NPINF2F', False, 'NPINF3A', False, 'NPINF3B', False,\n 'NPINF3D', False, 'NPINF3F', False, 'NPINF4A', False, 'NPINF4B', False,\n 'NPINF4D', False, 'NPINF4F', False, 'NACCINF', False, 'NPHEM', False,\n 'NPHEMO', False, 'NPHEMO1', False, 'NPHEMO2', False, 'NPHEMO3', False,\n 'NPMICRO', False, 'NPOLD', False, 'NPOLD1', False, 'NPOLD2', False,\n 'NPOLD3', False, 'NPOLD4', False, 'NACCMICR', False, 'NPOLDD', False,\n 'NPOLDD1', False, 'NPOLDD2', False, 'NPOLDD3', False, 'NPOLDD4', False,\n 'NACCHEM', False, 'NACCARTE', False, 'NPWMR', False, 'NPPATH', False,\n 'NACCNEC', False, 'NPPATH2', False, 'NPPATH3', False, 'NPPATH4', False,\n 'NPPATH5', False, 'NPPATH6', False, 'NPPATH7', False, 'NPPATH8', False,\n 'NPPATH9', False, 'NPPATH10', False, 'NPPATH11', False, 'NPPATHO', \n False, 'NPPATHOX', False, 'NPART', False, 'NPOANG', False, 'NACCLEWY', \n False, 'NPLBOD', False, 'NPNLOSS', False, 'NPHIPSCL', False, 'NPSCL', \n False, 'NPFTDTAU', False, 'NACCPICK', False, 'NPFTDT2', False,\n 'NACCCBD', False, 'NACCPROG', False, 'NPFTDT5', False, 'NPFTDT6', False,\n 'NPFTDT7', False, 'NPFTDT8', False, 'NPFTDT9', False, 'NPFTDT10', False,\n 'NPFRONT', False, 'NPTAU', False, 'NPFTD', False, 'NPFTDTDP', False,\n 'NPALSMND', False, 'NPOFTD', False, 'NPOFTD1', False, 'NPOFTD2', False,\n 'NPOFTD3', False, 'NPOFTD4', False, 'NPOFTD5', False, 'NPFTDNO', False,\n 'NPFTDSPC', False, 'NPTDPA', False, 'NPTDPB', False, 'NPTDPC', False,\n 'NPTDPD', False, 'NPTDPE', False, 'NPPDXA', False, 'NPPDXB', False,\n 'NACCPRIO', False, 'NPPDXD', False, 'NPPDXE', False, 'NPPDXF', False,\n 'NPPDXG', False, 'NPPDXH', False, 'NPPDXI', False, 'NPPDXJ', False,\n 'NPPDXK', False, 'NPPDXL', False, 'NPPDXM', False, 'NPPDXN', False,\n 'NACCDOWN', False, 'NACCOTHP', False, 'NACCWRI1', False, 'NACCWRI2', \n False, 'NACCWRI3', False, 'NACCBNKF', False, 'NPBNKB', False,\n 
'NACCFORM', False, 'NACCPARA', False, 'NACCCSFP', False, 'NPBNKF', \n False, 'NPFAUT', False, 'NPFAUT1', False, 'NPFAUT2', False, 'NPFAUT3', \n False, 'NPFAUT4', False, 'NACCINT', False, 'NPNIT', False, 'NPCERAD', \n False, 'NPADRDA', False, 'NPOCRIT', False, 'NPVOTH', False, 'NPLEWYCS',\n False, 'NPGENE', True, 'NPFHSPEC', False, 'NPCHROM', False, 'NPPNORM', \n False, 'NPCNORM', False, 'NPPADP', False, 'NPCADP', False, 'NPPAD', \n False, 'NPCAD', False, 'NPPLEWY', False, 'NPCLEWY', False, 'NPPVASC', \n False, 'NPCVASC', False, 'NPPFTLD', False, 'NPCFTLD', False, 'NPPHIPP',\n False, 'NPCHIPP', False, 'NPPPRION', False, 'NPCPRION', False,\n 'NPPOTH1', False, 'NPCOTH1', False, 'NPOTH1X', False, 'NPPOTH2', False,\n 'NPCOTH2', False, 'NPOTH2X', False, 'NPPOTH3', False, 'NPCOTH3', False,\n 'NPOTH3X', 0]"], {}), "(['NPPMIH', 0, 'NPFIX', 0, 'NPFIXX', 0, 'NPWBRWT', 0, 'NPWBRF', 0,\n 'NACCBRNN', 0, 'NPGRCCA', 0, 'NPGRLA', 0, 'NPGRHA', 0, 'NPGRSNH', 0,\n 'NPGRLCH', 0, 'NACCAVAS', 0, 'NPTAN', False, 'NPTANX', False, 'NPABAN',\n False, 'NPABANX', False, 'NPASAN', False, 'NPASANX', False, 'NPTDPAN', \n False, 'NPTDPANX', False, 'NPHISMB', False, 'NPHISG', False, 'NPHISSS',\n False, 'NPHIST', False, 'NPHISO', False, 'NPHISOX', False, 'NPTHAL', \n False, 'NACCBRAA', False, 'NACCNEUR', False, 'NPADNC', False,\n 'NACCDIFF', False, 'NACCVASC', False, 'NACCAMY', False, 'NPLINF', False,\n 'NPLAC', False, 'NPINF', False, 'NPINF1A', False, 'NPINF1B', False,\n 'NPINF1D', False, 'NPINF1F', False, 'NPINF2A', False, 'NPINF2B', False,\n 'NPINF2D', False, 'NPINF2F', False, 'NPINF3A', False, 'NPINF3B', False,\n 'NPINF3D', False, 'NPINF3F', False, 'NPINF4A', False, 'NPINF4B', False,\n 'NPINF4D', False, 'NPINF4F', False, 'NACCINF', False, 'NPHEM', False,\n 'NPHEMO', False, 'NPHEMO1', False, 'NPHEMO2', False, 'NPHEMO3', False,\n 'NPMICRO', False, 'NPOLD', False, 'NPOLD1', False, 'NPOLD2', False,\n 'NPOLD3', False, 'NPOLD4', False, 'NACCMICR', False, 'NPOLDD', False,\n 'NPOLDD1', False, 'NPOLDD2', 
False, 'NPOLDD3', False, 'NPOLDD4', False,\n 'NACCHEM', False, 'NACCARTE', False, 'NPWMR', False, 'NPPATH', False,\n 'NACCNEC', False, 'NPPATH2', False, 'NPPATH3', False, 'NPPATH4', False,\n 'NPPATH5', False, 'NPPATH6', False, 'NPPATH7', False, 'NPPATH8', False,\n 'NPPATH9', False, 'NPPATH10', False, 'NPPATH11', False, 'NPPATHO', \n False, 'NPPATHOX', False, 'NPART', False, 'NPOANG', False, 'NACCLEWY', \n False, 'NPLBOD', False, 'NPNLOSS', False, 'NPHIPSCL', False, 'NPSCL', \n False, 'NPFTDTAU', False, 'NACCPICK', False, 'NPFTDT2', False,\n 'NACCCBD', False, 'NACCPROG', False, 'NPFTDT5', False, 'NPFTDT6', False,\n 'NPFTDT7', False, 'NPFTDT8', False, 'NPFTDT9', False, 'NPFTDT10', False,\n 'NPFRONT', False, 'NPTAU', False, 'NPFTD', False, 'NPFTDTDP', False,\n 'NPALSMND', False, 'NPOFTD', False, 'NPOFTD1', False, 'NPOFTD2', False,\n 'NPOFTD3', False, 'NPOFTD4', False, 'NPOFTD5', False, 'NPFTDNO', False,\n 'NPFTDSPC', False, 'NPTDPA', False, 'NPTDPB', False, 'NPTDPC', False,\n 'NPTDPD', False, 'NPTDPE', False, 'NPPDXA', False, 'NPPDXB', False,\n 'NACCPRIO', False, 'NPPDXD', False, 'NPPDXE', False, 'NPPDXF', False,\n 'NPPDXG', False, 'NPPDXH', False, 'NPPDXI', False, 'NPPDXJ', False,\n 'NPPDXK', False, 'NPPDXL', False, 'NPPDXM', False, 'NPPDXN', False,\n 'NACCDOWN', False, 'NACCOTHP', False, 'NACCWRI1', False, 'NACCWRI2', \n False, 'NACCWRI3', False, 'NACCBNKF', False, 'NPBNKB', False,\n 'NACCFORM', False, 'NACCPARA', False, 'NACCCSFP', False, 'NPBNKF', \n False, 'NPFAUT', False, 'NPFAUT1', False, 'NPFAUT2', False, 'NPFAUT3', \n False, 'NPFAUT4', False, 'NACCINT', False, 'NPNIT', False, 'NPCERAD', \n False, 'NPADRDA', False, 'NPOCRIT', False, 'NPVOTH', False, 'NPLEWYCS',\n False, 'NPGENE', True, 'NPFHSPEC', False, 'NPCHROM', False, 'NPPNORM', \n False, 'NPCNORM', False, 'NPPADP', False, 'NPCADP', False, 'NPPAD', \n False, 'NPCAD', False, 'NPPLEWY', False, 'NPCLEWY', False, 'NPPVASC', \n False, 'NPCVASC', False, 'NPPFTLD', False, 'NPCFTLD', False, 'NPPHIPP',\n False, 
'NPCHIPP', False, 'NPPPRION', False, 'NPCPRION', False,\n 'NPPOTH1', False, 'NPCOTH1', False, 'NPOTH1X', False, 'NPPOTH2', False,\n 'NPCOTH2', False, 'NPOTH2X', False, 'NPPOTH3', False, 'NPCOTH3', False,\n 'NPOTH3X', 0])\n", (1942, 5433), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from PCAfold import preprocess
class TestClustering(unittest.TestCase):
    """Unit tests for the clustering helpers in ``PCAfold.preprocess``.

    Covers the binning functions (``variable_bins``, ``predefined_variable_bins``,
    ``mixture_fraction_bins``, ``zero_neighborhood_bins``) and the auxiliary
    cluster utilities (``degrade_clusters``, ``flip_clusters``, ``get_centroids``,
    ``get_partition``, ``get_populations``).

    NOTE: the "allowed calls" tests previously used bare ``except:`` clauses,
    which also swallow ``KeyboardInterrupt``/``SystemExit``; they now catch
    ``Exception``, consistent with the later test methods in this class.
    """

    ################################################################################
    #
    # Clustering functions
    #
    ################################################################################

    def test_variable_bins_allowed_calls(self):
        """``variable_bins`` accepts valid arguments and returns a 1D index array."""
        try:
            idx = preprocess.variable_bins(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 4, verbose=False)
            self.assertTrue(True)
        except Exception:
            self.assertTrue(False)
        self.assertTrue(isinstance(idx, np.ndarray))
        self.assertTrue(idx.ndim == 1)

    def test_variable_bins_not_allowed_calls(self):
        """``variable_bins`` rejects non-positive bin counts and non-bool verbose flags."""
        with self.assertRaises(ValueError):
            idx = preprocess.variable_bins(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 0, verbose=False)
        with self.assertRaises(ValueError):
            idx = preprocess.variable_bins(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), -1, verbose=False)
        with self.assertRaises(ValueError):
            idx = preprocess.variable_bins(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 4, verbose=1)
        with self.assertRaises(ValueError):
            idx = preprocess.variable_bins(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 4, verbose='True')

    def test_predefined_variable_bins_allowed_calls(self):
        """``predefined_variable_bins`` accepts in-range split values and returns a 1D array."""
        try:
            idx = preprocess.predefined_variable_bins(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), [3.5, 8.5], verbose=False)
            self.assertTrue(True)
        except Exception:
            self.assertTrue(False)
        self.assertTrue(isinstance(idx, np.ndarray))
        self.assertTrue(idx.ndim == 1)

    def test_predefined_variable_bins_not_allowed_calls(self):
        """``predefined_variable_bins`` rejects out-of-range splits and non-bool verbose flags."""
        with self.assertRaises(ValueError):
            idx = preprocess.predefined_variable_bins(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), [3, 11], verbose=False)
        with self.assertRaises(ValueError):
            idx = preprocess.predefined_variable_bins(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), [0, 6], verbose=False)
        with self.assertRaises(ValueError):
            idx = preprocess.predefined_variable_bins(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), [3, 8], verbose=1)
        with self.assertRaises(ValueError):
            idx = preprocess.predefined_variable_bins(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), [3, 8], verbose='True')

    def test_mixture_fraction_bins_allowed_calls(self):
        """``mixture_fraction_bins`` works for multiple and single bins and returns a 1D array."""
        try:
            idx = preprocess.mixture_fraction_bins(np.array([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]), 2, 0.2)
            self.assertTrue(True)
        except Exception:
            self.assertTrue(False)
        try:
            idx = preprocess.mixture_fraction_bins(np.array([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]), 1, 0.2)
            self.assertTrue(True)
        except Exception:
            self.assertTrue(False)
        self.assertTrue(isinstance(idx, np.ndarray))
        self.assertTrue(idx.ndim == 1)

    def test_mixture_fraction_bins_not_allowed_calls(self):
        """``mixture_fraction_bins`` rejects non-positive bin counts and non-bool verbose flags."""
        with self.assertRaises(ValueError):
            idx = preprocess.mixture_fraction_bins(np.array([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]), 0, 0.2)
        with self.assertRaises(ValueError):
            idx = preprocess.mixture_fraction_bins(np.array([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]), -1, 0.2)
        with self.assertRaises(ValueError):
            idx = preprocess.mixture_fraction_bins(np.array([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]), 2, 0.2, verbose=1)
        with self.assertRaises(ValueError):
            idx = preprocess.mixture_fraction_bins(np.array([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]), 2, 0.2, verbose='True')

    def test_zero_neighborhood_bins_allowed_calls(self):
        """``zero_neighborhood_bins`` accepts valid arguments and returns a 1D index array."""
        try:
            idx = preprocess.zero_neighborhood_bins(np.array([-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400]), k=4, split_at_zero=True, verbose=False)
            self.assertTrue(True)
        except Exception:
            self.assertTrue(False)
        self.assertTrue(isinstance(idx, np.ndarray))
        self.assertTrue(idx.ndim == 1)

    def test_zero_neighborhood_bins_not_allowed_calls(self):
        """``zero_neighborhood_bins`` rejects non-positive ``k`` and non-bool verbose flags."""
        with self.assertRaises(ValueError):
            idx = preprocess.zero_neighborhood_bins(np.array([-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400]), k=0, split_at_zero=True, verbose=False)
        with self.assertRaises(ValueError):
            idx = preprocess.zero_neighborhood_bins(np.array([-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400]), k=-1, split_at_zero=True, verbose=False)
        with self.assertRaises(ValueError):
            idx = preprocess.zero_neighborhood_bins(np.array([-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400]), k=4, split_at_zero=True, verbose=1)
        with self.assertRaises(ValueError):
            idx = preprocess.zero_neighborhood_bins(np.array([-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400]), k=4, split_at_zero=True, verbose='True')

    ################################################################################
    #
    # Auxiliary functions
    #
    ################################################################################

    def test_degrade_clusters_allowed_calls(self):
        """``degrade_clusters`` renumbers cluster indices to a contiguous 0-based range."""
        try:
            idx_undegraded = [1, 1, 2, 2, 3, 3]
            idx_degraded = [0, 0, 1, 1, 2, 2]
            (idx, k) = preprocess.degrade_clusters(idx_undegraded, verbose=False)
            self.assertTrue(np.min(idx) == 0)
            self.assertTrue(k == 3)
            self.assertTrue(list(idx) == idx_degraded)
        except Exception:
            self.assertTrue(False)
        try:
            idx_undegraded = [-1, -1, 1, 1, 2, 2, 3, 3]
            idx_degraded = [0, 0, 1, 1, 2, 2, 3, 3]
            (idx, k) = preprocess.degrade_clusters(idx_undegraded, verbose=False)
            self.assertTrue(np.min(idx) == 0)
            self.assertTrue(k == 4)
            self.assertTrue(list(idx) == idx_degraded)
        except Exception:
            self.assertTrue(False)
        try:
            idx_undegraded = [-1, 1, 3, -1, 1, 1, 2, 2, 3, 3]
            idx_degraded = [0, 1, 3, 0, 1, 1, 2, 2, 3, 3]
            (idx, k) = preprocess.degrade_clusters(idx_undegraded, verbose=False)
            self.assertTrue(np.min(idx) == 0)
            self.assertTrue(k == 4)
            self.assertTrue(list(idx) == idx_degraded)
        except Exception:
            self.assertTrue(False)
        try:
            idx = np.array([-1,-1,0,0,0,0,1,1,1,1,5])
            (idx, k) = preprocess.degrade_clusters(idx, verbose=False)
            self.assertTrue(np.min(idx) == 0)
            self.assertTrue(k == 4)
        except Exception:
            self.assertTrue(False)

    def test_degrade_clusters_not_allowed_calls(self):
        """``degrade_clusters`` rejects non-integer entries and non-sequence inputs."""
        idx_test = [0,0,0,1,1,1,True,2,2,2]
        with self.assertRaises(ValueError):
            (idx, k) = preprocess.degrade_clusters(idx_test, verbose=False)
        idx_test = [0,0,0,1,1,1,5.1,2,2,2]
        with self.assertRaises(ValueError):
            (idx, k) = preprocess.degrade_clusters(idx_test, verbose=False)
        idx_test = np.array([0,0,0,1.1,1,1,2,2,2])
        with self.assertRaises(ValueError):
            (idx, k) = preprocess.degrade_clusters(idx_test, verbose=False)
        idx_test = np.array([-1.2,0,0,0,1,1,1,2,2,2])
        with self.assertRaises(ValueError):
            (idx, k) = preprocess.degrade_clusters(idx_test, verbose=False)
        with self.assertRaises(ValueError):
            (idx, k) = preprocess.degrade_clusters(1, verbose=False)
        with self.assertRaises(ValueError):
            (idx, k) = preprocess.degrade_clusters('list', verbose=False)

    def test_flip_clusters_allowed_calls(self):
        """``flip_clusters`` relabels cluster indices according to the given dictionary."""
        try:
            idx_unflipped = np.array([0,0,0,1,1,1,2,2,2])
            idx_flipped = np.array([0,0,0,2,2,2,1,1,1])
            idx = preprocess.flip_clusters(idx_unflipped, dictionary={1:2, 2:1})
            comparison = idx_flipped == idx
            self.assertTrue(comparison.all())
        except Exception:
            self.assertTrue(False)
        try:
            idx_unflipped = np.array([0,0,0,1,1,1,2,2,2])
            idx_flipped = np.array([0,0,0,10,10,10,20,20,20])
            idx = preprocess.flip_clusters(idx_unflipped, dictionary={1:10, 2:20})
            comparison = idx_flipped == idx
            self.assertTrue(comparison.all())
        except Exception:
            self.assertTrue(False)

    def test_flip_clusters_not_allowed_calls(self):
        """``flip_clusters`` rejects unknown source labels and non-integer target labels."""
        idx_unflipped = np.array([0,0,0,1,1,1,2,2,2])
        with self.assertRaises(ValueError):
            idx = preprocess.flip_clusters(idx_unflipped, dictionary={3:2,2:3})
        with self.assertRaises(ValueError):
            idx = preprocess.flip_clusters(idx_unflipped, dictionary={0:1,1:1.5})

    def test_get_centroids_allowed_calls(self):
        """``get_centroids`` returns the per-cluster mean of the observations."""
        try:
            x = np.array([[1,2,10],[1,2,10],[1,2,10]])
            idx = np.array([0,0,0])
            idx_centroids = np.array([[1, 2, 10]])
            centroids = preprocess.get_centroids(x, idx)
            comparison = (idx_centroids == centroids)
            self.assertTrue(comparison.all())
        except Exception:
            self.assertTrue(False)
        try:
            x = np.array([[1,2,10],[1,2,10],[20,30,40]])
            idx = np.array([0,0,1])
            idx_centroids = np.array([[1, 2, 10], [20,30,40]])
            centroids = preprocess.get_centroids(x, idx)
            comparison = (idx_centroids == centroids)
            self.assertTrue(comparison.all())
        except Exception:
            self.assertTrue(False)

    # NOTE: method name contains a historical typo ("centroidss"); kept so the
    # externally-visible test name is unchanged.
    def test_get_centroidss_not_allowed_calls(self):
        """``get_centroids`` rejects an ``idx`` whose length differs from the data's."""
        X = np.random.rand(100,10)
        idx = np.zeros((90,))
        with self.assertRaises(ValueError):
            centroids = preprocess.get_centroids(X, idx)
        X = np.random.rand(100,10)
        idx = np.zeros((110,))
        with self.assertRaises(ValueError):
            centroids = preprocess.get_centroids(X, idx)

    def test_get_partition_allowed_calls(self):
        """``get_partition`` splits data and observation indices by cluster."""
        try:
            x = np.array([[1,2,10],[1,2,10],[1,2,10]])
            idx = np.array([0,0,0])
            pre_x_in_clusters = [np.array([[1,2,10],[1,2,10],[1,2,10]])]
            pre_idx_in_clusters = [np.array([0,1,2])]
            (x_in_clusters, idx_in_clusters) = preprocess.get_partition(x, idx)
            comparison_1 = (pre_x_in_clusters[0] == x_in_clusters[0])
            self.assertTrue(comparison_1.all())
            comparison_2 = (pre_idx_in_clusters[0] == idx_in_clusters[0])
            self.assertTrue(comparison_2.all())
        except Exception:
            self.assertTrue(False)
        try:
            x = np.array([[1,2,10],[1,2,10],[30,40,50]])
            idx = np.array([0,0,1])
            pre_x_in_clusters = [np.array([[1,2,10],[1,2,10]]), np.array([[30,40,50]])]
            pre_idx_in_clusters = [np.array([0,1]), np.array([2])]
            (x_in_clusters, idx_in_clusters) = preprocess.get_partition(x, idx)
            comparison_1 = (pre_x_in_clusters[0] == x_in_clusters[0])
            comparison_2 = (pre_x_in_clusters[1] == x_in_clusters[1])
            self.assertTrue(comparison_1.all())
            self.assertTrue(comparison_2.all())
            comparison_3 = (pre_idx_in_clusters[0] == idx_in_clusters[0])
            comparison_4 = (pre_idx_in_clusters[1] == idx_in_clusters[1])
            self.assertTrue(comparison_3.all())
            self.assertTrue(comparison_4.all())
        except Exception:
            self.assertTrue(False)

    # NOTE: method name contains a historical typo ("parition"); kept so the
    # externally-visible test name is unchanged.
    def test_get_parition_not_allowed_calls(self):
        """``get_partition`` rejects an ``idx`` whose length differs from the data's."""
        X = np.random.rand(100,10)
        idx = np.zeros((90,))
        with self.assertRaises(ValueError):
            (x_in_clusters, idx_in_clusters) = preprocess.get_partition(X, idx)
        X = np.random.rand(100,10)
        idx = np.zeros((110,))
        with self.assertRaises(ValueError):
            (x_in_clusters, idx_in_clusters) = preprocess.get_partition(X, idx)

    def test_get_populations_allowed_calls(self):
        """``get_populations`` returns per-cluster observation counts."""
        x = np.linspace(-1,1,100)
        try:
            idx = preprocess.variable_bins(x, 4, verbose=False)
            idx_populations = [25, 25, 25, 25]
            populations = preprocess.get_populations(idx)
            self.assertTrue(populations == idx_populations)
        except Exception:
            self.assertTrue(False)
        try:
            idx = preprocess.variable_bins(x, 5, verbose=False)
            idx_populations = [20, 20, 20, 20, 20]
            populations = preprocess.get_populations(idx)
            self.assertTrue(populations == idx_populations)
        except Exception:
            self.assertTrue(False)
        try:
            idx = preprocess.variable_bins(x, 2, verbose=False)
            idx_populations = [50, 50]
            populations = preprocess.get_populations(idx)
            self.assertTrue(populations == idx_populations)
        except Exception:
            self.assertTrue(False)
        try:
            idx = preprocess.variable_bins(x, 1, verbose=False)
            idx_populations = [100]
            populations = preprocess.get_populations(idx)
            self.assertTrue(populations == idx_populations)
        except Exception:
            self.assertTrue(False)
        try:
            idx_populations = [1]
            populations = preprocess.get_populations(np.array([0]))
            self.assertTrue(populations == idx_populations)
        except Exception:
            self.assertTrue(False)
| [
"numpy.random.rand",
"PCAfold.preprocess.get_centroids",
"PCAfold.preprocess.variable_bins",
"PCAfold.preprocess.get_populations",
"numpy.min",
"PCAfold.preprocess.flip_clusters",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"PCAfold.preprocess.get_partition",
"PCAfold.preprocess.degrade_clus... | [((6999, 7038), 'numpy.array', 'np.array', (['[0, 0, 0, 1.1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1.1, 1, 1, 2, 2, 2])\n', (7007, 7038), True, 'import numpy as np\n'), ((7171, 7214), 'numpy.array', 'np.array', (['[-1.2, 0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([-1.2, 0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (7179, 7214), True, 'import numpy as np\n'), ((8394, 8431), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (8402, 8431), True, 'import numpy as np\n'), ((9553, 9576), 'numpy.random.rand', 'np.random.rand', (['(100)', '(10)'], {}), '(100, 10)\n', (9567, 9576), True, 'import numpy as np\n'), ((9590, 9605), 'numpy.zeros', 'np.zeros', (['(90,)'], {}), '((90,))\n', (9598, 9605), True, 'import numpy as np\n'), ((9721, 9744), 'numpy.random.rand', 'np.random.rand', (['(100)', '(10)'], {}), '(100, 10)\n', (9735, 9744), True, 'import numpy as np\n'), ((9758, 9774), 'numpy.zeros', 'np.zeros', (['(110,)'], {}), '((110,))\n', (9766, 9774), True, 'import numpy as np\n'), ((11487, 11510), 'numpy.random.rand', 'np.random.rand', (['(100)', '(10)'], {}), '(100, 10)\n', (11501, 11510), True, 'import numpy as np\n'), ((11524, 11539), 'numpy.zeros', 'np.zeros', (['(90,)'], {}), '((90,))\n', (11532, 11539), True, 'import numpy as np\n'), ((11678, 11701), 'numpy.random.rand', 'np.random.rand', (['(100)', '(10)'], {}), '(100, 10)\n', (11692, 11701), True, 'import numpy as np\n'), ((11715, 11731), 'numpy.zeros', 'np.zeros', (['(110,)'], {}), '((110,))\n', (11723, 11731), True, 'import numpy as np\n'), ((11921, 11944), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (11932, 11944), True, 'import numpy as np\n'), ((5279, 5337), 'PCAfold.preprocess.degrade_clusters', 'preprocess.degrade_clusters', (['idx_undegraded'], {'verbose': '(False)'}), '(idx_undegraded, verbose=False)\n', (5306, 5337), False, 'from PCAfold import preprocess\n'), ((5671, 5729), 
'PCAfold.preprocess.degrade_clusters', 'preprocess.degrade_clusters', (['idx_undegraded'], {'verbose': '(False)'}), '(idx_undegraded, verbose=False)\n', (5698, 5729), False, 'from PCAfold import preprocess\n'), ((6075, 6133), 'PCAfold.preprocess.degrade_clusters', 'preprocess.degrade_clusters', (['idx_undegraded'], {'verbose': '(False)'}), '(idx_undegraded, verbose=False)\n', (6102, 6133), False, 'from PCAfold import preprocess\n'), ((6354, 6399), 'numpy.array', 'np.array', (['[-1, -1, 0, 0, 0, 0, 1, 1, 1, 1, 5]'], {}), '([-1, -1, 0, 0, 0, 0, 1, 1, 1, 1, 5])\n', (6362, 6399), True, 'import numpy as np\n'), ((6413, 6460), 'PCAfold.preprocess.degrade_clusters', 'preprocess.degrade_clusters', (['idx'], {'verbose': '(False)'}), '(idx, verbose=False)\n', (6440, 6460), False, 'from PCAfold import preprocess\n'), ((6762, 6814), 'PCAfold.preprocess.degrade_clusters', 'preprocess.degrade_clusters', (['idx_test'], {'verbose': '(False)'}), '(idx_test, verbose=False)\n', (6789, 6814), False, 'from PCAfold import preprocess\n'), ((6926, 6978), 'PCAfold.preprocess.degrade_clusters', 'preprocess.degrade_clusters', (['idx_test'], {'verbose': '(False)'}), '(idx_test, verbose=False)\n', (6953, 6978), False, 'from PCAfold import preprocess\n'), ((7098, 7150), 'PCAfold.preprocess.degrade_clusters', 'preprocess.degrade_clusters', (['idx_test'], {'verbose': '(False)'}), '(idx_test, verbose=False)\n', (7125, 7150), False, 'from PCAfold import preprocess\n'), ((7273, 7325), 'PCAfold.preprocess.degrade_clusters', 'preprocess.degrade_clusters', (['idx_test'], {'verbose': '(False)'}), '(idx_test, verbose=False)\n', (7300, 7325), False, 'from PCAfold import preprocess\n'), ((7394, 7439), 'PCAfold.preprocess.degrade_clusters', 'preprocess.degrade_clusters', (['(1)'], {'verbose': '(False)'}), '(1, verbose=False)\n', (7421, 7439), False, 'from PCAfold import preprocess\n'), ((7508, 7558), 'PCAfold.preprocess.degrade_clusters', 'preprocess.degrade_clusters', (['"""list"""'], {'verbose': 
'(False)'}), "('list', verbose=False)\n", (7535, 7558), False, 'from PCAfold import preprocess\n'), ((7650, 7687), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (7658, 7687), True, 'import numpy as np\n'), ((7706, 7743), 'numpy.array', 'np.array', (['[0, 0, 0, 2, 2, 2, 1, 1, 1]'], {}), '([0, 0, 0, 2, 2, 2, 1, 1, 1])\n', (7714, 7743), True, 'import numpy as np\n'), ((7754, 7822), 'PCAfold.preprocess.flip_clusters', 'preprocess.flip_clusters', (['idx_unflipped'], {'dictionary': '{(1): 2, (2): 1}'}), '(idx_unflipped, dictionary={(1): 2, (2): 1})\n', (7778, 7822), False, 'from PCAfold import preprocess\n'), ((8000, 8037), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (8008, 8037), True, 'import numpy as np\n'), ((8056, 8099), 'numpy.array', 'np.array', (['[0, 0, 0, 10, 10, 10, 20, 20, 20]'], {}), '([0, 0, 0, 10, 10, 10, 20, 20, 20])\n', (8064, 8099), True, 'import numpy as np\n'), ((8110, 8180), 'PCAfold.preprocess.flip_clusters', 'preprocess.flip_clusters', (['idx_unflipped'], {'dictionary': '{(1): 10, (2): 20}'}), '(idx_unflipped, dictionary={(1): 10, (2): 20})\n', (8134, 8180), False, 'from PCAfold import preprocess\n'), ((8486, 8554), 'PCAfold.preprocess.flip_clusters', 'preprocess.flip_clusters', (['idx_unflipped'], {'dictionary': '{(3): 2, (2): 3}'}), '(idx_unflipped, dictionary={(3): 2, (2): 3})\n', (8510, 8554), False, 'from PCAfold import preprocess\n'), ((8611, 8681), 'PCAfold.preprocess.flip_clusters', 'preprocess.flip_clusters', (['idx_unflipped'], {'dictionary': '{(0): 1, (1): 1.5}'}), '(idx_unflipped, dictionary={(0): 1, (1): 1.5})\n', (8635, 8681), False, 'from PCAfold import preprocess\n'), ((8754, 8800), 'numpy.array', 'np.array', (['[[1, 2, 10], [1, 2, 10], [1, 2, 10]]'], {}), '([[1, 2, 10], [1, 2, 10], [1, 2, 10]])\n', (8762, 8800), True, 'import numpy as np\n'), ((8811, 8830), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', 
(8819, 8830), True, 'import numpy as np\n'), ((8857, 8879), 'numpy.array', 'np.array', (['[[1, 2, 10]]'], {}), '([[1, 2, 10]])\n', (8865, 8879), True, 'import numpy as np\n'), ((8904, 8936), 'PCAfold.preprocess.get_centroids', 'preprocess.get_centroids', (['x', 'idx'], {}), '(x, idx)\n', (8928, 8936), False, 'from PCAfold import preprocess\n'), ((9128, 9176), 'numpy.array', 'np.array', (['[[1, 2, 10], [1, 2, 10], [20, 30, 40]]'], {}), '([[1, 2, 10], [1, 2, 10], [20, 30, 40]])\n', (9136, 9176), True, 'import numpy as np\n'), ((9187, 9206), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (9195, 9206), True, 'import numpy as np\n'), ((9233, 9269), 'numpy.array', 'np.array', (['[[1, 2, 10], [20, 30, 40]]'], {}), '([[1, 2, 10], [20, 30, 40]])\n', (9241, 9269), True, 'import numpy as np\n'), ((9292, 9324), 'PCAfold.preprocess.get_centroids', 'preprocess.get_centroids', (['x', 'idx'], {}), '(x, idx)\n', (9316, 9324), False, 'from PCAfold import preprocess\n'), ((9675, 9707), 'PCAfold.preprocess.get_centroids', 'preprocess.get_centroids', (['X', 'idx'], {}), '(X, idx)\n', (9699, 9707), False, 'from PCAfold import preprocess\n'), ((9844, 9876), 'PCAfold.preprocess.get_centroids', 'preprocess.get_centroids', (['X', 'idx'], {}), '(X, idx)\n', (9868, 9876), False, 'from PCAfold import preprocess\n'), ((9956, 10002), 'numpy.array', 'np.array', (['[[1, 2, 10], [1, 2, 10], [1, 2, 10]]'], {}), '([[1, 2, 10], [1, 2, 10], [1, 2, 10]])\n', (9964, 10002), True, 'import numpy as np\n'), ((10013, 10032), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (10021, 10032), True, 'import numpy as np\n'), ((10205, 10237), 'PCAfold.preprocess.get_partition', 'preprocess.get_partition', (['x', 'idx'], {}), '(x, idx)\n', (10229, 10237), False, 'from PCAfold import preprocess\n'), ((10569, 10617), 'numpy.array', 'np.array', (['[[1, 2, 10], [1, 2, 10], [30, 40, 50]]'], {}), '([[1, 2, 10], [1, 2, 10], [30, 40, 50]])\n', (10577, 10617), True, 'import numpy as np\n'), 
((10628, 10647), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (10636, 10647), True, 'import numpy as np\n'), ((10848, 10880), 'PCAfold.preprocess.get_partition', 'preprocess.get_partition', (['x', 'idx'], {}), '(x, idx)\n', (10872, 10880), False, 'from PCAfold import preprocess\n'), ((11632, 11664), 'PCAfold.preprocess.get_partition', 'preprocess.get_partition', (['X', 'idx'], {}), '(X, idx)\n', (11656, 11664), False, 'from PCAfold import preprocess\n'), ((11824, 11856), 'PCAfold.preprocess.get_partition', 'preprocess.get_partition', (['X', 'idx'], {}), '(X, idx)\n', (11848, 11856), False, 'from PCAfold import preprocess\n'), ((11975, 12020), 'PCAfold.preprocess.variable_bins', 'preprocess.variable_bins', (['x', '(4)'], {'verbose': '(False)'}), '(x, 4, verbose=False)\n', (11999, 12020), False, 'from PCAfold import preprocess\n'), ((12094, 12125), 'PCAfold.preprocess.get_populations', 'preprocess.get_populations', (['idx'], {}), '(idx)\n', (12120, 12125), False, 'from PCAfold import preprocess\n'), ((12279, 12324), 'PCAfold.preprocess.variable_bins', 'preprocess.variable_bins', (['x', '(5)'], {'verbose': '(False)'}), '(x, 5, verbose=False)\n', (12303, 12324), False, 'from PCAfold import preprocess\n'), ((12402, 12433), 'PCAfold.preprocess.get_populations', 'preprocess.get_populations', (['idx'], {}), '(idx)\n', (12428, 12433), False, 'from PCAfold import preprocess\n'), ((12587, 12632), 'PCAfold.preprocess.variable_bins', 'preprocess.variable_bins', (['x', '(2)'], {'verbose': '(False)'}), '(x, 2, verbose=False)\n', (12611, 12632), False, 'from PCAfold import preprocess\n'), ((12698, 12729), 'PCAfold.preprocess.get_populations', 'preprocess.get_populations', (['idx'], {}), '(idx)\n', (12724, 12729), False, 'from PCAfold import preprocess\n'), ((12883, 12928), 'PCAfold.preprocess.variable_bins', 'preprocess.variable_bins', (['x', '(1)'], {'verbose': '(False)'}), '(x, 1, verbose=False)\n', (12907, 12928), False, 'from PCAfold import preprocess\n'), 
((12991, 13022), 'PCAfold.preprocess.get_populations', 'preprocess.get_populations', (['idx'], {}), '(idx)\n', (13017, 13022), False, 'from PCAfold import preprocess\n'), ((405, 446), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (413, 446), True, 'import numpy as np\n'), ((785, 826), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (793, 826), True, 'import numpy as np\n'), ((934, 975), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (942, 975), True, 'import numpy as np\n'), ((1084, 1125), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (1092, 1125), True, 'import numpy as np\n'), ((1229, 1270), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (1237, 1270), True, 'import numpy as np\n'), ((1419, 1460), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (1427, 1460), True, 'import numpy as np\n'), ((1830, 1871), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (1838, 1871), True, 'import numpy as np\n'), ((1996, 2037), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (2004, 2037), True, 'import numpy as np\n'), ((2161, 2202), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (2169, 2202), True, 'import numpy as np\n'), ((2322, 2363), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (2330, 2363), True, 'import numpy as np\n'), ((2511, 2556), 'numpy.array', 'np.array', (['[0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]'], {}), '([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1])\n', (2519, 2556), 
True, 'import numpy as np\n'), ((2716, 2761), 'numpy.array', 'np.array', (['[0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]'], {}), '([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1])\n', (2724, 2761), True, 'import numpy as np\n'), ((3106, 3151), 'numpy.array', 'np.array', (['[0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]'], {}), '([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1])\n', (3114, 3151), True, 'import numpy as np\n'), ((3257, 3302), 'numpy.array', 'np.array', (['[0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]'], {}), '([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1])\n', (3265, 3302), True, 'import numpy as np\n'), ((3409, 3454), 'numpy.array', 'np.array', (['[0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]'], {}), '([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1])\n', (3417, 3454), True, 'import numpy as np\n'), ((3571, 3616), 'numpy.array', 'np.array', (['[0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1]'], {}), '([0.1, 0.15, 0.2, 0.25, 0.6, 0.8, 1])\n', (3579, 3616), True, 'import numpy as np\n'), ((3766, 3827), 'numpy.array', 'np.array', (['[-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400]'], {}), '([-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400])\n', (3774, 3827), True, 'import numpy as np\n'), ((4206, 4267), 'numpy.array', 'np.array', (['[-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400]'], {}), '([-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400])\n', (4214, 4267), True, 'import numpy as np\n'), ((4406, 4467), 'numpy.array', 'np.array', (['[-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400]'], {}), '([-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400])\n', (4414, 4467), True, 'import numpy as np\n'), ((4607, 4668), 'numpy.array', 'np.array', (['[-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400]'], {}), '([-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400])\n', (4615, 4668), True, 'import numpy as np\n'), ((4803, 4864), 'numpy.array', 'np.array', (['[-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400]'], {}), '([-100, -20, -0.1, 0, 0.1, 1, 10, 20, 200, 300, 400])\n', (4811, 4864), True, 'import numpy as np\n'), ((10064, 10110), 'numpy.array', 
'np.array', (['[[1, 2, 10], [1, 2, 10], [1, 2, 10]]'], {}), '([[1, 2, 10], [1, 2, 10], [1, 2, 10]])\n', (10072, 10110), True, 'import numpy as np\n'), ((10139, 10158), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (10147, 10158), True, 'import numpy as np\n'), ((10679, 10713), 'numpy.array', 'np.array', (['[[1, 2, 10], [1, 2, 10]]'], {}), '([[1, 2, 10], [1, 2, 10]])\n', (10687, 10713), True, 'import numpy as np\n'), ((10710, 10734), 'numpy.array', 'np.array', (['[[30, 40, 50]]'], {}), '([[30, 40, 50]])\n', (10718, 10734), True, 'import numpy as np\n'), ((10769, 10785), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (10777, 10785), True, 'import numpy as np\n'), ((10786, 10799), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (10794, 10799), True, 'import numpy as np\n'), ((13245, 13258), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (13253, 13258), True, 'import numpy as np\n'), ((5366, 5377), 'numpy.min', 'np.min', (['idx'], {}), '(idx)\n', (5372, 5377), True, 'import numpy as np\n'), ((5758, 5769), 'numpy.min', 'np.min', (['idx'], {}), '(idx)\n', (5764, 5769), True, 'import numpy as np\n'), ((6162, 6173), 'numpy.min', 'np.min', (['idx'], {}), '(idx)\n', (6168, 6173), True, 'import numpy as np\n'), ((6489, 6500), 'numpy.min', 'np.min', (['idx'], {}), '(idx)\n', (6495, 6500), True, 'import numpy as np\n')] |
#!/usr/local/bin/env python
import json
import os
import numpy as np
from PIL import Image
# Read input data from JSON
fn = "../../../versign-core/src/app/register_request.json"

# Load the registration request with a context manager so the file handle is
# closed even if JSON parsing raises (the original leaked the handle on error).
with open(fn, "r") as fo:
    payload = json.load(fo)

# Consume the request file so it is processed exactly once.
os.remove(fn)

# Get customer ID
user = payload['customerId']
print('Customer:', user)

# Extract all four reference signatures.
refSigns = [payload['refSignA'], payload['refSignB'], payload['refSignC'], payload['refSignD']]
signImages = []  # kept for backward compatibility with the original script (never populated)

for index, refSign in enumerate(refSigns):
    # Pixel data arrives as a flat list; reshape it into a (height, width)
    # uint8 grayscale image and save it next to the request file.
    pixelData = np.array(refSign['pixelData']).astype('uint8')
    a = np.reshape(pixelData, (refSign['height'], refSign['width']))
    Image.fromarray(a).save("../../../versign-core/src/app/register_" + user + "_" + str(index) + ".png")
| [
"numpy.array",
"PIL.Image.fromarray",
"numpy.reshape",
"os.remove"
] | [((242, 255), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (251, 255), False, 'import os\n'), ((649, 687), 'numpy.reshape', 'np.reshape', (['pixelData', '(height, width)'], {}), '(pixelData, (height, width))\n', (659, 687), True, 'import numpy as np\n'), ((533, 563), 'numpy.array', 'np.array', (["refSign['pixelData']"], {}), "(refSign['pixelData'])\n", (541, 563), True, 'import numpy as np\n'), ((693, 711), 'PIL.Image.fromarray', 'Image.fromarray', (['a'], {}), '(a)\n', (708, 711), False, 'from PIL import Image\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define some basic robot costs used in reinforcement learning and optimization.
Dependencies:
- `pyrobolearn.states`
- `pyrobolearn.actions`
"""
from abc import ABCMeta
import numpy as np
import pyrobolearn as prl
from pyrobolearn.robots.robot import Robot
from pyrobolearn.rewards.cost import Cost
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class RobotCost(Cost):
    r"""Robot reward (abstract).

    Abstract reward class that accepts as input the state and/or action which must depend on a robotic platform.
    """
    __metaclass__ = ABCMeta

    def __init__(self, state=None, update_state=False):
        """
        Initialize the Robot reward.

        Args:
            state (State, Robot): robot state. Only forwarded to the parent
              constructor when it is already a `State` instance.
            update_state (bool): if we should call the state and update its value.
        """
        if isinstance(state, prl.states.State):
            super(RobotCost, self).__init__(state)
        else:
            super(RobotCost, self).__init__()
        self.update_state = update_state

    @staticmethod
    def _check_state(state, cls, update_state=False, **kwargs):
        """
        Check that the given state is an instance of the given class. If not, check if it can be constructed.

        Args:
            state (Robot, State, list/tuple[State]): the state or robot instance that we have to check.
            cls (State class): the state class that the state should belong to.
            update_state (bool): if the state should be updated or not by default.
            **kwargs (dict): dictionary of arguments passed to the `cls` class if the state is a `Robot` instance.

        Returns:
            State: an instance of the specified class `cls`.
            bool: if the state should be updated or not.
        """
        # check given state
        if isinstance(state, Robot):  # if robot, instantiate state class with robot as param.
            state = cls(robot=state, **kwargs)
            update_state = True
        if not isinstance(state, cls):  # if not an instance of the given state class, look for it (the first instance)
            if isinstance(state, prl.states.State):
                state = state.lookfor(cls)
            elif isinstance(state, (tuple, list)):
                # Scan for the first matching state. Previously a direct `cls`
                # match did not `break`, so a later entry could overwrite it
                # (possibly with None), and a list with no match was returned
                # as-is instead of raising below.
                found = None
                for s in state:
                    if isinstance(s, cls):
                        found = s
                        break
                    elif isinstance(s, prl.states.State):
                        found = s.lookfor(cls)
                        if found is not None:
                            break
                state = found
            else:
                raise TypeError("Expecting the given 'state' to be an instance of `Robot`, `{}`, `State` or a list of "
                                "`State`, but instead got: {}".format(cls.__name__, type(state)))
            if state is None:
                raise ValueError("Couldn't find the specified state class `{}` in the given "
                                 "state.".format(cls.__name__))
        return state, update_state

    @staticmethod
    def normalize(x):
        """
        Normalize the given vector. A (near-)zero vector is returned unchanged
        to avoid dividing by zero.
        """
        if np.allclose(x, 0):
            return x
        return x / np.linalg.norm(x)
class DriftCost(RobotCost):
    """Drift cost.

    Penalizes lateral base displacement of a moving robot between consecutive
    evaluations; the value is the negated absolute y-displacement.
    """

    def __init__(self, state, direction=(1, 0, 0), normalize=False, update_state=False):  # TODO: use direction.
        """
        Initialize the drift cost.

        Args:
            state (BasePositionState, Robot): robot or base position state.
            direction (np.array[float[3]], None): forward direction vector. If None, it will take the initial forward
              vector.
            normalize (bool): if we should normalize the direction vector.
            update_state (bool): if we should call the state and update its value.
        """
        super(DriftCost, self).__init__(state=state, update_state=update_state)

        # resolve the base-position state (instantiate it when a Robot was given)
        self.state, self.update_state = self._check_state(state, prl.states.BasePositionState,
                                                          update_state=self.update_state)

        # fall back to the body's forward vector when no direction was supplied
        self.direction = self.state.body.forward_vector if direction is None else np.array(direction)
        if normalize:
            self.direction = self.normalize(self.direction)

        # last observed base position; drift is measured relative to it
        self.prev_pos = np.copy(self.state.data[0])
        self.value = 0

    def _compute(self):
        """Return the negated absolute y-displacement since the previous call.

        NOTE(review): the configured ``direction`` is currently not used here;
        the cost is hard-coded to the y-component of the base displacement.
        """
        if self.update_state:
            self.state()
        position = self.state.data[0]
        displacement = position - self.prev_pos
        self.prev_pos = np.copy(position)
        self.value = -np.abs(displacement[1])
        return self.value
class ShakeCost(RobotCost):
    """Shake cost.

    Penalizes vertical base displacement of a moving robot between consecutive
    evaluations; the value is the negated absolute z-displacement.
    """

    def __init__(self, state, direction=(1, 0, 0), normalize=False, update_state=False):
        """
        Initialize the shake cost.

        Args:
            state (BasePositionState, Robot): robot or base position state.
            direction (np.array[float[3]], None): forward direction vector. If None, it will take the initial forward
              vector.
            normalize (bool): if we should normalize the direction vector.
            update_state (bool): if we should call the state and update its value.
        """
        super(ShakeCost, self).__init__(state=state, update_state=update_state)

        # resolve the base-position state (instantiate it when a Robot was given)
        self.state, self.update_state = self._check_state(state, prl.states.BasePositionState,
                                                          update_state=self.update_state)

        # fall back to the body's forward vector when no direction was supplied
        self.direction = self.state.body.forward_vector if direction is None else np.array(direction)
        if normalize:
            self.direction = self.normalize(self.direction)

        # last observed base position; shaking is measured relative to it
        self.prev_pos = np.copy(self.state.data[0])
        self.value = 0

    def _compute(self):
        """Return the negated absolute z-displacement since the previous call.

        NOTE(review): the configured ``direction`` is currently not used here;
        the cost is hard-coded to the z-component of the base displacement.
        """
        if self.update_state:
            self.state()
        position = self.state.data[0]
        displacement = position - self.prev_pos
        self.prev_pos = np.copy(position)
        self.value = -np.abs(displacement[2])
        return self.value
| [
"numpy.copy",
"numpy.abs",
"numpy.allclose",
"numpy.array",
"numpy.linalg.norm"
] | [((3318, 3335), 'numpy.allclose', 'np.allclose', (['x', '(0)'], {}), '(x, 0)\n', (3329, 3335), True, 'import numpy as np\n'), ((4811, 4838), 'numpy.copy', 'np.copy', (['self.state.data[0]'], {}), '(self.state.data[0])\n', (4818, 4838), True, 'import numpy as np\n'), ((5293, 5310), 'numpy.copy', 'np.copy', (['curr_pos'], {}), '(curr_pos)\n', (5300, 5310), True, 'import numpy as np\n'), ((6736, 6763), 'numpy.copy', 'np.copy', (['self.state.data[0]'], {}), '(self.state.data[0])\n', (6743, 6763), True, 'import numpy as np\n'), ((7218, 7235), 'numpy.copy', 'np.copy', (['curr_pos'], {}), '(curr_pos)\n', (7225, 7235), True, 'import numpy as np\n'), ((3377, 3394), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (3391, 3394), True, 'import numpy as np\n'), ((4593, 4612), 'numpy.array', 'np.array', (['direction'], {}), '(direction)\n', (4601, 4612), True, 'import numpy as np\n'), ((5249, 5268), 'numpy.abs', 'np.abs', (['velocity[1]'], {}), '(velocity[1])\n', (5255, 5268), True, 'import numpy as np\n'), ((6518, 6537), 'numpy.array', 'np.array', (['direction'], {}), '(direction)\n', (6526, 6537), True, 'import numpy as np\n'), ((7174, 7193), 'numpy.abs', 'np.abs', (['velocity[2]'], {}), '(velocity[2])\n', (7180, 7193), True, 'import numpy as np\n')] |
import logging
import os
import shutil
import tempfile
from pythonrouge.pythonrouge import Pythonrouge
from dotenv import load_dotenv
from sacred.observers import MongoObserver
import torch
import numpy as np
# Pull configuration (SACRED_* variables) from a local .env file, if present.
load_dotenv()
# Whether Sacred should also store produced artifact files (defaults to false).
SAVE_FILES = os.getenv("SACRED_SAVE_FILES", "false").lower() == "true"
def setup_mongo_observer(ex):
    """Attach a MongoDB observer to the Sacred experiment *ex*.

    The observer is only added when both ``SACRED_MONGO_URL`` and
    ``SACRED_DB_NAME`` environment variables are set; otherwise this is a no-op.
    """
    url = os.getenv("SACRED_MONGO_URL")
    database = os.getenv("SACRED_DB_NAME")
    if url is None or database is None:
        return  # MongoDB logging not configured
    ex.observers.append(MongoObserver.create(url=url, db_name=database))
def extract_preds(outputs):
    """Yield per-document prediction slices.

    Each element of *outputs* is a ``(pred, doc_lens)`` pair where *pred* is a
    flat sequence of sentence predictions for a whole batch and *doc_lens*
    gives each document's sentence count; the flat sequence is split back into
    one slice per document.
    """
    for pred, doc_lens in outputs:
        offset = 0
        for length in doc_lens:
            yield pred[offset:offset + length]
            offset += length
def eval_summaries(
    summaries, docs, logger=None, topk=3, encoding="utf-8", delete_temps=True, log=True
):
    """Score extractive summaries against reference summaries with ROUGE.

    Args:
        summaries: per-document sentence scores (torch tensors), parallel to *docs*.
        docs: document objects exposing ``sentences`` (with ``.label``/``.words``)
            and ``summary`` (list of tokenized reference sentences).
        logger: logger to use; defaults to this module's logger.
        topk: number of top-scoring sentences to extract per document.
        encoding: encoding used when writing the temporary ROUGE input files.
        delete_temps: remove the temporary ROUGE directories afterwards.
        log: emit progress/info log messages.

    Returns:
        The dict of ROUGE scores produced by ``Pythonrouge.calc_score``.
    """
    if logger is None:
        logger = logging.getLogger(__name__)
    references = []
    hypotheses = []
    for i, (summary, doc) in enumerate(zip(summaries, docs)):
        # Clamp per document only. The previous code reassigned ``topk``
        # itself here, which silently shrank it for every later document
        # once a single short summary was encountered.
        k = min(topk, len(summary))
        if log:
            ext_index = [i for i, sent in enumerate(doc.sentences) if sent.label][:k]
            hyp_index = [i.item() for i in torch.sort(summary[:len(doc.sentences)].topk(k)[1])[0]]
            logger.info(f"Generating summary for doc {i} {ext_index} {hyp_index}")
            # np.isin supersedes the deprecated np.in1d.
            if np.isin(hyp_index, ext_index).sum() >= 3:
                logger.info(f"Bagus: {i}")
        refs = [[" ".join(sent) for sent in doc.summary]]
        # Selected sentence indices, restored to document order via torch.sort.
        hyp = [
            " ".join(doc.sentences[idx].words)
            for idx in torch.sort(summary[:len(doc.sentences)].topk(k)[1])[0]
        ]
        references.append(refs)
        hypotheses.append(hyp)
    assert len(references) == len(
        hypotheses
    ), "Number of references and hypotheses mismatch"
    # ROUGE reads its inputs from disk: one file per hypothesis, one per reference.
    ref_dirname = tempfile.mkdtemp()
    hyp_dirname = tempfile.mkdtemp()
    if log:
        logger.info("References directory: %s", ref_dirname)
        logger.info("Hypotheses directory: %s", hyp_dirname)
    for doc_id, (refs, hyp) in enumerate(zip(references, hypotheses)):
        # Write references
        for rid, ref in enumerate(refs):
            ref_filename = os.path.join(ref_dirname, f"{doc_id}.{rid}.txt")
            write_to_file(ref_filename, encoding, ref)
        # Write hypothesis
        hyp_filename = os.path.join(hyp_dirname, f"{doc_id}.txt")
        write_to_file(hyp_filename, encoding, hyp)
    rouge = Pythonrouge(
        peer_path=hyp_dirname,
        model_path=ref_dirname,
        stemming=False,
        ROUGE_L=True,
        ROUGE_SU4=False,
    )
    score = rouge.calc_score()
    if log:
        logger.info("ROUGE scores: %s", score)
    if delete_temps:
        if log:
            logger.info("Deleting temporary files and directories")
        shutil.rmtree(ref_dirname)
        shutil.rmtree(hyp_dirname)
    return score
def write_to_file(filename, encoding, data):
    """Write each element of *data* on its own line to *filename*.

    A trailing newline is always emitted, matching ``print``'s behaviour.
    """
    with open(filename, "w", encoding=encoding) as out:
        out.write("\n".join(data))
        out.write("\n")
| [
"logging.getLogger",
"pythonrouge.pythonrouge.Pythonrouge",
"os.getenv",
"numpy.in1d",
"os.path.join",
"dotenv.load_dotenv",
"tempfile.mkdtemp",
"shutil.rmtree",
"sacred.observers.MongoObserver.create"
] | [((210, 223), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (221, 223), False, 'from dotenv import load_dotenv\n'), ((344, 373), 'os.getenv', 'os.getenv', (['"""SACRED_MONGO_URL"""'], {}), "('SACRED_MONGO_URL')\n", (353, 373), False, 'import os\n'), ((388, 415), 'os.getenv', 'os.getenv', (['"""SACRED_DB_NAME"""'], {}), "('SACRED_DB_NAME')\n", (397, 415), False, 'import os\n'), ((1992, 2010), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2008, 2010), False, 'import tempfile\n'), ((2029, 2047), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2045, 2047), False, 'import tempfile\n'), ((2612, 2721), 'pythonrouge.pythonrouge.Pythonrouge', 'Pythonrouge', ([], {'peer_path': 'hyp_dirname', 'model_path': 'ref_dirname', 'stemming': '(False)', 'ROUGE_L': '(True)', 'ROUGE_SU4': '(False)'}), '(peer_path=hyp_dirname, model_path=ref_dirname, stemming=False,\n ROUGE_L=True, ROUGE_SU4=False)\n', (2623, 2721), False, 'from pythonrouge.pythonrouge import Pythonrouge\n'), ((916, 943), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (933, 943), False, 'import logging\n'), ((2505, 2547), 'os.path.join', 'os.path.join', (['hyp_dirname', 'f"""{doc_id}.txt"""'], {}), "(hyp_dirname, f'{doc_id}.txt')\n", (2517, 2547), False, 'import os\n'), ((2970, 2996), 'shutil.rmtree', 'shutil.rmtree', (['ref_dirname'], {}), '(ref_dirname)\n', (2983, 2996), False, 'import shutil\n'), ((3005, 3031), 'shutil.rmtree', 'shutil.rmtree', (['hyp_dirname'], {}), '(hyp_dirname)\n', (3018, 3031), False, 'import shutil\n'), ((238, 277), 'os.getenv', 'os.getenv', (['"""SACRED_SAVE_FILES"""', '"""false"""'], {}), "('SACRED_SAVE_FILES', 'false')\n", (247, 277), False, 'import os\n'), ((498, 550), 'sacred.observers.MongoObserver.create', 'MongoObserver.create', ([], {'url': 'mongo_url', 'db_name': 'db_name'}), '(url=mongo_url, db_name=db_name)\n', (518, 550), False, 'from sacred.observers import MongoObserver\n'), ((2350, 2398), 'os.path.join', 
'os.path.join', (['ref_dirname', 'f"""{doc_id}.{rid}.txt"""'], {}), "(ref_dirname, f'{doc_id}.{rid}.txt')\n", (2362, 2398), False, 'import os\n'), ((1465, 1494), 'numpy.in1d', 'np.in1d', (['hyp_index', 'ext_index'], {}), '(hyp_index, ext_index)\n', (1472, 1494), True, 'import numpy as np\n')] |
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.nn.functional as F
import numpy as np
from dataset.data_loader_kitti_reimpl import KITTIReader_traj
from models.vgg_warper_weak_shortcut_nobn import VGG_Warper
from utils.visual import colorcode, VisdomShow, pbar
from ops.flow_warper_pad_2x import FlowWarp
from ops.hardshinkloss import HardshinkLoss
from ops.laplace2d import Laplace2D
from skimage.measure import compare_ssim as ssim
from skimage.measure import compare_psnr as psnr
from skimage.measure import compare_mse as mse
# Run configuration: single GPU, fixed RNG seed.
# NOTE(review): args['seed'] is defined but never passed to torch/numpy here —
# confirm whether seeding was intended.
args = {}
args['gpus'] = [0]
args['seed'] = 12345
torch.backends.cudnn.benchmark = True  # let cuDNN pick the fastest kernels
# Initialize Pytorch Dataloader
# Evaluation split of KITTI with 10-frame intervals and 10 trajectories.
datareader = KITTIReader_traj(is_test=True, max_interval=10, min_ntraj=10, max_ntraj=10, is_eval=True)
train_loader = torch.utils.data.DataLoader(
    datareader, batch_size=4, shuffle=False, collate_fn=datareader.collate_fn, worker_init_fn=datareader.worker_init_fn, num_workers=4, pin_memory=True, drop_last = True)
class MModel(nn.Module):
    """Flow-based frame reconstruction model.

    A VGG-style network predicts a flow field, a soft blending mask and a
    hallucinated ("completed") frame; the input image is warped by the flow
    and blended with the hallucinated frame via the mask.
    """

    def __init__(self):
        super(MModel, self).__init__()
        self.warp_cnn = VGG_Warper(9)            # predicts flow / masks / completed frame
        self.flow_warper = FlowWarp()             # differentiable image warper
        self.mseloss = nn.MSELoss(size_average=True, reduce=True)
        self.hardshrinkloss = HardshinkLoss(0., 1.)

    def forward(self, img_input, warp_input, img_gt):
        """Return (reconstruction, flow, completed frame, blend mask)."""
        warp_flow, masks, comp_imgs = self.warp_cnn(warp_input)  # W*H*2 flow
        warp_imgs = self.flow_warper(img_input, warp_flow, padl=83)
        # Clamp the hallucinated frame to valid pixel range and squash the mask.
        comp_imgs = F.hardtanh(comp_imgs, 0., 1.)
        masks = F.sigmoid(masks)
        # Blend warped pixels with hallucinated pixels via the soft mask.
        recon_img = warp_imgs * masks + comp_imgs * (1 - masks)
        return recon_img, warp_flow, comp_imgs, masks
# Build the model and wrap it for GPU execution on device 0.
mmodel = MModel()
mmodel.cuda()
mmodel = nn.DataParallel(mmodel, device_ids=[0])
# Visdom dashboard used to display generated sequences during evaluation.
visual = VisdomShow('kitti_eval_10')
def test():
    """Evaluate the model over the loader, reporting per-frame PSNR and SSIM.

    Periodically prints running metrics and pushes generated/GT sequences to
    Visdom; prints the final per-frame "PSNR SSIM" table at the end.
    """
    print('\n\n=========================== Testing ============================')
    mmodel.eval()
    mse_stor = []
    ssim_stor = []
    for batch_idx, (img_input, warp_input, img_gt, vid_mask, img_input_2x) in enumerate(train_loader):
        # volatile=True: legacy (pre-0.4) PyTorch inference mode, no autograd graph.
        img_input = Variable(img_input, volatile=True).cuda(args['gpus'][0])
        img_input_2x = Variable(img_input_2x).cuda(args['gpus'][0])
        warp_input = Variable(warp_input, volatile=True).cuda(args['gpus'][0])
        img_gt = Variable(img_gt, volatile=True).cuda(args['gpus'][0])
        vid_mask = Variable(vid_mask, volatile=True).cuda(args['gpus'][0])
        # warp_input : [interval-1, 9, H, W]
        # print(warp_input.shape) # ([1, 9, 9, 192, 256])
        recon_img, warp_flow, comp_imgs, masks = mmodel(img_input_2x, warp_input, img_gt)
        # Mask out invalid regions in both prediction and ground truth.
        recon_img *= vid_mask
        img_gt *= vid_mask
        gen_seq = recon_img.data.cpu().numpy()
        gt_seq = img_gt.data.cpu().numpy()
        mses = np.zeros(gen_seq.shape[0])
        ssims = np.zeros(gen_seq.shape[0])
        for i in range(gen_seq.shape[0]):
            # CHW -> HWC for the skimage metrics.
            gen = np.transpose(gen_seq[i,:,:,:], [1,2,0])
            gt = np.transpose(gt_seq[i,:,:,:], [1,2,0])
            mses[i] = mse(gen,gt)
            ssims[i] = ssim(gt, gen, data_range=1., multichannel=True)
        # Group metrics into rows of 9 frames per sequence.
        mse_stor.append(mses.reshape([-1,9]))
        ssim_stor.append(ssims.reshape([-1,9]))
        if batch_idx%1 == 0:
            pbar(batch_idx, len(train_loader), 0)
        if batch_idx%10 == 0:
            # Running metrics: PSNR derived from mean MSE per frame position.
            mse_a = np.concatenate(mse_stor, axis=0)
            ssim_a = np.concatenate(ssim_stor, axis=0)
            psnr_all = -10*np.log(np.mean(mse_a, axis=0))/np.log(10)
            ssim_all = np.mean(ssim_a, axis=0)
            print('PSNR')
            print(psnr_all)
            print('SSIM')
            print(ssim_all)
        if batch_idx%10 == 0:
            # Side-by-side visualisation: generated sequence vs ground truth.
            out_seq = torch.cat((img_input[(0,),:,:,:],recon_img), dim=0).data.cpu().numpy()
            for i in range(out_seq.shape[0]):
                out_seq[i,:,:,:] = visual.add_text(out_seq[i,:,:,:], str(i), (0,1,1))
            out_gt = torch.cat((img_input[(0,),:,:,:],img_gt), dim=0).data.cpu().numpy()
            for i in range(out_gt.shape[0]):
                out_gt[i,:,:,:] = visual.add_text(out_gt[i,:,:,:], 'GT', (0,1,0))
            out_seq = np.concatenate((out_seq,out_gt), axis=3)
            visual.show_vid(out_seq)
    # Final metrics over the whole split.
    mse_a = np.concatenate(mse_stor, axis=0)
    ssim_a = np.concatenate(ssim_stor, axis=0)
    psnr_all = -10*np.log(np.mean(mse_a, axis=0))/np.log(10)
    ssim_all = np.mean(ssim_a, axis=0)
    print('\nPSNR SSIM')
    for i in range(psnr_all.size):
        print('{} {}'.format(psnr_all[i], ssim_all[i]))
def restore(ckpt_file):
    """Load model weights from *ckpt_file* into the global ``mmodel``."""
    checkpoint = torch.load(ckpt_file)
    mmodel.module.load_state_dict(checkpoint['mmodel_state_dict'])
    print('Restored from {}'.format(ckpt_file))
# Load the pretrained checkpoint, then run evaluation over the test split.
restore('./snapshots/kitti/ckpt_e0_b0_rev2.pth')
test()
| [
"torch.mul",
"numpy.log",
"torch.nn.functional.sigmoid",
"torch.nn.MSELoss",
"utils.visual.VisdomShow",
"ops.flow_warper_pad_2x.FlowWarp",
"numpy.mean",
"numpy.concatenate",
"torch.autograd.Variable",
"dataset.data_loader_kitti_reimpl.KITTIReader_traj",
"models.vgg_warper_weak_shortcut_nobn.VGG_... | [((807, 900), 'dataset.data_loader_kitti_reimpl.KITTIReader_traj', 'KITTIReader_traj', ([], {'is_test': '(True)', 'max_interval': '(10)', 'min_ntraj': '(10)', 'max_ntraj': '(10)', 'is_eval': '(True)'}), '(is_test=True, max_interval=10, min_ntraj=10, max_ntraj=10,\n is_eval=True)\n', (823, 900), False, 'from dataset.data_loader_kitti_reimpl import KITTIReader_traj\n'), ((912, 1113), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['datareader'], {'batch_size': '(4)', 'shuffle': '(False)', 'collate_fn': 'datareader.collate_fn', 'worker_init_fn': 'datareader.worker_init_fn', 'num_workers': '(4)', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(datareader, batch_size=4, shuffle=False,\n collate_fn=datareader.collate_fn, worker_init_fn=datareader.\n worker_init_fn, num_workers=4, pin_memory=True, drop_last=True)\n', (939, 1113), False, 'import torch\n'), ((1853, 1892), 'torch.nn.DataParallel', 'nn.DataParallel', (['mmodel'], {'device_ids': '[0]'}), '(mmodel, device_ids=[0])\n', (1868, 1892), True, 'import torch.nn as nn\n'), ((1903, 1930), 'utils.visual.VisdomShow', 'VisdomShow', (['"""kitti_eval_10"""'], {}), "('kitti_eval_10')\n", (1913, 1930), False, 'from utils.visual import colorcode, VisdomShow, pbar\n'), ((4449, 4481), 'numpy.concatenate', 'np.concatenate', (['mse_stor'], {'axis': '(0)'}), '(mse_stor, axis=0)\n', (4463, 4481), True, 'import numpy as np\n'), ((4495, 4528), 'numpy.concatenate', 'np.concatenate', (['ssim_stor'], {'axis': '(0)'}), '(ssim_stor, axis=0)\n', (4509, 4528), True, 'import numpy as np\n'), ((4605, 4628), 'numpy.mean', 'np.mean', (['ssim_a'], {'axis': '(0)'}), '(ssim_a, axis=0)\n', (4612, 4628), True, 'import numpy as np\n'), ((4805, 4826), 'torch.load', 'torch.load', (['ckpt_file'], {}), '(ckpt_file)\n', (4815, 4826), False, 'import torch\n'), ((1229, 1242), 'models.vgg_warper_weak_shortcut_nobn.VGG_Warper', 'VGG_Warper', (['(9)'], {}), '(9)\n', (1239, 1242), False, 'from 
models.vgg_warper_weak_shortcut_nobn import VGG_Warper\n'), ((1270, 1280), 'ops.flow_warper_pad_2x.FlowWarp', 'FlowWarp', ([], {}), '()\n', (1278, 1280), False, 'from ops.flow_warper_pad_2x import FlowWarp\n'), ((1304, 1346), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'size_average': '(True)', 'reduce': '(True)'}), '(size_average=True, reduce=True)\n', (1314, 1346), True, 'import torch.nn as nn\n'), ((1377, 1400), 'ops.hardshinkloss.HardshinkLoss', 'HardshinkLoss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1390, 1400), False, 'from ops.hardshinkloss import HardshinkLoss\n'), ((1618, 1649), 'torch.nn.functional.hardtanh', 'F.hardtanh', (['comp_imgs', '(0.0)', '(1.0)'], {}), '(comp_imgs, 0.0, 1.0)\n', (1628, 1649), True, 'import torch.nn.functional as F\n'), ((1662, 1678), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['masks'], {}), '(masks)\n', (1671, 1678), True, 'import torch.nn.functional as F\n'), ((2920, 2946), 'numpy.zeros', 'np.zeros', (['gen_seq.shape[0]'], {}), '(gen_seq.shape[0])\n', (2928, 2946), True, 'import numpy as np\n'), ((2963, 2989), 'numpy.zeros', 'np.zeros', (['gen_seq.shape[0]'], {}), '(gen_seq.shape[0])\n', (2971, 2989), True, 'import numpy as np\n'), ((4579, 4589), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (4585, 4589), True, 'import numpy as np\n'), ((1699, 1726), 'torch.mul', 'torch.mul', (['warp_imgs', 'masks'], {}), '(warp_imgs, masks)\n', (1708, 1726), False, 'import torch\n'), ((1727, 1758), 'torch.mul', 'torch.mul', (['comp_imgs', '(1 - masks)'], {}), '(comp_imgs, 1 - masks)\n', (1736, 1758), False, 'import torch\n'), ((3050, 3094), 'numpy.transpose', 'np.transpose', (['gen_seq[i, :, :, :]', '[1, 2, 0]'], {}), '(gen_seq[i, :, :, :], [1, 2, 0])\n', (3062, 3094), True, 'import numpy as np\n'), ((3107, 3150), 'numpy.transpose', 'np.transpose', (['gt_seq[i, :, :, :]', '[1, 2, 0]'], {}), '(gt_seq[i, :, :, :], [1, 2, 0])\n', (3119, 3150), True, 'import numpy as np\n'), ((3168, 3180), 'skimage.measure.compare_mse', 'mse', (['gen', 'gt'], 
{}), '(gen, gt)\n', (3171, 3180), True, 'from skimage.measure import compare_mse as mse\n'), ((3203, 3251), 'skimage.measure.compare_ssim', 'ssim', (['gt', 'gen'], {'data_range': '(1.0)', 'multichannel': '(True)'}), '(gt, gen, data_range=1.0, multichannel=True)\n', (3207, 3251), True, 'from skimage.measure import compare_ssim as ssim\n'), ((3510, 3542), 'numpy.concatenate', 'np.concatenate', (['mse_stor'], {'axis': '(0)'}), '(mse_stor, axis=0)\n', (3524, 3542), True, 'import numpy as np\n'), ((3564, 3597), 'numpy.concatenate', 'np.concatenate', (['ssim_stor'], {'axis': '(0)'}), '(ssim_stor, axis=0)\n', (3578, 3597), True, 'import numpy as np\n'), ((3690, 3713), 'numpy.mean', 'np.mean', (['ssim_a'], {'axis': '(0)'}), '(ssim_a, axis=0)\n', (3697, 3713), True, 'import numpy as np\n'), ((4354, 4395), 'numpy.concatenate', 'np.concatenate', (['(out_seq, out_gt)'], {'axis': '(3)'}), '((out_seq, out_gt), axis=3)\n', (4368, 4395), True, 'import numpy as np\n'), ((2204, 2238), 'torch.autograd.Variable', 'Variable', (['img_input'], {'volatile': '(True)'}), '(img_input, volatile=True)\n', (2212, 2238), False, 'from torch.autograd import Variable\n'), ((2284, 2306), 'torch.autograd.Variable', 'Variable', (['img_input_2x'], {}), '(img_input_2x)\n', (2292, 2306), False, 'from torch.autograd import Variable\n'), ((2350, 2385), 'torch.autograd.Variable', 'Variable', (['warp_input'], {'volatile': '(True)'}), '(warp_input, volatile=True)\n', (2358, 2385), False, 'from torch.autograd import Variable\n'), ((2425, 2456), 'torch.autograd.Variable', 'Variable', (['img_gt'], {'volatile': '(True)'}), '(img_gt, volatile=True)\n', (2433, 2456), False, 'from torch.autograd import Variable\n'), ((2498, 2531), 'torch.autograd.Variable', 'Variable', (['vid_mask'], {'volatile': '(True)'}), '(vid_mask, volatile=True)\n', (2506, 2531), False, 'from torch.autograd import Variable\n'), ((3656, 3666), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (3662, 3666), True, 'import numpy as np\n'), ((4555, 
4577), 'numpy.mean', 'np.mean', (['mse_a'], {'axis': '(0)'}), '(mse_a, axis=0)\n', (4562, 4577), True, 'import numpy as np\n'), ((3632, 3654), 'numpy.mean', 'np.mean', (['mse_a'], {'axis': '(0)'}), '(mse_a, axis=0)\n', (3639, 3654), True, 'import numpy as np\n'), ((3896, 3951), 'torch.cat', 'torch.cat', (['(img_input[(0,), :, :, :], recon_img)'], {'dim': '(0)'}), '((img_input[(0,), :, :, :], recon_img), dim=0)\n', (3905, 3951), False, 'import torch\n'), ((4120, 4172), 'torch.cat', 'torch.cat', (['(img_input[(0,), :, :, :], img_gt)'], {'dim': '(0)'}), '((img_input[(0,), :, :, :], img_gt), dim=0)\n', (4129, 4172), False, 'import torch\n')] |
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap, Normalize
import matplotlib.pylab as pylab
import numpy as np
import sys
# Fixed seed so repeated runs produce identical random choices.
np.random.seed(0)
class Plot:
    """
    Plot procrystalline lattice.

    Each boolean option toggles one layer of the figure: nodes (scatter),
    cnxs (bonds), rings (filled polygons), periodic (3x3 image tiling),
    dual (ring centres) and envs (reciprocal-site environments).
    """

    def __init__(self,nodes=False,cnxs=False,rings=False,periodic=False,dual=False,envs=False):
        """
        Initialise with plot options.
        """
        # Get options
        self.nodes = nodes
        self.cnxs = cnxs
        self.rings = rings
        self.periodic = periodic
        self.dual = dual
        self.envs = envs
        # Set up empty figure
        params = {"figure.figsize": (6, 6)}
        pylab.rcParams.update(params)
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111)
        self.ax.set_axis_off()

    def __call__(self,prefix,sample,**kwargs):
        """
        Plot procrystalline lattice.

        Reads data files named ``{prefix}_crds.dat``, ``{prefix}_rcrds.dat``
        and ``{prefix}_sample_{sample}.dat``.  kwargs: lw (line width),
        lc (line colour), ms (marker size), mc (marker colour), save (bool).
        """
        # Images to generate based on periodicity
        if self.periodic:
            images = [-1,0,1]
        else:
            images = [0]
        # Unpack options
        self.lw = kwargs.get("lw",1.0)
        self.lc = kwargs.get("lc","k")
        self.ms = kwargs.get("ms",10)
        self.mc = kwargs.get("mc","k")
        save = kwargs.get("save",False)
        # Load data
        self.load_sample(prefix,sample)
        # Add images to plot (one pass per periodic image offset)
        for y in images:
            for x in images:
                self.plot_rings(x,y)
                self.plot_nodes(x,y)
                self.plot_cnxs(x,y)
                self.plot_dual(x,y)
                self.plot_envs(x,y)
        # Display
        if save:
            plt.savefig("config.png",dpi=300)
            plt.savefig("config.pdf")
        plt.show()

    def load_sample(self,prefix,sample):
        """
        Load coordinates and network information.

        Populates node/ring/dual coordinate arrays and applies the minimum
        image convention so bonds and rings are drawn without wrap-around.
        """
        # Node coordination, periodicity and node crds
        data = np.genfromtxt(prefix+"_crds.dat")
        self.cnd = data[0,:].astype(int)
        self.pbc = data[1,:]
        self.mic = self.pbc/2
        self.node_crds = data[2:,:]
        # Mean ring size follows from the node coordination number (cnd[1]).
        if self.cnd[1] == 2:
            self.mean_ring_size = 0
        elif self.cnd[1] == 3:
            self.mean_ring_size = 6
        elif self.cnd[1] == 4:
            self.mean_ring_size = 4
        elif self.cnd[1] == 5:
            self.mean_ring_size = 3+1/3.0
        print(self.mean_ring_size)
        # Reciprocal crds
        data = np.genfromtxt(prefix+"_rcrds.dat")
        self.rec_crds = data[1:,:]
        # Node connections and rings
        self.node_cnxs = []
        self.node_rings = []
        self.node_chains = []
        self.dual_crds = []
        self.dual_cnxs = []
        self.dual_ids = {}
        with open("{}_sample_{}.dat".format(prefix,sample),"r") as f:
            n = int(f.readline())
            for i in range(n):
                cnxs = f.readline().split()
                for j in cnxs:
                    self.node_cnxs.append([i,j])
            self.node_cnxs = np.array(self.node_cnxs,dtype=int)
            n = int(f.readline())
            for i in range(n):
                j = int(f.readline())
                self.dual_ids[j] = i
            for i in range(n):
                cnxs = f.readline().split()
                for j in cnxs:
                    self.dual_cnxs.append([int(i),self.dual_ids[int(j)]])
            for i in range(n):
                ring = f.readline().split()
                self.node_rings.append(np.array(ring,dtype=int))
            for i in range(n):
                self.dual_crds.append(np.array([float(x) for x in f.readline().split()]))
            self.dual_crds = np.array(self.dual_crds)
            # NOTE(review): chain and environment parsing is disabled below;
            # self.rec_envs is therefore never set (see plot_envs).
            # n = int(f.readline())
            # for i in range(n):
            #     chain = np.array(f.readline().split(),dtype=int)
            #     self.node_chains.append(chain)
            # if self.envs:
            #     self.rec_envs = np.zeros(self.rec_crds.shape[0],dtype=int)
            #     for i in range(self.rec_envs.size):
            #         self.rec_envs[i] = int(f.readline())
        # Make connections accounting for periodicity (minimum image convention)
        self.node_cnx_crds = np.zeros((self.node_cnxs.shape[0],4))
        crd_i = np.zeros(2)
        crd_j = np.zeros(2)
        for i,p in enumerate(self.node_cnxs):
            crd_i[:] = self.node_crds[p[0]][:]
            crd_j[:] = self.node_crds[p[1]][:]
            x = crd_j[0]-crd_i[0]
            y = crd_j[1]-crd_i[1]
            if x>self.mic[0]: x-=self.pbc[0]
            elif x<-self.mic[0]: x+=self.pbc[0]
            if y>self.mic[1]: y-=self.pbc[1]
            elif y<-self.mic[1]: y+=self.pbc[1]
            # Each bond is drawn as a half-segment from node i to the midpoint.
            self.node_cnx_crds[i,0] = crd_i[0]
            self.node_cnx_crds[i,1] = crd_i[0]+x/2
            self.node_cnx_crds[i,2] = crd_i[1]
            self.node_cnx_crds[i,3] = crd_i[1]+y/2
        # Make rings accounting for periodicity
        self.ring_crds = []
        self.max_ring_size = 0
        for ring in self.node_rings:
            crds = np.zeros((ring.size,2))
            for i,j in enumerate(ring):
                crds[i,:] = self.node_crds[j,:]
            # Unwrap each ring so consecutive vertices are contiguous in space.
            for i in range(1,ring.size):
                x = crds[i,0] - crds[i-1,0]
                y = crds[i,1] - crds[i-1,1]
                if x>self.mic[0]: x -= self.pbc[0]
                elif x<-self.mic[0]: x += self.pbc[0]
                if y>self.mic[1]: y -= self.pbc[1]
                elif y<-self.mic[1]: y += self.pbc[1]
                crds[i,0] = crds[i-1,0] + x
                crds[i,1] = crds[i-1,1] + y
            # Recentre the ring into the primary cell using its centre of mass.
            x_com = np.average(crds[:,0])
            y_com = np.average(crds[:,1])
            if x_com>self.mic[0]: crds[:,0]-=self.pbc[0]
            elif x_com<-self.mic[0]: crds[:,0]+=self.pbc[0]
            if y_com>self.mic[1]: crds[:,1]-=self.pbc[1]
            elif y_com<-self.mic[1]: crds[:,1]+=self.pbc[1]
            self.ring_crds.append(crds)
            if ring.size > self.max_ring_size:
                self.max_ring_size = ring.size
        self.init_ring_colours(self.mean_ring_size,self.max_ring_size)
        # Make chain colouring
        if len(self.node_chains)>0:
            self.chains = True
        else:
            self.chains = False
        self.init_chain_colours()

    def plot_nodes(self,x_shift,y_shift):
        """
        Plot image nodes as scatter.
        """
        if not self.nodes: return # Bounce if option not selected
        if not self.chains:
            self.ax.scatter(self.node_crds[:,0]+x_shift*self.pbc[0],self.node_crds[:,1]+y_shift*self.pbc[1],
                            marker="o",s=self.ms,c=self.mc,zorder=1)
        else:
            # One colour per chain when chain information is available.
            for i,chain in enumerate(self.node_chains):
                self.ax.scatter(self.node_crds[chain,0]+x_shift*self.pbc[0],self.node_crds[chain,1]+y_shift*self.pbc[1],
                                marker="o",s=self.ms,color=self.chain_colours[i],edgecolor='k',zorder=1)
        # for r in self.ring_crds:
        #     self.ax.scatter(r[:,0]+x_shift*self.pbc[0],r[:,1]+y_shift*self.pbc[1],
        #                 marker="o",s=self.ms,c=self.mc,zorder=1)
        # for i,c in enumerate(self.node_crds):
        #     self.ax.text(c[0],c[1],i,size=8)

    def plot_cnxs(self,x_shift,y_shift):
        """
        Plot image connections as lines.

        A bond midpoint hit by exactly two half-segments is "complete" and
        drawn in the line colour; dangling (under-coordinated) bonds in red.
        """
        if not self.cnxs: return # Bounce if option not selected
        # Find under coordinated
        cc_count = {}
        for c in self.node_cnx_crds:
            cc = [c[1],c[3]]
            if cc[0]<0: cc[0] += self.pbc[0]
            if cc[1]<0: cc[1] += self.pbc[1]
            cc = tuple(cc)
            if cc in cc_count:
                cc_count[cc] += 1
            else:
                cc_count[cc] = 1
        complete = np.zeros(self.node_cnx_crds[:,0].size,dtype=bool)
        for i,c in enumerate(self.node_cnx_crds):
            cc = [c[1],c[3]]
            if cc[0]<0: cc[0] += self.pbc[0]
            if cc[1]<0: cc[1] += self.pbc[1]
            cc = tuple(cc)
            if cc_count[cc]==2:
                complete[i]=True
        # Shift into the requested periodic image, draw, then shift back
        # (the array is mutated in place and reused for other images).
        self.node_cnx_crds[:,:2] += x_shift*self.pbc[0]
        self.node_cnx_crds[:,2:] += y_shift*self.pbc[1]
        for cnx_crd in self.node_cnx_crds[complete]:
            self.ax.plot(cnx_crd[:2],cnx_crd[2:],c=self.lc,lw=self.lw,zorder=-1)
        for cnx_crd in self.node_cnx_crds[~complete]:
            self.ax.plot(cnx_crd[:2],cnx_crd[2:],c='r',lw=self.lw,zorder=-1)
        self.node_cnx_crds[:,:2] -= x_shift*self.pbc[0]
        self.node_cnx_crds[:,2:] -= y_shift*self.pbc[1]

    def plot_rings(self,x_shift,y_shift):
        """
        Plot rings as polygons.
        """
        if not self.rings: return # Bounce if option not selected
        patches = []
        colours = []
        for ring in self.ring_crds:
            # Shift into the periodic image in place; undone at loop end.
            ring[:,0] += x_shift*self.pbc[0]
            ring[:,1] += y_shift*self.pbc[1]
            # Optional cropping windows for specific lattices (disabled):
            # xbox = (ring[:,0]>0)*(ring[:,0]<6) # square
            # ybox = (ring[:,1]>0)*(ring[:,1]<6) # square
            # xbox = (ring[:,0]>1.35)*(ring[:,0]<7.25) # snub
            # ybox = (ring[:,1]>0.97)*(ring[:,1]<7.2) # snub
            # xbox = (ring[:,0]>1.35)*(ring[:,0]<6.3) # isosnub
            # ybox = (ring[:,1]>0.97)*(ring[:,1]<4.8) # isosnub
            # xbox = (ring[:,0]>1.35)*(ring[:,0]<6.71) # tri
            # ybox = (ring[:,1]>0.97)*(ring[:,1]<5.5) # tri
            # xbox = (ring[:,0]>0.84)*(ring[:,0]<8.4) # trihex
            # ybox = (ring[:,1]>0.56)*(ring[:,1]<8.1) # trihex
            xbox=np.ones_like(ring[:,0])
            ybox=np.ones_like(ring[:,1])
            if np.all(xbox*ybox==1):
                patches.append(Polygon(np.array(ring), True))
                colours.append(self.ring_colours[ring[:,0].size])
                # self.ax.scatter(ring[:,0],ring[:,1],c=self.mc,s=self.ms)
            ring[:,0]-=x_shift*self.pbc[0]
            ring[:,1]-=y_shift*self.pbc[1]
        self.ax.add_collection(PatchCollection(patches,facecolor=colours,linewidths=self.lw,edgecolor="k",zorder=0))

    def plot_dual(self,x_shift,y_shift):
        """
        Plot image ring com as scatter.
        """
        if not self.dual: return # Bounce if option not selected
        self.ax.scatter(self.dual_crds[:,0]+x_shift*self.pbc[0],self.dual_crds[:,1]+y_shift*self.pbc[1],
                        marker="s",s=self.ms,zorder=1,color='grey')
        # Dual connections (disabled):
        # for cnx in self.dual_cnxs:
        #     print(cnx)
        #     c0 = self.dual_crds[cnx[0],:]
        #     c1 = self.dual_crds[cnx[1],:]
        #     v = c1-c0
        #     if v[0]>self.mic[0]: v[0]-=self.pbc[0]
        #     elif v[0]<-self.mic[0]: v[0]+=self.pbc[0]
        #     if v[1]>self.mic[1]: v[1]-=self.pbc[1]
        #     elif v[1]<-self.mic[1]: v[1]+=self.pbc[1]
        #     self.ax.plot([c0[0],c0[0]+v[0]],[c0[1],c0[1]+v[1]],color='grey',lw=self.lw)
        # for i,c in enumerate(self.dual_crds):
        #     self.ax.text(c[0],c[1],i,size=8)

    def plot_envs(self,x_shift,y_shift):
        """
        Plot image environments as scatter.
        """
        # NOTE(review): self.rec_envs is only assigned in code that is
        # commented out in load_sample — with envs=True this will raise
        # AttributeError. Confirm whether env parsing should be re-enabled.
        if not self.envs: return # Bounce if option not selected
        cmap = cm.get_cmap('Set1')
        colours = []
        for e in self.rec_envs:
            colours.append(cmap(e))
        self.ax.scatter(self.rec_crds[:,0]+x_shift*self.pbc[0],self.rec_crds[:,1]+y_shift*self.pbc[1],
                        marker="o",s=self.ms,facecolors=colours,edgecolors='k',zorder=1)

    def init_ring_colours(self,av_ring_size=6,max_ring_size=10):
        """
        Initialise colouring for rings.

        Rings smaller than the average are shaded blue, larger red, and the
        average-sized ring grey; index i of self.ring_colours is the colour
        for a ring of size i.
        """
        # NOTE(review): this overrides the av_ring_size argument passed by
        # load_sample (mean_ring_size) — confirm this hard-coding is intended.
        av_ring_size=6
        map_lower = cm.get_cmap('Blues_r', 128)
        map_upper = cm.get_cmap('Reds', 128)
        map_mean=cm.get_cmap("Greys")
        # Trim the extremes of each colormap for better contrast.
        map_lower=ListedColormap(map_lower(np.arange(20,100)))
        map_upper=ListedColormap(map_upper(np.arange(20,100)))
        norm_lower=Normalize(vmin=av_ring_size-3,vmax=av_ring_size)
        norm_upper=Normalize(vmin=av_ring_size,vmax=av_ring_size+6)
        colour_mean=map_mean(50)
        self.ring_colours=[]
        for i in range(max_ring_size+1):
            if i < 3:
                self.ring_colours.append("white")
            elif np.abs(i-av_ring_size)<1e-6:
                self.ring_colours.append(colour_mean)
            elif i<av_ring_size:
                self.ring_colours.append(map_lower(norm_lower(i)))
            else:
                self.ring_colours.append(map_upper(norm_upper(i)))
            # Alternative flat colour schemes (disabled):
            # if i%2==0:
            #     self.ring_colours[-1] = 'whitesmoke'
            # else:
            #     self.ring_colours[-1] = 'gold'
            # self.ring_colours[-1] = 'whitesmoke'

    def init_chain_colours(self):
        """
        Initialising colouring for chains: one rainbow colour per chain.
        """
        n_chains = len(self.node_chains)
        cmap=cm.get_cmap("rainbow")
        self.chain_colours=cmap(np.linspace(0,1,n_chains))
        # Alternative length-based colouring (disabled):
        # cmap=ListedColormap(cmap(np.arange(0,1,0.001)))
        # lengths=np.array([c.size for c in self.node_chains])
        # norm=Normalize(vmin=0,vmax=np.max(lengths))
        # rand = np.random.uniform(0,1,len(self.node_chains))
        # rand = np.ones(len(self.node_chains))
        # self.chain_colours = [cmap(norm(l)) for l in lengths]
if __name__ == "__main__":
prefix = sys.argv[1]
sample = int(sys.argv[2])
if len(sys.argv) <= 3:
nodes = True
cnxs = False
rings = True
periodic = False
dual = False
envs = False
save = False
else:
flags = sys.argv[3]
nodes = 'n' in flags
cnxs = 'c' in flags
rings = 'r' in flags
periodic = 'p' in flags
dual = 'd' in flags
envs = 'e' in flags
save = 's' in flags
plot=Plot(nodes=nodes,cnxs=cnxs,rings=rings,periodic=periodic,dual=dual,envs=envs)
plot(prefix,sample,ms=0,lw=0.75,save=save)
| [
"numpy.ones_like",
"numpy.abs",
"matplotlib.pyplot.savefig",
"numpy.average",
"matplotlib.collections.PatchCollection",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.linspace",
"numpy.random.seed",
"matplotlib.pylab.rcParams.update",
"matplotlib.colors.Normalize",
"numpy.a... | [((293, 310), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (307, 310), True, 'import numpy as np\n'), ((801, 830), 'matplotlib.pylab.rcParams.update', 'pylab.rcParams.update', (['params'], {}), '(params)\n', (822, 830), True, 'import matplotlib.pylab as pylab\n'), ((850, 862), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (860, 862), True, 'import matplotlib.pyplot as plt\n'), ((1870, 1880), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1878, 1880), True, 'import matplotlib.pyplot as plt\n'), ((2069, 2104), 'numpy.genfromtxt', 'np.genfromtxt', (["(prefix + '_crds.dat')"], {}), "(prefix + '_crds.dat')\n", (2082, 2104), True, 'import numpy as np\n'), ((2588, 2624), 'numpy.genfromtxt', 'np.genfromtxt', (["(prefix + '_rcrds.dat')"], {}), "(prefix + '_rcrds.dat')\n", (2601, 2624), True, 'import numpy as np\n'), ((4311, 4349), 'numpy.zeros', 'np.zeros', (['(self.node_cnxs.shape[0], 4)'], {}), '((self.node_cnxs.shape[0], 4))\n', (4319, 4349), True, 'import numpy as np\n'), ((4365, 4376), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4373, 4376), True, 'import numpy as np\n'), ((4393, 4404), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4401, 4404), True, 'import numpy as np\n'), ((7902, 7953), 'numpy.zeros', 'np.zeros', (['self.node_cnx_crds[:, 0].size'], {'dtype': 'bool'}), '(self.node_cnx_crds[:, 0].size, dtype=bool)\n', (7910, 7953), True, 'import numpy as np\n'), ((11298, 11317), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Set1"""'], {}), "('Set1')\n", (11309, 11317), False, 'from matplotlib import cm\n'), ((11773, 11800), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Blues_r"""', '(128)'], {}), "('Blues_r', 128)\n", (11784, 11800), False, 'from matplotlib import cm\n'), ((11821, 11845), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Reds"""', '(128)'], {}), "('Reds', 128)\n", (11832, 11845), False, 'from matplotlib import cm\n'), ((11863, 11883), 'matplotlib.cm.get_cmap', 'cm.get_cmap', 
(['"""Greys"""'], {}), "('Greys')\n", (11874, 11883), False, 'from matplotlib import cm\n'), ((12030, 12081), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(av_ring_size - 3)', 'vmax': 'av_ring_size'}), '(vmin=av_ring_size - 3, vmax=av_ring_size)\n', (12039, 12081), False, 'from matplotlib.colors import ListedColormap, LinearSegmentedColormap, Normalize\n'), ((12098, 12149), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': 'av_ring_size', 'vmax': '(av_ring_size + 6)'}), '(vmin=av_ring_size, vmax=av_ring_size + 6)\n', (12107, 12149), False, 'from matplotlib.colors import ListedColormap, LinearSegmentedColormap, Normalize\n'), ((12966, 12988), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""rainbow"""'], {}), "('rainbow')\n", (12977, 12988), False, 'from matplotlib import cm\n'), ((1790, 1824), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""config.png"""'], {'dpi': '(300)'}), "('config.png', dpi=300)\n", (1801, 1824), True, 'import matplotlib.pyplot as plt\n'), ((1836, 1861), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""config.pdf"""'], {}), "('config.pdf')\n", (1847, 1861), True, 'import matplotlib.pyplot as plt\n'), ((3154, 3189), 'numpy.array', 'np.array', (['self.node_cnxs'], {'dtype': 'int'}), '(self.node_cnxs, dtype=int)\n', (3162, 3189), True, 'import numpy as np\n'), ((3799, 3823), 'numpy.array', 'np.array', (['self.dual_crds'], {}), '(self.dual_crds)\n', (3807, 3823), True, 'import numpy as np\n'), ((5159, 5183), 'numpy.zeros', 'np.zeros', (['(ring.size, 2)'], {}), '((ring.size, 2))\n', (5167, 5183), True, 'import numpy as np\n'), ((5718, 5740), 'numpy.average', 'np.average', (['crds[:, 0]'], {}), '(crds[:, 0])\n', (5728, 5740), True, 'import numpy as np\n'), ((5760, 5782), 'numpy.average', 'np.average', (['crds[:, 1]'], {}), '(crds[:, 1])\n', (5770, 5782), True, 'import numpy as np\n'), ((9672, 9696), 'numpy.ones_like', 'np.ones_like', (['ring[:, 0]'], {}), '(ring[:, 0])\n', (9684, 9696), True, 'import numpy as np\n'), ((9713, 
9737), 'numpy.ones_like', 'np.ones_like', (['ring[:, 1]'], {}), '(ring[:, 1])\n', (9725, 9737), True, 'import numpy as np\n'), ((9752, 9776), 'numpy.all', 'np.all', (['(xbox * ybox == 1)'], {}), '(xbox * ybox == 1)\n', (9758, 9776), True, 'import numpy as np\n'), ((10094, 10187), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'facecolor': 'colours', 'linewidths': 'self.lw', 'edgecolor': '"""k"""', 'zorder': '(0)'}), "(patches, facecolor=colours, linewidths=self.lw, edgecolor=\n 'k', zorder=0)\n", (10109, 10187), False, 'from matplotlib.collections import PatchCollection\n'), ((13021, 13048), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_chains'], {}), '(0, 1, n_chains)\n', (13032, 13048), True, 'import numpy as np\n'), ((11927, 11945), 'numpy.arange', 'np.arange', (['(20)', '(100)'], {}), '(20, 100)\n', (11936, 11945), True, 'import numpy as np\n'), ((11990, 12008), 'numpy.arange', 'np.arange', (['(20)', '(100)'], {}), '(20, 100)\n', (11999, 12008), True, 'import numpy as np\n'), ((3623, 3648), 'numpy.array', 'np.array', (['ring'], {'dtype': 'int'}), '(ring, dtype=int)\n', (3631, 3648), True, 'import numpy as np\n'), ((12339, 12363), 'numpy.abs', 'np.abs', (['(i - av_ring_size)'], {}), '(i - av_ring_size)\n', (12345, 12363), True, 'import numpy as np\n'), ((9813, 9827), 'numpy.array', 'np.array', (['ring'], {}), '(ring)\n', (9821, 9827), True, 'import numpy as np\n')] |
import io
import re
from contextlib import redirect_stdout
import pytest
from numpy.distutils import log
def setup_module():
f = io.StringIO() # changing verbosity also logs here, capture that
with redirect_stdout(f):
log.set_verbosity(2, force=True) # i.e. DEBUG
def teardown_module():
log.set_verbosity(0, force=True) # the default
r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"])
def test_log_prefix(func_name):
func = getattr(log, func_name)
msg = f"{func_name} message"
f = io.StringIO()
with redirect_stdout(f):
func(msg)
out = f.getvalue()
assert out # sanity check
clean_out = r_ansi.sub("", out)
line = next(line for line in clean_out.splitlines())
assert line == f"{func_name.upper()}: {msg}"
| [
"contextlib.redirect_stdout",
"numpy.distutils.log.set_verbosity",
"re.compile",
"pytest.mark.parametrize",
"io.StringIO"
] | [((374, 429), 're.compile', 're.compile', (['"""\\\\x1B(?:[@-Z\\\\\\\\-_]|\\\\[[0-?]*[ -/]*[@-~])"""'], {}), "('\\\\x1B(?:[@-Z\\\\\\\\-_]|\\\\[[0-?]*[ -/]*[@-~])')\n", (384, 429), False, 'import re\n'), ((430, 502), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func_name"""', "['error', 'warn', 'info', 'debug']"], {}), "('func_name', ['error', 'warn', 'info', 'debug'])\n", (453, 502), False, 'import pytest\n'), ((137, 150), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (148, 150), False, 'import io\n'), ((315, 347), 'numpy.distutils.log.set_verbosity', 'log.set_verbosity', (['(0)'], {'force': '(True)'}), '(0, force=True)\n', (332, 347), False, 'from numpy.distutils import log\n'), ((611, 624), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (622, 624), False, 'import io\n'), ((211, 229), 'contextlib.redirect_stdout', 'redirect_stdout', (['f'], {}), '(f)\n', (226, 229), False, 'from contextlib import redirect_stdout\n'), ((239, 271), 'numpy.distutils.log.set_verbosity', 'log.set_verbosity', (['(2)'], {'force': '(True)'}), '(2, force=True)\n', (256, 271), False, 'from numpy.distutils import log\n'), ((634, 652), 'contextlib.redirect_stdout', 'redirect_stdout', (['f'], {}), '(f)\n', (649, 652), False, 'from contextlib import redirect_stdout\n')] |
# https://colab.research.google.com/github/akeshavan/IntroDL/blob/master/IntroToKeras.ipynb#scrollTo=ZCR5NALKsm1o
# *********************************************************************************************************
# 0. Required libraries
# *********************************************************************************************************
import numpy as np
import matplotlib
matplotlib.use('Agg')
from glob import glob
import matplotlib.pyplot as plt
from keras.utils import to_categorical
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout
from keras.layers import Conv2D, BatchNormalization, MaxPooling2D
from keras.optimizers import Adam, SGD
from keras import backend as K
# *********************************************************************************************************
# 1. Required functions
# *********************************************************************************************************
def load_image(filename):
data_slice = plt.imread(filename)
assert data_slice.shape == (256,256), "image file is not the right shape"
return data_slice
def view_slice(data, index):
plt.imshow(data[index,:,:,0],cmap=plt.cm.Greys_r)
plt.axis('off');
plt.savefig('./results/viewSlice_'+str(index)+'.png')
def get_figure():
fig, ax = plt.subplots(1)
plt.tick_params(top='off', right='off', which='both')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
return fig, ax
def show_activation(layer_name):
layer_output = layer_dict[layer_name].output
fn = K.function([model.input], [layer_output])
inp = X_train[0:1]
this_hidden = fn([inp])[0]
# plot the activations from the first 8 filters
plt.figure(figsize=(15,8))
for i in range(8):
plt.subplot(1,8,i+1)
plt.imshow(this_hidden[0,:,:,i], plt.cm.Greys_r)
plt.axis('off')
plt.savefig('./results/show_activation_'+layer_name+'.png')
return
# *********************************************************************************************************
# 2. Loading and viewing data
# *********************************************************************************************************
image_files = glob('dataset/*.jpg')
N = len(image_files)
print(N)
# initialize an array that is the required shape:
data_array = np.zeros((N, 256, 256, 1))
# iterate through all of the image files
for i, file in enumerate(image_files):
data_slice = load_image(file)
data_array[i, :, :, 0] = data_slice
view_slice(data_array, 0)
# *********************************************************************************************************
# 3. Split data into a training and testing set
# *********************************************************************************************************
np.random.seed(0)
indices = np.arange(N)
np.random.shuffle(indices)
# the first 80% of the data will be the training set.
N_80p = int(0.8 * N)
indices_train = indices[:N_80p]
X_train = data_array[indices_train,:,:,:]
# the last 20% of the data will be the testing set.
indices_test = indices[N_80p:]
X_test = data_array[indices_test,:,:,:]
print(X_train.shape, X_test.shape)
# *********************************************************************************************************
# 4. Introducing an Anterior-Posterior Flip
# *********************************************************************************************************
X_train_flip = X_train[:, :, ::-1, :]
X_test_flip = X_test[:, :, ::-1, :]
X_train = np.vstack((X_train, X_train_flip))
X_test = np.vstack((X_test, X_test_flip))
print(X_train.shape, X_test.shape)
# *********************************************************************************************************
# 5. Creating the outcome variable
# *********************************************************************************************************
y_train_label = np.zeros(X_train.shape[0])
N_train_half = int(X_train.shape[0] / 2)
y_train_label[:N_train_half] = 1
y_test_label = np.zeros(X_test.shape[0])
N_test_half = int(X_test.shape[0] / 2)
y_test_label[:N_test_half] = 1
y_train = to_categorical(y_train_label) # to make 2-D data
y_test = to_categorical(y_test_label)
shuffled_train_indices = np.arange(2*N_train_half)
np.random.shuffle(shuffled_train_indices)
X_train = X_train[shuffled_train_indices, :,:,:]
y_train = y_train[shuffled_train_indices, :]
# *********************************************************************************************************
# 6. Creating a Sequential Model
# *********************************************************************************************************
K.clear_session()
kernel_size = (3, 3)
n_classes = 2
filters = 8
model = Sequential()
model.add(Conv2D(filters, kernel_size, activation='relu', input_shape=(256, 256, 1)))
# zero mean unit variance
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Conv2D(filters*2, kernel_size, activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters*4, kernel_size, activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters*8, kernel_size, activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters*16, kernel_size, activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters*32, kernel_size, activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
learning_rate = 1e-5
# optimizer
adam = Adam(lr=learning_rate)
sgd = SGD(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
optimizer=adam, # swap out for sgd
metrics=['accuracy'])
model.summary()
# *********************************************************************************************************
# 7. Fitting the Model
# *********************************************************************************************************
fit = model.fit(X_train, y_train, epochs=5, batch_size=2)
fig, ax = get_figure()
epoch = np.arange(5) + 1
ax.plot(epoch, fit.history['acc'], marker="o", linewidth=2, color="steelblue", label="accuracy")
ax.plot(epoch, fit.history['loss'], marker="o", linewidth=2, color="orange", label="loss")
ax.set_xlabel('epoch')
ax.legend(frameon=False);
plt.savefig('./results/training_process.png')
# *********************************************************************************************************
# 8. Visualizing middle levels
# *********************************************************************************************************
layer_dict = dict([(layer.name, layer) for layer in model.layers])
show_activation('conv2d_1')
show_activation('conv2d_2')
show_activation('conv2d_3')
show_activation('conv2d_4')
show_activation('conv2d_5')
# END ****************
| [
"keras.layers.Conv2D",
"keras.utils.to_categorical",
"keras.optimizers.SGD",
"keras.layers.Dense",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.random.seed",
"numpy.vstack",
"keras.backend.clear_session",
"matplotlib.pyplot.axis",
"glob.glob",
"keras.optimizers.Adam",
"matplotlib.pyplo... | [((391, 412), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (405, 412), False, 'import matplotlib\n'), ((2278, 2299), 'glob.glob', 'glob', (['"""dataset/*.jpg"""'], {}), "('dataset/*.jpg')\n", (2282, 2299), False, 'from glob import glob\n'), ((2394, 2420), 'numpy.zeros', 'np.zeros', (['(N, 256, 256, 1)'], {}), '((N, 256, 256, 1))\n', (2402, 2420), True, 'import numpy as np\n'), ((2873, 2890), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2887, 2890), True, 'import numpy as np\n'), ((2902, 2914), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2911, 2914), True, 'import numpy as np\n'), ((2916, 2942), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2933, 2942), True, 'import numpy as np\n'), ((3599, 3633), 'numpy.vstack', 'np.vstack', (['(X_train, X_train_flip)'], {}), '((X_train, X_train_flip))\n', (3608, 3633), True, 'import numpy as np\n'), ((3643, 3675), 'numpy.vstack', 'np.vstack', (['(X_test, X_test_flip)'], {}), '((X_test, X_test_flip))\n', (3652, 3675), True, 'import numpy as np\n'), ((3979, 4005), 'numpy.zeros', 'np.zeros', (['X_train.shape[0]'], {}), '(X_train.shape[0])\n', (3987, 4005), True, 'import numpy as np\n'), ((4096, 4121), 'numpy.zeros', 'np.zeros', (['X_test.shape[0]'], {}), '(X_test.shape[0])\n', (4104, 4121), True, 'import numpy as np\n'), ((4203, 4232), 'keras.utils.to_categorical', 'to_categorical', (['y_train_label'], {}), '(y_train_label)\n', (4217, 4232), False, 'from keras.utils import to_categorical\n'), ((4261, 4289), 'keras.utils.to_categorical', 'to_categorical', (['y_test_label'], {}), '(y_test_label)\n', (4275, 4289), False, 'from keras.utils import to_categorical\n'), ((4316, 4343), 'numpy.arange', 'np.arange', (['(2 * N_train_half)'], {}), '(2 * N_train_half)\n', (4325, 4343), True, 'import numpy as np\n'), ((4342, 4383), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffled_train_indices'], {}), '(shuffled_train_indices)\n', 
(4359, 4383), True, 'import numpy as np\n'), ((4728, 4745), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (4743, 4745), True, 'from keras import backend as K\n'), ((4801, 4813), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4811, 4813), False, 'from keras.models import Sequential\n'), ((5664, 5686), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (5668, 5686), False, 'from keras.optimizers import Adam, SGD\n'), ((5693, 5714), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (5696, 5714), False, 'from keras.optimizers import Adam, SGD\n'), ((6455, 6500), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./results/training_process.png"""'], {}), "('./results/training_process.png')\n", (6466, 6500), True, 'import matplotlib.pyplot as plt\n'), ((1035, 1055), 'matplotlib.pyplot.imread', 'plt.imread', (['filename'], {}), '(filename)\n', (1045, 1055), True, 'import matplotlib.pyplot as plt\n'), ((1190, 1243), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data[index, :, :, 0]'], {'cmap': 'plt.cm.Greys_r'}), '(data[index, :, :, 0], cmap=plt.cm.Greys_r)\n', (1200, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1244, 1259), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1252, 1259), True, 'import matplotlib.pyplot as plt\n'), ((1356, 1371), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (1368, 1371), True, 'import matplotlib.pyplot as plt\n'), ((1376, 1429), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'top': '"""off"""', 'right': '"""off"""', 'which': '"""both"""'}), "(top='off', right='off', which='both')\n", (1391, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1631, 1672), 'keras.backend.function', 'K.function', (['[model.input]', '[layer_output]'], {}), '([model.input], [layer_output])\n', (1641, 1672), True, 'from keras import backend as K\n'), ((1783, 1810), 'matplotlib.pyplot.figure', 
'plt.figure', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (1793, 1810), True, 'import matplotlib.pyplot as plt\n'), ((1947, 2010), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./results/show_activation_' + layer_name + '.png')"], {}), "('./results/show_activation_' + layer_name + '.png')\n", (1958, 2010), True, 'import matplotlib.pyplot as plt\n'), ((4825, 4899), 'keras.layers.Conv2D', 'Conv2D', (['filters', 'kernel_size'], {'activation': '"""relu"""', 'input_shape': '(256, 256, 1)'}), "(filters, kernel_size, activation='relu', input_shape=(256, 256, 1))\n", (4831, 4899), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((4937, 4957), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4955, 4957), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((4969, 4983), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (4981, 4983), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((4995, 5046), 'keras.layers.Conv2D', 'Conv2D', (['(filters * 2)', 'kernel_size'], {'activation': '"""relu"""'}), "(filters * 2, kernel_size, activation='relu')\n", (5001, 5046), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((5056, 5070), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (5068, 5070), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((5082, 5133), 'keras.layers.Conv2D', 'Conv2D', (['(filters * 4)', 'kernel_size'], {'activation': '"""relu"""'}), "(filters * 4, kernel_size, activation='relu')\n", (5088, 5133), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((5143, 5157), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (5155, 5157), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((5169, 5220), 'keras.layers.Conv2D', 'Conv2D', (['(filters * 8)', 'kernel_size'], {'activation': '"""relu"""'}), 
"(filters * 8, kernel_size, activation='relu')\n", (5175, 5220), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((5230, 5244), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (5242, 5244), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((5256, 5308), 'keras.layers.Conv2D', 'Conv2D', (['(filters * 16)', 'kernel_size'], {'activation': '"""relu"""'}), "(filters * 16, kernel_size, activation='relu')\n", (5262, 5308), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((5318, 5332), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (5330, 5332), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((5344, 5396), 'keras.layers.Conv2D', 'Conv2D', (['(filters * 32)', 'kernel_size'], {'activation': '"""relu"""'}), "(filters * 32, kernel_size, activation='relu')\n", (5350, 5396), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((5406, 5420), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (5418, 5420), False, 'from keras.layers import Conv2D, BatchNormalization, MaxPooling2D\n'), ((5432, 5441), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5439, 5441), False, 'from keras.layers import Dense, Flatten, Dropout\n'), ((5453, 5482), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (5458, 5482), False, 'from keras.layers import Dense, Flatten, Dropout\n'), ((5494, 5506), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5501, 5506), False, 'from keras.layers import Dense, Flatten, Dropout\n'), ((5518, 5547), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (5523, 5547), False, 'from keras.layers import Dense, Flatten, Dropout\n'), ((5559, 5571), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5566, 5571), False, 'from keras.layers import Dense, 
Flatten, Dropout\n'), ((5583, 5621), 'keras.layers.Dense', 'Dense', (['n_classes'], {'activation': '"""softmax"""'}), "(n_classes, activation='softmax')\n", (5588, 5621), False, 'from keras.layers import Dense, Flatten, Dropout\n'), ((6201, 6213), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6210, 6213), True, 'import numpy as np\n'), ((1841, 1865), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(8)', '(i + 1)'], {}), '(1, 8, i + 1)\n', (1852, 1865), True, 'import matplotlib.pyplot as plt\n'), ((1870, 1921), 'matplotlib.pyplot.imshow', 'plt.imshow', (['this_hidden[0, :, :, i]', 'plt.cm.Greys_r'], {}), '(this_hidden[0, :, :, i], plt.cm.Greys_r)\n', (1880, 1921), True, 'import matplotlib.pyplot as plt\n'), ((1927, 1942), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1935, 1942), True, 'import matplotlib.pyplot as plt\n')] |
import logging, os
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout
from tensorflow.keras.optimizers import SGD, Adam, Adagrad
from tensorflow.keras import backend as K
import numpy as np
from numpy.random import seed
from datetime import datetime
from datetime import timedelta
import pickle
import os
import os.path
import math
import argparse
def accuracy05(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>0.5,y_pred>0.5), tf.float32))
tn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>0.5),tf.math.logical_not(y_pred>0.5)), tf.float32))
return (tp+tn)/tf.cast(tf.size(y_true), tf.float32)
def precision05(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>0.5,y_pred>0.5), tf.float32))
total_pred = tf.reduce_sum(tf.cast(y_pred>0.5, tf.float32))
return tp/(total_pred+K.epsilon())
def recall05(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>0.5,y_pred>0.5), tf.float32))
total_true = tf.reduce_sum(tf.cast(y_true>0.5, tf.float32))
return tp/(total_true+K.epsilon())
def accuracy1(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>1,y_pred>1), tf.float32))
tn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>1),tf.math.logical_not(y_pred>1)), tf.float32))
return (tp+tn)/tf.cast(tf.size(y_true), tf.float32)
def precision1(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>1,y_pred>1), tf.float32))
#fp = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>1),y_pred>1), tf.float64))
total_pred = tf.reduce_sum(tf.cast(y_pred>1, tf.float32))
#if tf.math.less(total_pred, tf.constant([1.])):
# return 0.
return tp/(total_pred+K.epsilon())
def recall1(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>1,y_pred>1), tf.float32))
#fn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_pred>1),y_true>1), tf.float64))
total_true = tf.reduce_sum(tf.cast(y_true>1, tf.float32))
#if tf.math.less(total_true, tf.constant([1.])):
# return 0.
return tp/(total_true+K.epsilon())
def accuracy5(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>5,y_pred>5), tf.float32))
tn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>5),tf.math.logical_not(y_pred>5)), tf.float32))
return (tp+tn)/tf.cast(tf.size(y_true), tf.float32)
def precision5(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>5,y_pred>5), tf.float32))
#fp = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>5),y_pred>5), tf.float64))
total_pred = tf.reduce_sum(tf.cast(y_pred>5, tf.float32))
#if tf.math.less(total_pred, tf.constant([1.])):
# return 0.
return tp/(total_pred+K.epsilon())
def recall5(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>5,y_pred>5), tf.float32))
#fn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_pred>5),y_true>5), tf.float64))
total_true = tf.reduce_sum(tf.cast(y_true>5, tf.float32))
#if tf.math.less(total_true, tf.constant([1.])):
# return 0.
return tp/(total_true+K.epsilon())
def accuracy10(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>10,y_pred>10), tf.float32))
tn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>10),tf.math.logical_not(y_pred>10)), tf.float32))
return (tp+tn)/tf.cast(tf.size(y_true), tf.float32)
def precision10(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>10,y_pred>10), tf.float32))
#fp = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>10),y_pred>10), tf.float64))
total_pred = tf.reduce_sum(tf.cast(y_pred>10, tf.float32))
#if tf.math.less(total_pred, tf.constant([1.])):
# return 0.
return tp/(total_pred+K.epsilon())
def recall10(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>10,y_pred>10), tf.float32))
#fn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_pred>10),y_true>10), tf.float64))
total_true = tf.reduce_sum(tf.cast(y_true>10, tf.float32))
#if tf.math.less(total_true, tf.constant([1])):
# return 0.
return tp/(total_true+K.epsilon())
def get_unet():
concat_axis = 3
inputs = layers.Input(shape=(512, 512, 3))
feats = 8#16
bn0 = BatchNormalization(axis=3)(inputs)
conv1 = layers.Conv2D(feats, (3, 3), activation='relu', padding='same', name='conv1_1')(bn0)
bn2 = BatchNormalization(axis=3)(conv1)
pool1 = layers.MaxPooling2D(pool_size=(2, 2))(bn2) #256
conv2 = layers.Conv2D(2*feats, (3, 3), activation='relu', padding='same')(pool1)
bn4 = BatchNormalization(axis=3)(conv2)
pool2 = layers.MaxPooling2D(pool_size=(2, 2))(bn4) #128
conv3 = layers.Conv2D(4*feats, (3, 3), activation='relu', padding='same')(pool2)
bn6 = BatchNormalization(axis=3)(conv3)
pool3 = layers.MaxPooling2D(pool_size=(2, 2))(bn6) #64
conv4 = layers.Conv2D(8*feats, (3, 3), activation='relu', padding='same')(pool3)
bn8 = BatchNormalization(axis=3)(conv4)
pool4 = layers.MaxPooling2D(pool_size=(2, 2))(bn8) #32
conv5 = layers.Conv2D(16*feats, (3, 3), activation='relu', padding='same')(pool4)
bn10 = BatchNormalization(axis=3)(conv5)
pool5 = layers.MaxPooling2D(pool_size=(2, 2))(bn10) #16
conv6 = layers.Conv2D(32*feats, (3, 3), activation='relu', padding='same')(pool5)
bn11 = BatchNormalization(axis=3)(conv6)
up_conv6 = layers.UpSampling2D(size=(2, 2))(bn11) #32
up7 = layers.concatenate([up_conv6, conv5], axis=concat_axis)
conv7 = layers.Conv2D(16*feats, (3, 3), activation='relu', padding='same')(up7)
bn13 = BatchNormalization(axis=3)(conv7)
up_conv5 = layers.UpSampling2D(size=(2, 2))(bn13) #64
up6 = layers.concatenate([up_conv5, conv4], axis=concat_axis)
conv6 = layers.Conv2D(8*feats, (3, 3), activation='relu', padding='same')(up6)
bn15 = BatchNormalization(axis=3)(conv6)
up_conv6 = layers.UpSampling2D(size=(2, 2))(bn15) #128
up7 = layers.concatenate([up_conv6, conv3], axis=concat_axis)
conv7 = layers.Conv2D(4*feats, (3, 3), activation='relu', padding='same')(up7)
bn13 = BatchNormalization(axis=3)(conv7)
# Rectify last convolution layer to constraint output to positive precipitation values.
conv8 = layers.Conv2D(1, (1, 1), activation='relu')(bn13)
model = models.Model(inputs=inputs, outputs=conv8)
return model
def get_band_data(loc, dates, b, mean=None, std=None):
y = np.concatenate([np.load(f"Y_{loc}_{d}.npy") for d in dates], axis=0)
y = np.clip(y,0,30)
x11 = np.concatenate([np.load(f"X_B11_{loc}_{d}.npy") for d in dates], axis=0)
x16 = np.concatenate([np.load(f"X_B16_{loc}_{d}.npy") for d in dates], axis=0)
xi = np.concatenate([np.load(f"X_B{b}_{loc}_{d}.npy") for d in dates], axis=0)
if mean is None:
mean = [x11.mean(),x16.mean(),xi.mean()]
std = [x11.std(),x16.std(),xi.std()]
x11 = (x11-mean[0])/std[0]
x16 = (x16-mean[1])/std[1]
xi = (xi-mean[2])/std[2]
x = np.stack((x11,x16,xi), axis=3)
x11 = None
x16 = None
xi = None
return x, y[:,:,:,None], mean, std
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Himawari-GPM Band comparison')
parser.add_argument('-b1', '--band1', help='Band 1 in list', type=int, required=True)
parser.add_argument('-b2', '--band2', help='Band 2 in list', type=int, required=True)
parser.add_argument('-b3', '--band3', help='Band 3 in list', type=int, required=True)
parser.add_argument('-loc', '--location', help='Geographic location', type=str, required=True)
parser.add_argument('-val', '--validation', help='Month used for validation', type=int, required=True)
parser.add_argument('-s', '--seed', help='Random seed', type=int, required=False, default=1)
args = parser.parse_args()
seed(args.seed)
if os.path.isfile(f'model_3months_200epochs_8chann_v{args.validation}_{args.location}_s{args.seed}_b{args.band1}_{args.band2}_{args.band3}.h5'):
exit()
tf.random.set_seed(args.seed)
dates = ["201811","201812","201901","201902"]
x_train, y_train, mean, std = get_band_data(args.location, [x for i, x in enumerate(dates) if i!=args.validation], args.band3)
x_test, y_test, _, _ = get_band_data(args.location, [x for i, x in enumerate(dates) if i==args.validation], args.band3, mean, std)
print(x_train.shape, y_train.shape)
print("MSE train", np.mean(np.square(y_train)))
print("MSE test", np.mean(np.square(y_test)))
model = get_unet()
print(model.summary())
opt = Adagrad(lr=0.0001)
model.compile(loss='mse', metrics=[accuracy05,precision05,recall05,accuracy1,precision1,recall1,accuracy5,precision5,recall5,accuracy10,precision10,recall10], optimizer=opt)
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), shuffle=True, epochs=200, verbose=1)
with open(f'history_3months_200epochs_8chann_v{args.validation}_{args.location}_s{args.seed}_b{args.band1}_{args.band2}_{args.band3}.pkl', 'wb') as f:
pickle.dump(history.history, f)
model.save(f'model_3months_200epochs_8chann_v{args.validation}_{args.location}_s{args.seed}_b{args.band1}_{args.band2}_{args.band3}.h5')
| [
"numpy.clip",
"tensorflow.math.logical_not",
"tensorflow.keras.backend.epsilon",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.cast",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Conv2D",
"argparse.ArgumentParser",
"tensorflow.keras.optimizers.Adagrad",
"numpy.stack",
"n... | [((20, 52), 'logging.disable', 'logging.disable', (['logging.WARNING'], {}), '(logging.WARNING)\n', (35, 52), False, 'import logging, os\n'), ((4737, 4770), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': '(512, 512, 3)'}), '(shape=(512, 512, 3))\n', (4749, 4770), False, 'from tensorflow.keras import layers\n'), ((6001, 6056), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[up_conv6, conv5]'], {'axis': 'concat_axis'}), '([up_conv6, conv5], axis=concat_axis)\n', (6019, 6056), False, 'from tensorflow.keras import layers\n'), ((6260, 6315), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[up_conv5, conv4]'], {'axis': 'concat_axis'}), '([up_conv5, conv4], axis=concat_axis)\n', (6278, 6315), False, 'from tensorflow.keras import layers\n'), ((6515, 6570), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[up_conv6, conv3]'], {'axis': 'concat_axis'}), '([up_conv6, conv3], axis=concat_axis)\n', (6533, 6570), False, 'from tensorflow.keras import layers\n'), ((6872, 6914), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': 'inputs', 'outputs': 'conv8'}), '(inputs=inputs, outputs=conv8)\n', (6884, 6914), False, 'from tensorflow.keras import models\n'), ((7075, 7092), 'numpy.clip', 'np.clip', (['y', '(0)', '(30)'], {}), '(y, 0, 30)\n', (7082, 7092), True, 'import numpy as np\n'), ((7569, 7601), 'numpy.stack', 'np.stack', (['(x11, x16, xi)'], {'axis': '(3)'}), '((x11, x16, xi), axis=3)\n', (7577, 7601), True, 'import numpy as np\n'), ((7730, 7797), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Himawari-GPM Band comparison"""'}), "(description='Himawari-GPM Band comparison')\n", (7753, 7797), False, 'import argparse\n'), ((8407, 8422), 'numpy.random.seed', 'seed', (['args.seed'], {}), '(args.seed)\n', (8411, 8422), False, 'from numpy.random import seed\n'), ((8435, 8585), 'os.path.isfile', 'os.path.isfile', 
(['f"""model_3months_200epochs_8chann_v{args.validation}_{args.location}_s{args.seed}_b{args.band1}_{args.band2}_{args.band3}.h5"""'], {}), "(\n f'model_3months_200epochs_8chann_v{args.validation}_{args.location}_s{args.seed}_b{args.band1}_{args.band2}_{args.band3}.h5'\n )\n", (8449, 8585), False, 'import os\n'), ((8601, 8630), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['args.seed'], {}), '(args.seed)\n', (8619, 8630), True, 'import tensorflow as tf\n'), ((9157, 9175), 'tensorflow.keras.optimizers.Adagrad', 'Adagrad', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (9164, 9175), False, 'from tensorflow.keras.optimizers import SGD, Adam, Adagrad\n'), ((1037, 1070), 'tensorflow.cast', 'tf.cast', (['(y_pred > 0.5)', 'tf.float32'], {}), '(y_pred > 0.5, tf.float32)\n', (1044, 1070), True, 'import tensorflow as tf\n'), ((1265, 1298), 'tensorflow.cast', 'tf.cast', (['(y_true > 0.5)', 'tf.float32'], {}), '(y_true > 0.5, tf.float32)\n', (1272, 1298), True, 'import tensorflow as tf\n'), ((1901, 1932), 'tensorflow.cast', 'tf.cast', (['(y_pred > 1)', 'tf.float32'], {}), '(y_pred > 1, tf.float32)\n', (1908, 1932), True, 'import tensorflow as tf\n'), ((2305, 2336), 'tensorflow.cast', 'tf.cast', (['(y_true > 1)', 'tf.float32'], {}), '(y_true > 1, tf.float32)\n', (2312, 2336), True, 'import tensorflow as tf\n'), ((3011, 3042), 'tensorflow.cast', 'tf.cast', (['(y_pred > 5)', 'tf.float32'], {}), '(y_pred > 5, tf.float32)\n', (3018, 3042), True, 'import tensorflow as tf\n'), ((3415, 3446), 'tensorflow.cast', 'tf.cast', (['(y_true > 5)', 'tf.float32'], {}), '(y_true > 5, tf.float32)\n', (3422, 3446), True, 'import tensorflow as tf\n'), ((4132, 4164), 'tensorflow.cast', 'tf.cast', (['(y_pred > 10)', 'tf.float32'], {}), '(y_pred > 10, tf.float32)\n', (4139, 4164), True, 'import tensorflow as tf\n'), ((4538, 4570), 'tensorflow.cast', 'tf.cast', (['(y_true > 10)', 'tf.float32'], {}), '(y_true > 10, tf.float32)\n', (4545, 4570), True, 'import tensorflow as tf\n'), ((4799, 4825), 
'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (4817, 4825), False, 'from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout\n'), ((4851, 4930), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['feats', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""conv1_1"""'}), "(feats, (3, 3), activation='relu', padding='same', name='conv1_1')\n", (4864, 4930), False, 'from tensorflow.keras import layers\n'), ((4946, 4972), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (4964, 4972), False, 'from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout\n'), ((4992, 5029), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5011, 5029), False, 'from tensorflow.keras import layers\n'), ((5053, 5120), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(2 * feats)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(2 * feats, (3, 3), activation='relu', padding='same')\n", (5066, 5120), False, 'from tensorflow.keras import layers\n'), ((5136, 5162), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (5154, 5162), False, 'from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout\n'), ((5182, 5219), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5201, 5219), False, 'from tensorflow.keras import layers\n'), ((5243, 5310), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(4 * feats)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(4 * feats, (3, 3), activation='relu', padding='same')\n", (5256, 5310), False, 'from tensorflow.keras import layers\n'), ((5326, 5352), 
'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (5344, 5352), False, 'from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout\n'), ((5372, 5409), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5391, 5409), False, 'from tensorflow.keras import layers\n'), ((5432, 5499), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(8 * feats)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(8 * feats, (3, 3), activation='relu', padding='same')\n", (5445, 5499), False, 'from tensorflow.keras import layers\n'), ((5515, 5541), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (5533, 5541), False, 'from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout\n'), ((5561, 5598), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5580, 5598), False, 'from tensorflow.keras import layers\n'), ((5621, 5689), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(16 * feats)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(16 * feats, (3, 3), activation='relu', padding='same')\n", (5634, 5689), False, 'from tensorflow.keras import layers\n'), ((5706, 5732), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (5724, 5732), False, 'from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout\n'), ((5752, 5789), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5771, 5789), False, 'from tensorflow.keras import layers\n'), ((5813, 5881), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(32 * feats)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 
'"""same"""'}), "(32 * feats, (3, 3), activation='relu', padding='same')\n", (5826, 5881), False, 'from tensorflow.keras import layers\n'), ((5898, 5924), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (5916, 5924), False, 'from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout\n'), ((5948, 5980), 'tensorflow.keras.layers.UpSampling2D', 'layers.UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (5967, 5980), False, 'from tensorflow.keras import layers\n'), ((6070, 6138), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(16 * feats)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(16 * feats, (3, 3), activation='relu', padding='same')\n", (6083, 6138), False, 'from tensorflow.keras import layers\n'), ((6153, 6179), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (6171, 6179), False, 'from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout\n'), ((6207, 6239), 'tensorflow.keras.layers.UpSampling2D', 'layers.UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (6226, 6239), False, 'from tensorflow.keras import layers\n'), ((6329, 6396), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(8 * feats)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(8 * feats, (3, 3), activation='relu', padding='same')\n", (6342, 6396), False, 'from tensorflow.keras import layers\n'), ((6411, 6437), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (6429, 6437), False, 'from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout\n'), ((6461, 6493), 'tensorflow.keras.layers.UpSampling2D', 'layers.UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (6480, 6493), False, 'from tensorflow.keras import layers\n'), ((6588, 
6655), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(4 * feats)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(4 * feats, (3, 3), activation='relu', padding='same')\n", (6601, 6655), False, 'from tensorflow.keras import layers\n'), ((6670, 6696), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (6688, 6696), False, 'from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout\n'), ((6809, 6852), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(1)', '(1, 1)'], {'activation': '"""relu"""'}), "(1, (1, 1), activation='relu')\n", (6822, 6852), False, 'from tensorflow.keras import layers\n'), ((9640, 9671), 'pickle.dump', 'pickle.dump', (['history.history', 'f'], {}), '(history.history, f)\n', (9651, 9671), False, 'import pickle\n'), ((635, 682), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 0.5)', '(y_pred > 0.5)'], {}), '(y_true > 0.5, y_pred > 0.5)\n', (654, 682), True, 'import tensorflow as tf\n'), ((850, 865), 'tensorflow.size', 'tf.size', (['y_true'], {}), '(y_true)\n', (857, 865), True, 'import tensorflow as tf\n'), ((949, 996), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 0.5)', '(y_pred > 0.5)'], {}), '(y_true > 0.5, y_pred > 0.5)\n', (968, 996), True, 'import tensorflow as tf\n'), ((1101, 1112), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1110, 1112), True, 'from tensorflow.keras import backend as K\n'), ((1177, 1224), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 0.5)', '(y_pred > 0.5)'], {}), '(y_true > 0.5, y_pred > 0.5)\n', (1196, 1224), True, 'import tensorflow as tf\n'), ((1329, 1340), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1338, 1340), True, 'from tensorflow.keras import backend as K\n'), ((1406, 1449), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 1)', '(y_pred > 1)'], {}), '(y_true > 
1, y_pred > 1)\n', (1425, 1449), True, 'import tensorflow as tf\n'), ((1613, 1628), 'tensorflow.size', 'tf.size', (['y_true'], {}), '(y_true)\n', (1620, 1628), True, 'import tensorflow as tf\n'), ((1711, 1754), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 1)', '(y_pred > 1)'], {}), '(y_true > 1, y_pred > 1)\n', (1730, 1754), True, 'import tensorflow as tf\n'), ((2040, 2051), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2049, 2051), True, 'from tensorflow.keras import backend as K\n'), ((2115, 2158), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 1)', '(y_pred > 1)'], {}), '(y_true > 1, y_pred > 1)\n', (2134, 2158), True, 'import tensorflow as tf\n'), ((2440, 2451), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2449, 2451), True, 'from tensorflow.keras import backend as K\n'), ((2516, 2559), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 5)', '(y_pred > 5)'], {}), '(y_true > 5, y_pred > 5)\n', (2535, 2559), True, 'import tensorflow as tf\n'), ((2723, 2738), 'tensorflow.size', 'tf.size', (['y_true'], {}), '(y_true)\n', (2730, 2738), True, 'import tensorflow as tf\n'), ((2821, 2864), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 5)', '(y_pred > 5)'], {}), '(y_true > 5, y_pred > 5)\n', (2840, 2864), True, 'import tensorflow as tf\n'), ((3150, 3161), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (3159, 3161), True, 'from tensorflow.keras import backend as K\n'), ((3225, 3268), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 5)', '(y_pred > 5)'], {}), '(y_true > 5, y_pred > 5)\n', (3244, 3268), True, 'import tensorflow as tf\n'), ((3550, 3561), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (3559, 3561), True, 'from tensorflow.keras import backend as K\n'), ((3628, 3673), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 10)', '(y_pred > 10)'], {}), '(y_true > 10, 
y_pred > 10)\n', (3647, 3673), True, 'import tensorflow as tf\n'), ((3839, 3854), 'tensorflow.size', 'tf.size', (['y_true'], {}), '(y_true)\n', (3846, 3854), True, 'import tensorflow as tf\n'), ((3938, 3983), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 10)', '(y_pred > 10)'], {}), '(y_true > 10, y_pred > 10)\n', (3957, 3983), True, 'import tensorflow as tf\n'), ((4268, 4279), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (4277, 4279), True, 'from tensorflow.keras import backend as K\n'), ((4344, 4389), 'tensorflow.math.logical_and', 'tf.math.logical_and', (['(y_true > 10)', '(y_pred > 10)'], {}), '(y_true > 10, y_pred > 10)\n', (4363, 4389), True, 'import tensorflow as tf\n'), ((4673, 4684), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (4682, 4684), True, 'from tensorflow.keras import backend as K\n'), ((7014, 7041), 'numpy.load', 'np.load', (['f"""Y_{loc}_{d}.npy"""'], {}), "(f'Y_{loc}_{d}.npy')\n", (7021, 7041), True, 'import numpy as np\n'), ((7121, 7152), 'numpy.load', 'np.load', (['f"""X_B11_{loc}_{d}.npy"""'], {}), "(f'X_B11_{loc}_{d}.npy')\n", (7128, 7152), True, 'import numpy as np\n'), ((7204, 7235), 'numpy.load', 'np.load', (['f"""X_B16_{loc}_{d}.npy"""'], {}), "(f'X_B16_{loc}_{d}.npy')\n", (7211, 7235), True, 'import numpy as np\n'), ((7286, 7318), 'numpy.load', 'np.load', (['f"""X_B{b}_{loc}_{d}.npy"""'], {}), "(f'X_B{b}_{loc}_{d}.npy')\n", (7293, 7318), True, 'import numpy as np\n'), ((9025, 9043), 'numpy.square', 'np.square', (['y_train'], {}), '(y_train)\n', (9034, 9043), True, 'import numpy as np\n'), ((9076, 9093), 'numpy.square', 'np.square', (['y_test'], {}), '(y_test)\n', (9085, 9093), True, 'import numpy as np\n'), ((743, 776), 'tensorflow.math.logical_not', 'tf.math.logical_not', (['(y_true > 0.5)'], {}), '(y_true > 0.5)\n', (762, 776), True, 'import tensorflow as tf\n'), ((775, 808), 'tensorflow.math.logical_not', 'tf.math.logical_not', (['(y_pred > 0.5)'], {}), '(y_pred > 
0.5)\n', (794, 808), True, 'import tensorflow as tf\n'), ((1510, 1541), 'tensorflow.math.logical_not', 'tf.math.logical_not', (['(y_true > 1)'], {}), '(y_true > 1)\n', (1529, 1541), True, 'import tensorflow as tf\n'), ((1540, 1571), 'tensorflow.math.logical_not', 'tf.math.logical_not', (['(y_pred > 1)'], {}), '(y_pred > 1)\n', (1559, 1571), True, 'import tensorflow as tf\n'), ((2620, 2651), 'tensorflow.math.logical_not', 'tf.math.logical_not', (['(y_true > 5)'], {}), '(y_true > 5)\n', (2639, 2651), True, 'import tensorflow as tf\n'), ((2650, 2681), 'tensorflow.math.logical_not', 'tf.math.logical_not', (['(y_pred > 5)'], {}), '(y_pred > 5)\n', (2669, 2681), True, 'import tensorflow as tf\n'), ((3734, 3766), 'tensorflow.math.logical_not', 'tf.math.logical_not', (['(y_true > 10)'], {}), '(y_true > 10)\n', (3753, 3766), True, 'import tensorflow as tf\n'), ((3765, 3797), 'tensorflow.math.logical_not', 'tf.math.logical_not', (['(y_pred > 10)'], {}), '(y_pred > 10)\n', (3784, 3797), True, 'import tensorflow as tf\n')] |
# Donut problem using logistic regression
# Code Flow:
# 1. Import all relevant libraries.
# 2. Generate sample data.
# 3. Plot the data.
# 4. Add bias term.
# 5. Add radius as a feature.
# 6. Generate random weights for initialization.
# 7. Define sigmoid function.
# 8. Calculate Y.
# 9. Define Cross-entropy error function.
# 10. Gradient Descent with L2.
# 11. Plot - Cross-entropy error.
# 12. Classification Rate.
# 1. Import all relevant libraries:
import numpy as np
import matplotlib.pyplot as plt
# 2. Generate input data:
# The "donut" data set is not linearly separable in (x, y) alone;
# adding the radius as an explicit feature (step 5) makes it separable.
N = 1000 # No. of samples
D = 2 # No. of features
# Create 2 clouds of data at different radii:
R_inner = 5 # inner radius
R_outer = 10 # outer radius
# Spread the data along the circumference of each clouds.
# Distance from origin is radius + random normal
# Angle theta is uniformly distributed between (0, 2pi)
R1 = np.random.randn(N//2) + R_inner # noisy radii for the inner cloud
theta = 2*np.pi*np.random.random(N//2)
X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T # polar -> cartesian, shape (N//2, 2)
R2 = np.random.randn(N//2) + R_outer # noisy radii for the outer cloud
theta = 2*np.pi*np.random.random(N//2)
X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T
X = np.concatenate([ X_inner, X_outer ])
T = np.array([0]*(N//2) + [1]*(N//2)) # Labels: first N//2 (inner cloud) are 0, last N//2 (outer cloud) are 1
# 3. Plot the data:
plt.figure(1)
plt.scatter(X[:,0], X[:,1], c=T)
plt.xlabel('Cosine angles')
plt.ylabel('Sine angles')
plt.title('Donut Data')
# 4. Add bias term:
ones = np.ones((N, 1))
# 5. Add radius as a feature:
# a column of r = sqrt(x^2 + y^2)
r = np.sqrt( (X * X).sum(axis=1) ).reshape(-1, 1)
Xb = np.concatenate((ones, r, X), axis=1) # columns: [bias, radius, x, y]
# 6. Initialize weights:
w = np.random.randn(D + 2) # D features + bias + radius
def sigmoid(z):
    """Logistic function: map real-valued logits z onto the open interval (0, 1)."""
    denom = 1 + np.exp(-z)
    return 1 / denom
# 8. Calculate model output Y:
z = Xb.dot(w)  # logits, shape (N,)
Y = sigmoid(z)  # predicted probability of belonging to the outer cloud (class 1)
# 9. Cross-entropy function:
def cross_entropy(T, Y):
    """Total binary cross-entropy between targets T (0/1) and predictions Y in (0, 1)."""
    per_sample = T * np.log(Y) + (1 - T) * np.log(1 - Y)
    return -per_sample.sum()
# 10. Gradient Descent with L2:
learning_rate = 0.0001 # trial & error
error = [] # keep track of cross-entropy error per iteration
for i in range(5000):
    e = cross_entropy(T, Y)
    error.append(e) # append error
    if i % 500 == 0:
        print(e) # print error every 500 iterations
    # Gradient descent weight update with L2 regularization (lambda = 0.1):
    # Xb.T.dot(T - Y) is the negative gradient of the cross-entropy w.r.t. w.
    w += learning_rate * ( Xb.T.dot(T - Y) - 0.1*w )
    # Recompute predictions with the updated weights:
    Y = sigmoid(Xb.dot(w))
# 11. Plots:
plt.figure(2)
plt.plot(error)
plt.xlabel('Iteration')
plt.ylabel('Cross-Entropy Error')
plt.title("Cross-Entropy Error vs. Iteration")
# 12. Classification Rate:
# This tells us what % of values are classified correctly.
# We round the output of sigmoid to get either 0 or 1.
# Then we calculate the sum of the absolute differences between T and round(Y)
# (the number of misclassified samples), divide by N,
# and subtract from 1 to get the classification rate.
print("Final classification rate:", 1 - np.abs(T - np.round(Y)).sum() / N)
| [
"numpy.ones",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.cos",
"matplotlib.pyplot.scatter",
"numpy.concatenate",
"numpy.sin",
"matplotlib.pyplot.title",... | [((1208, 1242), 'numpy.concatenate', 'np.concatenate', (['[X_inner, X_outer]'], {}), '([X_inner, X_outer])\n', (1222, 1242), True, 'import numpy as np\n'), ((1249, 1290), 'numpy.array', 'np.array', (['([0] * (N // 2) + [1] * (N // 2))'], {}), '([0] * (N // 2) + [1] * (N // 2))\n', (1257, 1290), True, 'import numpy as np\n'), ((1344, 1357), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1354, 1357), True, 'import matplotlib.pyplot as plt\n'), ((1358, 1392), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'T'}), '(X[:, 0], X[:, 1], c=T)\n', (1369, 1392), True, 'import matplotlib.pyplot as plt\n'), ((1391, 1418), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cosine angles"""'], {}), "('Cosine angles')\n", (1401, 1418), True, 'import matplotlib.pyplot as plt\n'), ((1419, 1444), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sine angles"""'], {}), "('Sine angles')\n", (1429, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1468), 'matplotlib.pyplot.title', 'plt.title', (['"""Donut Data"""'], {}), "('Donut Data')\n", (1454, 1468), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1513), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (1505, 1513), True, 'import numpy as np\n'), ((1635, 1671), 'numpy.concatenate', 'np.concatenate', (['(ones, r, X)'], {'axis': '(1)'}), '((ones, r, X), axis=1)\n', (1649, 1671), True, 'import numpy as np\n'), ((1702, 1724), 'numpy.random.randn', 'np.random.randn', (['(D + 2)'], {}), '(D + 2)\n', (1717, 1724), True, 'import numpy as np\n'), ((2397, 2410), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (2407, 2410), True, 'import matplotlib.pyplot as plt\n'), ((2411, 2426), 'matplotlib.pyplot.plot', 'plt.plot', (['error'], {}), '(error)\n', (2419, 2426), True, 'import matplotlib.pyplot as plt\n'), ((2427, 2450), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2437, 2450), True, 
'import matplotlib.pyplot as plt\n'), ((2451, 2484), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross-Entropy Error"""'], {}), "('Cross-Entropy Error')\n", (2461, 2484), True, 'import matplotlib.pyplot as plt\n'), ((2485, 2531), 'matplotlib.pyplot.title', 'plt.title', (['"""Cross-Entropy Error vs. Iteration"""'], {}), "('Cross-Entropy Error vs. Iteration')\n", (2494, 2531), True, 'import matplotlib.pyplot as plt\n'), ((909, 932), 'numpy.random.randn', 'np.random.randn', (['(N // 2)'], {}), '(N // 2)\n', (924, 932), True, 'import numpy as np\n'), ((957, 981), 'numpy.random.random', 'np.random.random', (['(N // 2)'], {}), '(N // 2)\n', (973, 981), True, 'import numpy as np\n'), ((1059, 1082), 'numpy.random.randn', 'np.random.randn', (['(N // 2)'], {}), '(N // 2)\n', (1074, 1082), True, 'import numpy as np\n'), ((1107, 1131), 'numpy.random.random', 'np.random.random', (['(N // 2)'], {}), '(N // 2)\n', (1123, 1131), True, 'import numpy as np\n'), ((1783, 1793), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (1789, 1793), True, 'import numpy as np\n'), ((1012, 1025), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1018, 1025), True, 'import numpy as np\n'), ((1034, 1047), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1040, 1047), True, 'import numpy as np\n'), ((1162, 1175), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1168, 1175), True, 'import numpy as np\n'), ((1184, 1197), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1190, 1197), True, 'import numpy as np\n'), ((1926, 1935), 'numpy.log', 'np.log', (['Y'], {}), '(Y)\n', (1932, 1935), True, 'import numpy as np\n'), ((1944, 1957), 'numpy.log', 'np.log', (['(1 - Y)'], {}), '(1 - Y)\n', (1950, 1957), True, 'import numpy as np\n'), ((2860, 2871), 'numpy.round', 'np.round', (['Y'], {}), '(Y)\n', (2868, 2871), True, 'import numpy as np\n')] |
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
# As from Tensorflow 2.6, keras is a separate package and some classes should be imported differently.
from model_compression_toolkit.core.common.target_platform import QuantizationMethod
from model_compression_toolkit.core.common.substitutions.shift_negative_activation import apply_shift_negative_correction
if tf.__version__ < "2.6":
from tensorflow.python.keras.engine.base_layer import TensorFlowOpLayer
else:
from keras.engine.base_layer import TensorFlowOpLayer
import numpy as np
from tensorflow.keras.layers import Activation, Conv2D, Dense, DepthwiseConv2D, ZeroPadding2D, Reshape, \
GlobalAveragePooling2D, Dropout, ReLU, PReLU, ELU
from typing import Tuple, Any
from model_compression_toolkit import CoreConfig, FrameworkInfo
from model_compression_toolkit.core import common
from model_compression_toolkit.core.common import BaseNode, Graph
from model_compression_toolkit.core.common.constants import FLOAT_32, DATA_TYPE
from model_compression_toolkit.core.keras.constants import NEGATIVE_SLOPE, PADDING, PAD_SAME, PAD_VALID, BIAS, USE_BIAS
from model_compression_toolkit.core.common.graph.graph_matchers import EdgeMatcher
from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, \
NodeFrameworkAttrMatcher
from model_compression_toolkit.core.keras.constants import KERNEL_SIZE, STRIDES, ACTIVATION, TRAINABLE, LAYER_NAME, SWISH, \
SELU, GELU
# Tensorflow Op layer attributes:
NODE_DEF = 'node_def'
CONSTANTS = 'constants'
# NodeDef keys constants:
NODE_NAME = 'name'
NODE_DICT_TYPES = 'attr'
NODE_INPUT = 'input'
INPUT_VARIABLE_SUFFIX = '/y'  # suffix naming the constant-operand input of an Add node
# NodeDef operators:
NODE_OP = 'op'
NODE_ADD_OPERATOR = 'Add'
NODE_PAD_OPERATOR = 'PadV2'
# NodeDef padding input variables names:
NODE_PAD_SIZE_NAME = 'padding'
NODE_PAD_VALUE_NAME = 'constant_values'
# NodeDef dtype descriptor key and values:
NODE_CONSTANTS_TYPE = 'type'
NODE_CONSTANTS_DT_FLOAT = 'DT_FLOAT'
NODE_CONSTANTS_DT_INT32 = 'DT_INT32'
# Bit width used when quantizing the shifted non-linear activation
# (presumably consumed by the shared substitution logic; not referenced in this chunk).
SHIFT_NEGATIVE_NON_LINEAR_NUM_BITS = 16
"""
This substitution aims to solve an issue of activation with negative outputs where
the portion of the negative range is relatively small. In a symmetric quantization this causes
of bit loosing as the entire negative quantization range does not contain
any values. To solve it, we shift the output of the activation by the minimal output value (quantized) such
that all values after the shifting are positive. To correct the impact of such shifting, a correction
to the next linear node is computed and added to its bias term.
If the linear node pads the input tensor with zeros, we modify the padded value as well.
"""
def shift_negative_activation_node_matchers():
    """
    Build the node matchers the shift-negative-correction substitution operates on.

    Returns:
        Tuple (snc_node, linear_node, bypass_node, pad_node):
        snc_node matches activations that may produce negative outputs,
        linear_node matches layers whose bias can absorb the correction,
        bypass_node matches layers allowed between the two,
        pad_node matches an explicit padding layer between the two.
    """
    # Keras Activation layers configured with a negative-capable activation function.
    negative_activation_attr = (NodeFrameworkAttrMatcher(ACTIVATION, SWISH) |
                                NodeFrameworkAttrMatcher(ACTIVATION, GELU) |
                                NodeFrameworkAttrMatcher(ACTIVATION, SELU))
    # A ReLU whose negative_slope is non-zero behaves like Leaky ReLU.
    leaky_relu = NodeOperationMatcher(ReLU) & NodeFrameworkAttrMatcher(NEGATIVE_SLOPE, 0.0).logic_not()
    # Activation nodes with (possibly) negative outputs.
    snc_node = (NodeOperationMatcher(tf.nn.silu) |
                NodeOperationMatcher(tf.nn.swish) |
                NodeOperationMatcher(tf.nn.leaky_relu) |
                NodeOperationMatcher(tf.nn.selu) |
                NodeOperationMatcher(tf.nn.gelu) |
                NodeOperationMatcher(tf.nn.elu) |
                (NodeOperationMatcher(Activation) & negative_activation_attr) |
                NodeOperationMatcher(PReLU) |
                NodeOperationMatcher(ELU) |
                leaky_relu)
    # Linear layers where we can add a correction to the bias term.
    linear_node = (NodeOperationMatcher(Conv2D) |
                   NodeOperationMatcher(Dense) |
                   NodeOperationMatcher(DepthwiseConv2D))
    # Nodes that may sit between the non-linear node and the linear node
    # without invalidating the substitution.
    bypass_node = (NodeOperationMatcher(Reshape) |
                   NodeOperationMatcher(GlobalAveragePooling2D) |
                   NodeOperationMatcher(Dropout))
    # An explicit padding node between the non-linear node and the linear node.
    pad_node = NodeOperationMatcher(ZeroPadding2D)
    return snc_node, linear_node, bypass_node, pad_node
def create_add_node(add_value: float,
                    prev_node_name: str,
                    input_shape: tuple) -> BaseNode:
    """
    Build an Add node that adds a constant to its input tensor.
    The node is named after its predecessor with a '_post_add' suffix.

    Args:
        add_value: Constant the node adds to its input tensor.
        prev_node_name: Name of the node preceding the new Add node.
        input_shape: Shape of the Add node's input tensor (the output shape is identical).

    Returns:
        The newly created Add node.
    """
    name = f'{prev_node_name}_post_add'
    # Rank-matching all-ones-shaped constant so it broadcasts over the input tensor.
    const_tensor = np.array(add_value, dtype=np.float32).reshape([1] * len(input_shape))
    node_def = {
        NODE_NAME: name,
        NODE_DICT_TYPES: {'T': {NODE_CONSTANTS_TYPE: NODE_CONSTANTS_DT_FLOAT}},
        NODE_OP: NODE_ADD_OPERATOR,
        NODE_INPUT: [prev_node_name, name + INPUT_VARIABLE_SUFFIX],
    }
    fw_attr = {
        LAYER_NAME: name,
        TRAINABLE: False,
        DATA_TYPE: FLOAT_32,
        NODE_DEF: node_def,
        CONSTANTS: {1: const_tensor}}
    return common.graph.BaseNode(name,
                                 fw_attr,
                                 input_shape,
                                 input_shape,
                                 weights={},
                                 quantization_attr={},
                                 layer_class=TensorFlowOpLayer)
def create_pad_node(next_node_name: str,
                    prev_node_name: str,
                    value_to_pad: float,
                    input_shape: tuple,
                    pad_top: int,
                    pad_btm: int,
                    pad_left: int,
                    pad_right: int) -> BaseNode:
    """
    Build a PadV2 node that pads its input tensor with a constant value.
    The node is named after its successor with a '_pre_pad' suffix.

    Args:
        next_node_name: Name of the node following the pad node.
        prev_node_name: Name of the node preceding the pad node.
        value_to_pad: Constant used to fill the padded region.
        input_shape: Shape of the input tensor.
        pad_top: Number of elements to pad above the tensor.
        pad_btm: Number of elements to pad below the tensor.
        pad_left: Number of elements to pad left of the tensor.
        pad_right: Number of elements to pad right of the tensor.

    Returns:
        The newly created pad node.
    """
    name = next_node_name + '_pre_pad'
    # Per-axis pad sizes for an NHWC tensor: batch and channel axes are untouched.
    pad_sizes = np.array([[0, 0],
                          [pad_top, pad_btm],
                          [pad_left, pad_right],
                          [0, 0]], dtype=np.int32)
    node_def = {
        NODE_NAME: name,
        NODE_OP: NODE_PAD_OPERATOR,
        NODE_INPUT: [prev_node_name,
                     name + f'/{NODE_PAD_SIZE_NAME}',   # name of padding size variable
                     name + f'/{NODE_PAD_VALUE_NAME}'],
        NODE_DICT_TYPES: {'Tpaddings': {NODE_CONSTANTS_TYPE: NODE_CONSTANTS_DT_INT32},
                          'T': {NODE_CONSTANTS_TYPE: NODE_CONSTANTS_DT_FLOAT}}}
    fw_attr = {LAYER_NAME: name,
               TRAINABLE: False,
               DATA_TYPE: FLOAT_32,
               NODE_DEF: node_def,
               CONSTANTS: {1: pad_sizes,       # padding size
                           2: value_to_pad}}  # padded value
    # Spatial dimensions grow by the total padding applied on each axis.
    out_shape = list(input_shape)
    out_shape[1] += pad_top + pad_btm
    out_shape[2] += pad_left + pad_right
    return common.graph.BaseNode(name,
                                 fw_attr,
                                 input_shape,
                                 tuple(out_shape),
                                 weights={},
                                 quantization_attr={},
                                 layer_class=TensorFlowOpLayer)
def compute_op2d_padding(op2d_node: BaseNode) -> Tuple[int, int, int, int]:
    """
    Compute the number of elements a conv-type node pads around its input tensor,
    so TensorFlow's implicit 'same' padding can be replaced with an explicit pad node.

    Args:
        op2d_node: Node to compute the number of elements it adds when padding.

    Returns:
        Tuple (pad_top, pad_btm, pad_left, pad_right) of elements to pad.
    """
    strides = op2d_node.framework_attr.get(STRIDES)
    kernel = op2d_node.framework_attr.get(KERNEL_SIZE)
    sh, sw = strides[0], strides[1]
    kh, kw = kernel[0], kernel[1]
    # TF 'same' padding: pad_along = max((out - 1) * stride + kernel - in, 0).
    # With out = ceil(in / stride), (out - 1) * stride == stride * out - stride.
    # Fix: clamp at 0 (as TF does) — without it, a kernel smaller than the
    # stride could yield a negative padding amount.
    pad_along_h = max(sh * op2d_node.output_shape[1] - op2d_node.input_shape[1] + kh - sh, 0)
    pad_along_w = max(sw * op2d_node.output_shape[2] - op2d_node.input_shape[2] + kw - sw, 0)
    # An odd total puts the extra element on the bottom/right, matching TF.
    pad_top = pad_along_h // 2
    pad_btm = pad_along_h - pad_top
    pad_left = pad_along_w // 2
    pad_right = pad_along_w - pad_left
    return pad_top, pad_btm, pad_left, pad_right
def get_padding_values(op2d_node) -> Tuple[Any, Any]:
    """
    Extract explicit padding values from a conv-type node that uses 'same'
    padding (and is not 1x1), switching the node itself to 'valid' padding.

    Args:
        op2d_node: Convolution type node from which to extract the padding values.

    Returns:
        Tuple (padding, padding_values); both None when no explicit padding is needed.
    """
    padding, padding_values = None, None
    if op2d_node.framework_attr.get(PADDING) == PAD_SAME:
        # KERNEL_SIZE is only read for 'same'-padded nodes (non-conv nodes lack it).
        kernel = op2d_node.framework_attr.get(KERNEL_SIZE)
        if kernel[0] != 1 or kernel[1] != 1:
            padding = compute_op2d_padding(op2d_node)
            padding_values = (padding[0], padding[1], padding[2], padding[3])
            # Padding is now applied explicitly, so the node itself stops padding.
            op2d_node.framework_attr[PADDING] = PAD_VALID
    return padding, padding_values
def is_padding_node_and_node_has_padding(pad_node_to_consider: BaseNode,
next_node: BaseNode) -> bool:
"""
Args:
pad_node_to_consider: Pad node between the non-linear and linear nodes to consider when
correcting the expected shift.
next_node: The next node after the node in check for correction.
Returns:
Whether a padding node exists and the next node is a linear node with padding.
"""
return pad_node_to_consider is not None and next_node.framework_attr.get(PADDING) == PAD_SAME
def keras_apply_shift_negative_correction(graph: Graph,
                                          core_config: CoreConfig,
                                          fw_info: FrameworkInfo) -> Graph:
    """
    Apply shift negative correction (SNC) on a graph built from a Keras model.

    Args:
        graph: Graph to apply SNC on.
        core_config: Quantization configuration.
        fw_info: FrameworkInfo object with information about the specific framework's module.

    Returns:
        Graph after SNC.
    """
    # Matchers come back as (snc_node, linear_node, bypass_node, pad_node)
    # and are forwarded to the framework-agnostic substitution in that order.
    matchers = shift_negative_activation_node_matchers()
    return apply_shift_negative_correction(graph,
                                           core_config,
                                           fw_info,
                                           *matchers,
                                           create_add_node,
                                           get_padding_values,
                                           create_pad_node,
                                           is_padding_node_and_node_has_padding,
                                           PADDING,
                                           BIAS,
                                           USE_BIAS)
) | [
"model_compression_toolkit.core.common.graph.graph_matchers.NodeFrameworkAttrMatcher",
"numpy.array",
"model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher",
"model_compression_toolkit.core.common.graph.BaseNode",
"model_compression_toolkit.core.common.substitutions.shift_negative... | [((4868, 4903), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['ZeroPadding2D'], {}), '(ZeroPadding2D)\n', (4888, 4903), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((6009, 6149), 'model_compression_toolkit.core.common.graph.BaseNode', 'common.graph.BaseNode', (['add_node_name', 'fw_attr', 'input_shape', 'input_shape'], {'weights': '{}', 'quantization_attr': '{}', 'layer_class': 'TensorFlowOpLayer'}), '(add_node_name, fw_attr, input_shape, input_shape,\n weights={}, quantization_attr={}, layer_class=TensorFlowOpLayer)\n', (6030, 6149), False, 'from model_compression_toolkit.core import common\n'), ((11830, 12065), 'model_compression_toolkit.core.common.substitutions.shift_negative_activation.apply_shift_negative_correction', 'apply_shift_negative_correction', (['graph', 'core_config', 'fw_info', 'snc_node', 'linear_node', 'bypass_node', 'pad_node', 'create_add_node', 'get_padding_values', 'create_pad_node', 'is_padding_node_and_node_has_padding', 'PADDING', 'BIAS', 'USE_BIAS'], {}), '(graph, core_config, fw_info, snc_node,\n linear_node, bypass_node, pad_node, create_add_node, get_padding_values,\n create_pad_node, is_padding_node_and_node_has_padding, PADDING, BIAS,\n USE_BIAS)\n', (11861, 12065), False, 'from model_compression_toolkit.core.common.substitutions.shift_negative_activation import apply_shift_negative_correction\n'), ((4420, 4457), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['DepthwiseConv2D'], {}), '(DepthwiseConv2D)\n', (4440, 4457), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((4736, 4765), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 
'NodeOperationMatcher', (['Dropout'], {}), '(Dropout)\n', (4756, 4765), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((4095, 4120), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['ELU'], {}), '(ELU)\n', (4115, 4120), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((4141, 4167), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['ReLU'], {}), '(ReLU)\n', (4161, 4167), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((4319, 4347), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['Conv2D'], {}), '(Conv2D)\n', (4339, 4347), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((4370, 4397), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['Dense'], {}), '(Dense)\n', (4390, 4397), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((4617, 4646), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['Reshape'], {}), '(Reshape)\n', (4637, 4646), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((4669, 4713), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['GlobalAveragePooling2D'], {}), '(GlobalAveragePooling2D)\n', (4689, 4713), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, 
NodeFrameworkAttrMatcher\n'), ((8110, 8200), 'numpy.array', 'np.array', (['[[0, 0], [pad_top, pad_btm], [pad_left, pad_right], [0, 0]]'], {'dtype': 'np.int32'}), '([[0, 0], [pad_top, pad_btm], [pad_left, pad_right], [0, 0]], dtype\n =np.int32)\n', (8118, 8200), True, 'import numpy as np\n'), ((4048, 4075), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['PReLU'], {}), '(PReLU)\n', (4068, 4075), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((4170, 4215), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeFrameworkAttrMatcher', 'NodeFrameworkAttrMatcher', (['NEGATIVE_SLOPE', '(0.0)'], {}), '(NEGATIVE_SLOPE, 0.0)\n', (4194, 4215), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((5923, 5960), 'numpy.array', 'np.array', (['add_value'], {'dtype': 'np.float32'}), '(add_value, dtype=np.float32)\n', (5931, 5960), True, 'import numpy as np\n'), ((3701, 3732), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['tf.nn.elu'], {}), '(tf.nn.elu)\n', (3721, 3732), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((3753, 3785), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['Activation'], {}), '(Activation)\n', (3773, 3785), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((3649, 3681), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['tf.nn.gelu'], {}), '(tf.nn.gelu)\n', (3669, 3681), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, 
NodeFrameworkAttrMatcher\n'), ((3984, 4026), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeFrameworkAttrMatcher', 'NodeFrameworkAttrMatcher', (['ACTIVATION', 'SELU'], {}), '(ACTIVATION, SELU)\n', (4008, 4026), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((3597, 3629), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['tf.nn.selu'], {}), '(tf.nn.selu)\n', (3617, 3629), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((3789, 3832), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeFrameworkAttrMatcher', 'NodeFrameworkAttrMatcher', (['ACTIVATION', 'SWISH'], {}), '(ACTIVATION, SWISH)\n', (3813, 3832), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((3887, 3929), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeFrameworkAttrMatcher', 'NodeFrameworkAttrMatcher', (['ACTIVATION', 'GELU'], {}), '(ACTIVATION, GELU)\n', (3911, 3929), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((3539, 3577), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['tf.nn.leaky_relu'], {}), '(tf.nn.leaky_relu)\n', (3559, 3577), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((3434, 3466), 'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['tf.nn.silu'], {}), '(tf.nn.silu)\n', (3454, 3466), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n'), ((3486, 3519), 
'model_compression_toolkit.core.common.graph.graph_matchers.NodeOperationMatcher', 'NodeOperationMatcher', (['tf.nn.swish'], {}), '(tf.nn.swish)\n', (3506, 3519), False, 'from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, NodeFrameworkAttrMatcher\n')] |
import numpy as np
import pandas as pd

# Demo: several equivalent ways to transpose a 2-D list (list of lists).
# Expected output of each snippet is shown in the comments below it.
l_2d = [[0, 1, 2], [3, 4, 5]]

# --- 1) NumPy: np.array(...).T gives a transposed ndarray ---
arr_t = np.array(l_2d).T
print(arr_t)
print(type(arr_t))
# [[0 3]
# [1 4]
# [2 5]]
# <class 'numpy.ndarray'>

# --- 2) NumPy round-trip back to a plain nested list ---
l_2d_t = np.array(l_2d).T.tolist()
print(l_2d_t)
print(type(l_2d_t))
# [[0, 3], [1, 4], [2, 5]]
# <class 'list'>

# --- 3) pandas: DataFrame.T transposes (keeps index/column labels) ---
df_t = pd.DataFrame(l_2d).T
print(df_t)
print(type(df_t))
# 0 1
# 0 0 3
# 1 1 4
# 2 2 5
# <class 'pandas.core.frame.DataFrame'>

# --- 4) pandas round-trip back to a nested list via .values ---
l_2d_t = pd.DataFrame(l_2d).T.values.tolist()
print(l_2d_t)
print(type(l_2d_t))
# [[0, 3], [1, 4], [2, 5]]
# <class 'list'>

# --- 5) Pure Python: zip(*rows) transposes, but yields tuples ---
l_2d_t_tuple = list(zip(*l_2d))
print(l_2d_t_tuple)
print(type(l_2d_t_tuple))
# [(0, 3), (1, 4), (2, 5)]
# <class 'list'>
print(l_2d_t_tuple[0])
print(type(l_2d_t_tuple[0]))
# (0, 3)
# <class 'tuple'>

# --- 6) Pure Python with inner lists instead of tuples ---
l_2d_t = [list(x) for x in zip(*l_2d)]
print(l_2d_t)
print(type(l_2d_t))
# [[0, 3], [1, 4], [2, 5]]
# <class 'list'>
print(l_2d_t[0])
print(type(l_2d_t[0]))
# [0, 3]
# <class 'list'>

# How zip(*l_2d) works, step by step:
print(*l_2d)
# [0, 1, 2] [3, 4, 5]
print(list(zip([0, 1, 2], [3, 4, 5])))
# [(0, 3), (1, 4), (2, 5)]
print([list(x) for x in [(0, 3), (1, 4), (2, 5)]])
# [[0, 3], [1, 4], [2, 5]]
| [
"pandas.DataFrame",
"numpy.array"
] | [((79, 93), 'numpy.array', 'np.array', (['l_2d'], {}), '(l_2d)\n', (87, 93), True, 'import numpy as np\n'), ((306, 324), 'pandas.DataFrame', 'pd.DataFrame', (['l_2d'], {}), '(l_2d)\n', (318, 324), True, 'import pandas as pd\n'), ((193, 207), 'numpy.array', 'np.array', (['l_2d'], {}), '(l_2d)\n', (201, 207), True, 'import numpy as np\n'), ((448, 466), 'pandas.DataFrame', 'pd.DataFrame', (['l_2d'], {}), '(l_2d)\n', (460, 466), True, 'import pandas as pd\n')] |
import torch
import shapely
from shapely.geometry import Polygon
import numpy as np
from .transformer_obb import poly2bbox
from .bbox_overlaps_cython import bbox_overlaps_cython
import DOTA_devkit.polyiou as polyiou
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):
    """Calculate overlap between two set of bboxes.

    If ``is_aligned`` is ``False``, compute the ious between every bbox of
    bboxes1 and every bbox of bboxes2; otherwise compute them for each
    aligned pair (requiring equal counts).

    Args:
        bboxes1 (Tensor): shape (m, 4) in (x1, y1, x2, y2) order.
        bboxes2 (Tensor): shape (n, 4); m must equal n when is_aligned.
        mode (str): "iou" (intersection over union) or "iof"
            (intersection over foreground, i.e. over bboxes1's area).

    Returns:
        ious (Tensor): shape (m, n) if is_aligned is False, else shape (m,).
    """
    assert mode in ['iou', 'iof']
    m = bboxes1.size(0)
    n = bboxes2.size(0)
    if is_aligned:
        assert m == n
    if m * n == 0:
        # Degenerate input: return an empty (uninitialized) tensor of the
        # expected shape, matching the device/dtype of bboxes1.
        return bboxes1.new(m, 1) if is_aligned else bboxes1.new(m, n)

    # Pixel-inclusive area convention: width = x2 - x1 + 1.
    area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
        bboxes1[:, 3] - bboxes1[:, 1] + 1)

    if is_aligned:
        top_left = torch.max(bboxes1[:, :2], bboxes2[:, :2])      # [m, 2]
        bot_right = torch.min(bboxes1[:, 2:], bboxes2[:, 2:])     # [m, 2]
        inter_wh = (bot_right - top_left + 1).clamp(min=0)         # [m, 2]
        inter = inter_wh[:, 0] * inter_wh[:, 1]
        if mode == 'iou':
            area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
                bboxes2[:, 3] - bboxes2[:, 1] + 1)
            return inter / (area1 + area2 - inter)
        return inter / area1

    # Pairwise case: broadcast bboxes1 over a new middle axis.
    top_left = torch.max(bboxes1[:, None, :2], bboxes2[:, :2])    # [m, n, 2]
    bot_right = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:])   # [m, n, 2]
    inter_wh = (bot_right - top_left + 1).clamp(min=0)             # [m, n, 2]
    inter = inter_wh[:, :, 0] * inter_wh[:, :, 1]
    if mode == 'iou':
        area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
            bboxes2[:, 3] - bboxes2[:, 1] + 1)
        return inter / (area1[:, None] + area2 - inter)
    return inter / area1[:, None]
def rbbox_overlap_single(rbbox1, rbbox2):
    """
    Compute the IoU between two rotated boxes given as 4-corner polygons.

    :param rbbox1: Tensor (1, 8) — corners as (x1,y1,...,x4,y4)
    :param rbbox2: Tensor (1, 8)
    :return: iou (float); 0 when either polygon is invalid or the union
        area is zero (degenerate boxes).
    """
    g = rbbox1.cpu().numpy()
    p = rbbox2.cpu().detach().numpy()
    g = Polygon(g[:8].reshape(4, 2))
    p = Polygon(p[:8].reshape(4, 2))
    if not g.is_valid or not p.is_valid:
        return 0
    # Fix: intersect the polygons we already built instead of constructing
    # throwaway Polygon(g)/Polygon(p) copies each call.
    inter = g.intersection(p).area
    union = g.area + p.area - inter
    if union == 0:
        return 0
    return inter / union
def rbbox_overlaps(rbboxes1, rbboxes2):
    """Calculate pairwise overlap between two sets of rotated bboxes.

    Args:
        rbboxes1 (Tensor): shape (m, 8) — 4-corner polygons.
        rbboxes2 (Tensor): shape (n, 8).

    Returns:
        ious (Tensor): shape (m, n), ious[i, j] = IoU(rbboxes1[i], rbboxes2[j]).
    """
    n_rows = rbboxes1.size(0)
    n_cols = rbboxes2.size(0)
    # Empty input: return an empty tensor of the expected shape.
    if n_rows * n_cols == 0:
        return rbboxes1.new(n_rows, n_cols)
    ious = rbboxes1.new(n_rows, n_cols)
    # Exact polygon IoU has no vectorized form here, so fill cell by cell.
    for r in range(n_rows):
        row_box = rbboxes1[r, :]
        for c in range(n_cols):
            ious[r, c] = rbbox_overlap_single(row_box, rbboxes2[c, :])
    return ious
def rbbox_overlaps_cy_warp(rbboxes, query_boxes):
    """Overlaps between rotated boxes, with a cheap axis-aligned prefilter.

    First computes horizontal-bbox (hbb) IoUs via the cython helper; only the
    pairs whose hbb overlap is > 0 get the exact (expensive) polygon IoU from
    polyiou. Pairs whose bounding boxes don't touch keep an IoU of 0.

    :param rbboxes: Tensor (m, 8) — 4-corner polygons.
    :param query_boxes: Tensor (n, 8).
    :return: Tensor (m, n) of overlaps, on query_boxes' device.
    """
    box_device = query_boxes.device
    # Fix: ``np.float`` was removed in NumPy 1.24; it was an alias for the
    # builtin float (i.e. float64), so np.float64 is behavior-identical.
    query_polys_np = query_boxes.cpu().numpy().astype(np.float64)
    polys_np = rbboxes.cpu().numpy().astype(np.float64)
    # Axis-aligned hulls of both polygon sets.
    h_bboxes_np = poly2bbox(polys_np)
    h_query_bboxes_np = poly2bbox(query_polys_np)
    # hbb ious — used both as the prefilter and as the output buffer.
    ious = bbox_overlaps_cython(h_bboxes_np, h_query_bboxes_np)
    inds = np.where(ious > 0)
    for index in range(len(inds[0])):
        box_index = inds[0][index]
        query_box_index = inds[1][index]
        box = polys_np[box_index]
        query_box = query_polys_np[query_box_index]
        # Replace the hbb estimate with the exact polygon IoU.
        overlap = polyiou.iou_poly(polyiou.VectorDouble(box), polyiou.VectorDouble(query_box))
        ious[box_index][query_box_index] = overlap
    return torch.from_numpy(ious).to(box_device)
"DOTA_devkit.polyiou.VectorDouble",
"numpy.where",
"torch.max",
"torch.from_numpy",
"torch.min",
"shapely.geometry.Polygon"
] | [((4246, 4264), 'numpy.where', 'np.where', (['(ious > 0)'], {}), '(ious > 0)\n', (4254, 4264), True, 'import numpy as np\n'), ((1150, 1191), 'torch.max', 'torch.max', (['bboxes1[:, :2]', 'bboxes2[:, :2]'], {}), '(bboxes1[:, :2], bboxes2[:, :2])\n', (1159, 1191), False, 'import torch\n'), ((1218, 1259), 'torch.min', 'torch.min', (['bboxes1[:, 2:]', 'bboxes2[:, 2:]'], {}), '(bboxes1[:, 2:], bboxes2[:, 2:])\n', (1227, 1259), False, 'import torch\n'), ((1733, 1780), 'torch.max', 'torch.max', (['bboxes1[:, None, :2]', 'bboxes2[:, :2]'], {}), '(bboxes1[:, None, :2], bboxes2[:, :2])\n', (1742, 1780), False, 'import torch\n'), ((1813, 1860), 'torch.min', 'torch.min', (['bboxes1[:, None, 2:]', 'bboxes2[:, 2:]'], {}), '(bboxes1[:, None, 2:], bboxes2[:, 2:])\n', (1822, 1860), False, 'import torch\n'), ((2743, 2753), 'shapely.geometry.Polygon', 'Polygon', (['p'], {}), '(p)\n', (2750, 2753), False, 'from shapely.geometry import Polygon\n'), ((4577, 4602), 'DOTA_devkit.polyiou.VectorDouble', 'polyiou.VectorDouble', (['box'], {}), '(box)\n', (4597, 4602), True, 'import DOTA_devkit.polyiou as polyiou\n'), ((4604, 4635), 'DOTA_devkit.polyiou.VectorDouble', 'polyiou.VectorDouble', (['query_box'], {}), '(query_box)\n', (4624, 4635), True, 'import DOTA_devkit.polyiou as polyiou\n'), ((4700, 4722), 'torch.from_numpy', 'torch.from_numpy', (['ious'], {}), '(ious)\n', (4716, 4722), False, 'import torch\n'), ((2719, 2729), 'shapely.geometry.Polygon', 'Polygon', (['g'], {}), '(g)\n', (2726, 2729), False, 'from shapely.geometry import Polygon\n')] |
import os
import imageio
import numpy as np
from skimage.transform import resize
import tensorflow as tf
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.layers import Conv2D, Activation, Concatenate
# from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
import matplotlib.pyplot as plt
class InstanceNormalization(tf.keras.layers.Layer):
    """Instance normalization: normalizes each sample and channel over its
    spatial dimensions, then applies a learnable per-channel scale/offset."""

    def __init__(self, epsilon=1e-5):
        super(InstanceNormalization, self).__init__()
        # Small constant added to the variance to avoid division by zero.
        self.epsilon = epsilon

    def build(self, input_shape):
        # One scale weight and one offset weight per channel (last axis).
        channel_shape = input_shape[-1:]
        self.scale = self.add_weight(
            name='scale',
            shape=channel_shape,
            initializer=tf.random_normal_initializer(1., 0.02),
            trainable=True)
        self.offset = self.add_weight(
            name='offset',
            shape=channel_shape,
            initializer='zeros',
            trainable=True)

    def call(self, x):
        # Statistics over spatial axes only (1, 2), so each sample/channel
        # pair is normalized independently — this is what makes it
        # *instance* (rather than batch) normalization.
        mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
        inv_std = tf.math.rsqrt(variance + self.epsilon)
        normalized = (x - mean) * inv_std
        return self.scale * normalized + self.offset
class DataUtils:
    """Loads an image folder, normalizes pixels to [-1, 1], resizes every
    image to a common shape, and exposes the result as a tf.data pipeline."""

    def __init__(self, sourcePath, resize):
        '''
        :param sourcePath: File path to data source (directory of images)
        :param resize: target shape each image is resized to; must have the
            same rank as the loaded images (H, W) or (H, W, C)
        '''
        self.data = None
        self.sourcePath = sourcePath
        self.resize = resize
        self.imageSize = None
        self.prepareData()

    def prepareData(self):
        # Load raw images, then normalize/resize them.
        self.readFiles()
        self.dataPreprocess()

    def readFiles(self):
        '''
        Read all .jpeg/.jpg/.png files from sourcePath into self.data
        (a list of numpy arrays) and record the shape of the last image.

        :raises Exception: if the path is invalid or the image rank does
            not match the requested resize rank.
        '''
        if os.path.isdir(self.sourcePath):
            self.data = [imageio.imread(os.path.join(self.sourcePath, f)) for f in os.listdir(self.sourcePath)
                         if f.endswith(('.jpeg', '.jpg', '.png'))]
            self.imageSize = self.data[-1].shape
            if len(self.imageSize) != len(self.resize):
                raise Exception("Size mismatch!!")
        else:
            raise Exception("Path Invalid")

    def dataPreprocess(self):
        # normalize the data between -1 to +1
        normalized_data = (np.asarray(self.data, dtype=np.float32) / 127.5) - 1
        if len(self.imageSize) == 2:
            # Grayscale: promote 2-D images to single-channel 3-D.
            self.resize = (self.resize[0], self.resize[1], 1)
            self.imageSize = (self.imageSize[0], self.imageSize[1], 1)
        # BUG FIX: ndarray.reshape returns a new array; the original code
        # discarded its result, so the reshape was a silent no-op.
        normalized_data = normalized_data.reshape(
            (normalized_data.shape[0], self.imageSize[0], self.imageSize[1], self.imageSize[2]))
        final_shape = (normalized_data.shape[0], self.resize[0], self.resize[1], self.resize[2])
        self.data = np.zeros(final_shape, dtype=np.float32)
        for index, img in enumerate(normalized_data):
            self.data[index, :, :, :] = resize(img, self.resize)

    def get_data(self, batch_size):
        # batch and shuffle the data (fixed seed for reproducible order)
        return tf.data.Dataset.from_tensor_slices(self.data).shuffle(self.data.shape[0], seed=42).batch(batch_size)
def resnet_block(n_filters, input_layer):
    """Residual-style block: two instance-normalized 3x3 convolutions whose
    output is concatenated channel-wise with the block input.

    :param n_filters: number of filters for both conv layers
    :param input_layer: Keras tensor feeding the block
    :return: Keras tensor of the concatenated output
    """
    # Shared weight initializer for both convolutions.
    weight_init = RandomNormal(stddev=0.02)
    # First conv -> instance norm -> ReLU.
    out = Conv2D(n_filters, (3, 3), padding='same', kernel_initializer=weight_init)(input_layer)
    out = InstanceNormalization()(out)
    out = Activation('relu')(out)
    # Second conv -> instance norm (no activation before the merge).
    out = Conv2D(n_filters, (3, 3), padding='same', kernel_initializer=weight_init)(out)
    out = InstanceNormalization()(out)
    # Skip connection by channel-wise concatenation with the input.
    return Concatenate()([out, input_layer])
return g
| [
"os.listdir",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.layers.Concatenate",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.nn.moments",
"numpy.asarray",
"os.path.join",
"tensorflow.random_normal_initializer",
"numpy.zeros",
"ten... | [((3435, 3460), 'tensorflow.keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (3447, 3460), False, 'from tensorflow.keras.initializers import RandomNormal\n'), ((1142, 1186), 'tensorflow.nn.moments', 'tf.nn.moments', (['x'], {'axes': '[1, 2]', 'keepdims': '(True)'}), '(x, axes=[1, 2], keepdims=True)\n', (1155, 1186), True, 'import tensorflow as tf\n'), ((1202, 1240), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['(variance + self.epsilon)'], {}), '(variance + self.epsilon)\n', (1215, 1240), True, 'import tensorflow as tf\n'), ((1971, 2001), 'os.path.isdir', 'os.path.isdir', (['self.sourcePath'], {}), '(self.sourcePath)\n', (1984, 2001), False, 'import os\n'), ((2989, 3028), 'numpy.zeros', 'np.zeros', (['final_shape'], {'dtype': 'np.float32'}), '(final_shape, dtype=np.float32)\n', (2997, 3028), True, 'import numpy as np\n'), ((3509, 3575), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['n_filters', '(3, 3)'], {'padding': '"""same"""', 'kernel_initializer': 'init'}), "(n_filters, (3, 3), padding='same', kernel_initializer=init)\n", (3515, 3575), False, 'from tensorflow.keras.layers import Conv2D, Activation, Concatenate\n'), ((3634, 3652), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3644, 3652), False, 'from tensorflow.keras.layers import Conv2D, Activation, Concatenate\n'), ((3699, 3765), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['n_filters', '(3, 3)'], {'padding': '"""same"""', 'kernel_initializer': 'init'}), "(n_filters, (3, 3), padding='same', kernel_initializer=init)\n", (3705, 3765), False, 'from tensorflow.keras.layers import Conv2D, Activation, Concatenate\n'), ((3869, 3882), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (3880, 3882), False, 'from tensorflow.keras.layers import Conv2D, Activation, Concatenate\n'), ((3124, 3148), 'skimage.transform.resize', 'resize', (['img', 'self.resize'], {}), '(img, self.resize)\n', (3130, 3148), 
False, 'from skimage.transform import resize\n'), ((773, 812), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(1.0)', '(0.02)'], {}), '(1.0, 0.02)\n', (801, 812), True, 'import tensorflow as tf\n'), ((2516, 2555), 'numpy.asarray', 'np.asarray', (['self.data'], {'dtype': 'np.float32'}), '(self.data, dtype=np.float32)\n', (2526, 2555), True, 'import numpy as np\n'), ((2044, 2076), 'os.path.join', 'os.path.join', (['self.sourcePath', 'f'], {}), '(self.sourcePath, f)\n', (2056, 2076), False, 'import os\n'), ((2087, 2114), 'os.listdir', 'os.listdir', (['self.sourcePath'], {}), '(self.sourcePath)\n', (2097, 2114), False, 'import os\n'), ((3244, 3289), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['self.data'], {}), '(self.data)\n', (3278, 3289), True, 'import tensorflow as tf\n')] |
import numpy as np
def to_2darray(x: np.ndarray, copy: bool = True, trans: bool = False,
               flip: bool = False) -> np.ndarray:
    """
    Coerce ``x`` to a 2-D array, optionally transposing and/or flipping it.

    A 1-D input becomes a single-column 2-D array. With ``flip=True`` the
    rows are reversed top-to-bottom, so data stored newest-first becomes
    oldest-first (first row = oldest observation). For example::

        x = nptweak.to_2darray(x)
        x = nptweak.to_2darray(x, flip=True)

    :param x: input array (1-D or 2-D)
    :param copy: work on a copy of ``x`` (True) or on ``x`` itself (False)
    :param trans: transpose the result
    :param flip: flip the result upside down (reverse row order)
    :return: the resulting 2-D array

    Note: the type hints originally used ``np.array`` (a function);
    ``np.ndarray`` is the actual array type.
    """
    if copy:
        y = x.copy()
    else:
        y = x
    # promote 1-D input to a single-column 2-D array
    if len(y.shape) == 1:
        y = y.reshape(-1, 1)
    # transpose the matrix
    if trans:
        y = y.T
    # flip matrix upside down
    if flip:
        y = np.flipud(y)
    # done
    return y
| [
"numpy.flipud"
] | [((757, 769), 'numpy.flipud', 'np.flipud', (['y'], {}), '(y)\n', (766, 769), True, 'import numpy as np\n')] |
#!/usr/bin/python
#-*- coding:utf-8 -*-
__author__ = 'david'
import numpy as np
import nibabel as nib
import resources as rs
from vispy import app
from plot import Canvas
import matplotlib.pyplot as plt
import gc
np.random.seed()
class Clarity(object):
    """Loader/converter for a CLARITY image volume identified by ``token``.

    Workflow: load the raw volume (.img/.nii), threshold+sample it into an
    (N, 4) int16 point cloud with rows (x, y, z, value), save/load the
    points as CSV, and visualize them. Most methods return ``self`` so
    calls can be chained.
    """
    def __init__(self,token,imgfile=None,pointsfile=None):
        # Reject tokens that are not in the known-resources list.
        if token not in rs.TOKENS:
            raise ValueError("Token %s not found."%(token))
        self._token = token
        self._imgfile = imgfile
        self._pointsfile = pointsfile
        self._img = None # 3-D volume (set by loadImg / loadEqImg)
        self._points = None # (N, 4) array, rows are (x, y, z, v)
        self._shape = None # (x, y, z) shape of the loaded volume
        self._max = None # max voxel value of the loaded volume
    def loadImg(self, path=None, info=False):
        """Load the raw ``<token>.img`` volume (first channel only)."""
        if path is None:
            path = rs.RAW_DATA_PATH
        pathname = path+self._token+".img"
        img = nib.load(pathname)
        if info:
            print(img)
        # Keep only the first value along the 4th axis.
        self._img = img.get_data()[:,:,:,0]
        self._shape = self._img.shape
        self._max = np.max(self._img)
        print("Image Loaded: %s"%(pathname))
        return self
    def loadEqImg(self, path=None, info=False):
        """Load the (presumably histogram-equalized) ``<token>.nii`` volume."""
        if path is None:
            path = rs.RAW_DATA_PATH
        pathname = path+self._token+".nii"
        img = nib.load(pathname)
        if info:
            print(img)
        self._img = img.get_data()[:,:,:,0]
        self._shape = self._img.shape
        self._max = np.max(self._img)
        print("Image Loaded: %s"%(pathname))
        return self
    def getShape(self):
        """Return the (x, y, z) shape of the loaded volume (or None)."""
        return self._shape
    def getMax(self):
        """Return the maximum voxel value of the loaded volume (or None)."""
        return self._max
    def discardImg(self):
        """Drop the in-memory volume to free RAM; keeps points/metadata."""
        del self._img
        gc.collect()
        return self
    def getHistogram(self,bins,range,density=True):
        """Histogram of voxel values; see numpy.histogram for the return."""
        if self._img is None:
            raise ValueError("Img haven't loaded, please call loadImg() first.")
        return np.histogram(self._img.flatten(), bins=bins, range=range, density=density)
    def imgToPoints(self, threshold=0.1, sample=0.5, optimize=True):
        """Convert the volume to a sampled (N, 4) point cloud.

        Keeps voxels above ``threshold * max``, rescales values to 0..255
        (int16), then randomly keeps a ``sample`` fraction of the points.
        With ``optimize=True`` the raw volume is discarded mid-way to
        save memory. Stores the result in ``self._points``.
        """
        if not 0 <= threshold < 1:
            raise ValueError("Threshold should be within [0,1).")
        if not 0 < sample <= 1:
            raise ValueError("Sample rate should be within (0,1].")
        if self._img is None:
            raise ValueError("Img haven't loaded, please call loadImg() first.")
        total = self._shape[0]*self._shape[1]*self._shape[2]
        print("Coverting to points...\ntoken=%s\ntotal=%d\nmax=%f\nthreshold=%f\nsample=%f"\
              %(self._token,total,self._max,threshold,sample))
        print("(This will take couple minutes)")
        # threshold: keep voxel coordinates/values above the cutoff
        filt = self._img > threshold * self._max
        x, y, z = np.where(filt)
        v = self._img[filt]
        if optimize:
            self.discardImg()
        # rescale values to 0..255 as int16
        v = np.int16(255*(np.float32(v)/np.float32(self._max)))
        l = v.shape
        # l is a 1-tuple; %-formatting unpacks it as the single %d argument
        print("Above threshold=%d"%(l))
        # sample: randomly keep a `sample` fraction of the points
        if sample < 1.0:
            filt = np.random.random(size=l) < sample
            x = x[filt]
            y = y[filt]
            z = z[filt]
            v = v[filt]
        self._points = np.vstack([x,y,z,v])
        self._points = np.transpose(self._points)
        print("Samples=%d"%(self._points.shape[0]))
        print("Finished")
        return self
    def loadPoints(self,path=None):
        """Load a previously saved (N, 4) point cloud from ``<token>.csv``."""
        if path is None:
            path = rs.POINTS_DATA_PATH
        pathname = path+self._token+".csv"
        self._points = np.loadtxt(pathname,dtype=np.int16,delimiter=',')
        print("Points Loaded: %s"%(pathname))
        return self
    def savePoints(self,path=None):
        """Save the current point cloud to ``<token>.csv`` (int rows)."""
        if self._points is None:
            raise ValueError("Points is empty, please call imgToPoints() first.")
        if path is None:
            path = rs.POINTS_DATA_PATH
        pathname = path+self._token+".csv"
        np.savetxt(pathname,self._points,fmt='%d',delimiter=',')
        return self
    def centralize(self):
        """Shift point coordinates so their mean is at the origin (in place)."""
        # Centralize the data
        # use mean or median
        centerX = np.mean(self._points[:,0])
        centerY = np.mean(self._points[:,1])
        centerZ = np.mean(self._points[:,2])
        self._points[:,0] -= np.int16(centerX)
        self._points[:,1] -= np.int16(centerY)
        self._points[:,2] -= np.int16(centerZ)
        return self
    def histogramEqualize(self,scale=30):
        """Map point values through their CDF, scaled to [0, scale].

        Used as per-point marker sizes in show().
        """
        # get image histogram
        imhist, bins = np.histogram(self._points[:,3],256,density=True)
        cdf = imhist.cumsum() # cumulative distribution function
        cdf = scale * cdf / cdf[-1] # normalize
        # use linear interpolation of cdf to find new pixel values
        return np.interp(self._points[:,3],bins[:-1],cdf)
    def showHistogram(self,bins=255):
        """Plot a histogram of the point values with matplotlib."""
        plt.hist(self._points[:,3],bins=bins)
        plt.title("%s Points Histogram"%(self._token))
        plt.ylabel("count")
        plt.xlabel("level")
        plt.grid()
        plt.show()
    def show(self):
        """Render the point cloud in 3-D via vispy.

        NOTE: mutates self._points via centralize() before drawing.
        """
        if self._points is None:
            raise ValueError("Points is empty, please call imgToPoints() first.")
        # centralize
        self.centralize()
        # colors: per-axis normalized |coordinates| as RGB, fixed alpha
        colors = np.array(np.abs(self._points[:,:3]),dtype=np.float32)
        mx = np.max(colors[:,0])
        my = np.max(colors[:,1])
        mz = np.max(colors[:,2])
        brighter = 0.1
        colors[:,0]/=mx+brighter
        colors[:,1]/=my+brighter
        colors[:,2]/=mz+brighter
        alpha = np.empty((len(colors[:,0]),1))
        alpha.fill(0.8)
        colors = np.hstack([colors,alpha])
        # sizes: histogram-equalized point values
        sizes = self.histogramEqualize()
        # visualize
        c = Canvas(self._points[:,:3],colors,sizes)
        app.run()
if __name__ == '__main__':
    # Module is intended for import; no standalone CLI behavior yet.
    pass
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"nibabel.load",
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"numpy.mean",
"numpy.histogram",
"numpy.where",
"numpy.random.random",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.random.seed",
"numpy.vstack",
"numpy.abs",
"numpy.int16",... | [((215, 231), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (229, 231), True, 'import numpy as np\n'), ((844, 862), 'nibabel.load', 'nib.load', (['pathname'], {}), '(pathname)\n', (852, 862), True, 'import nibabel as nib\n'), ((1005, 1022), 'numpy.max', 'np.max', (['self._img'], {}), '(self._img)\n', (1011, 1022), True, 'import numpy as np\n'), ((1256, 1274), 'nibabel.load', 'nib.load', (['pathname'], {}), '(pathname)\n', (1264, 1274), True, 'import nibabel as nib\n'), ((1417, 1434), 'numpy.max', 'np.max', (['self._img'], {}), '(self._img)\n', (1423, 1434), True, 'import numpy as np\n'), ((1657, 1669), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1667, 1669), False, 'import gc\n'), ((2682, 2696), 'numpy.where', 'np.where', (['filt'], {}), '(filt)\n', (2690, 2696), True, 'import numpy as np\n'), ((3114, 3137), 'numpy.vstack', 'np.vstack', (['[x, y, z, v]'], {}), '([x, y, z, v])\n', (3123, 3137), True, 'import numpy as np\n'), ((3158, 3184), 'numpy.transpose', 'np.transpose', (['self._points'], {}), '(self._points)\n', (3170, 3184), True, 'import numpy as np\n'), ((3450, 3501), 'numpy.loadtxt', 'np.loadtxt', (['pathname'], {'dtype': 'np.int16', 'delimiter': '""","""'}), "(pathname, dtype=np.int16, delimiter=',')\n", (3460, 3501), True, 'import numpy as np\n'), ((3834, 3893), 'numpy.savetxt', 'np.savetxt', (['pathname', 'self._points'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "(pathname, self._points, fmt='%d', delimiter=',')\n", (3844, 3893), True, 'import numpy as np\n'), ((4015, 4042), 'numpy.mean', 'np.mean', (['self._points[:, 0]'], {}), '(self._points[:, 0])\n', (4022, 4042), True, 'import numpy as np\n'), ((4060, 4087), 'numpy.mean', 'np.mean', (['self._points[:, 1]'], {}), '(self._points[:, 1])\n', (4067, 4087), True, 'import numpy as np\n'), ((4105, 4132), 'numpy.mean', 'np.mean', (['self._points[:, 2]'], {}), '(self._points[:, 2])\n', (4112, 4132), True, 'import numpy as np\n'), ((4161, 4178), 'numpy.int16', 'np.int16', 
(['centerX'], {}), '(centerX)\n', (4169, 4178), True, 'import numpy as np\n'), ((4208, 4225), 'numpy.int16', 'np.int16', (['centerY'], {}), '(centerY)\n', (4216, 4225), True, 'import numpy as np\n'), ((4255, 4272), 'numpy.int16', 'np.int16', (['centerZ'], {}), '(centerZ)\n', (4263, 4272), True, 'import numpy as np\n'), ((4389, 4440), 'numpy.histogram', 'np.histogram', (['self._points[:, 3]', '(256)'], {'density': '(True)'}), '(self._points[:, 3], 256, density=True)\n', (4401, 4440), True, 'import numpy as np\n'), ((4643, 4688), 'numpy.interp', 'np.interp', (['self._points[:, 3]', 'bins[:-1]', 'cdf'], {}), '(self._points[:, 3], bins[:-1], cdf)\n', (4652, 4688), True, 'import numpy as np\n'), ((4733, 4772), 'matplotlib.pyplot.hist', 'plt.hist', (['self._points[:, 3]'], {'bins': 'bins'}), '(self._points[:, 3], bins=bins)\n', (4741, 4772), True, 'import matplotlib.pyplot as plt\n'), ((4779, 4825), 'matplotlib.pyplot.title', 'plt.title', (["('%s Points Histogram' % self._token)"], {}), "('%s Points Histogram' % self._token)\n", (4788, 4825), True, 'import matplotlib.pyplot as plt\n'), ((4834, 4853), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""count"""'], {}), "('count')\n", (4844, 4853), True, 'import matplotlib.pyplot as plt\n'), ((4862, 4881), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""level"""'], {}), "('level')\n", (4872, 4881), True, 'import matplotlib.pyplot as plt\n'), ((4890, 4900), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4898, 4900), True, 'import matplotlib.pyplot as plt\n'), ((4909, 4919), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4917, 4919), True, 'import matplotlib.pyplot as plt\n'), ((5205, 5225), 'numpy.max', 'np.max', (['colors[:, 0]'], {}), '(colors[:, 0])\n', (5211, 5225), True, 'import numpy as np\n'), ((5238, 5258), 'numpy.max', 'np.max', (['colors[:, 1]'], {}), '(colors[:, 1])\n', (5244, 5258), True, 'import numpy as np\n'), ((5271, 5291), 'numpy.max', 'np.max', (['colors[:, 2]'], {}), '(colors[:, 2])\n', 
(5277, 5291), True, 'import numpy as np\n'), ((5503, 5529), 'numpy.hstack', 'np.hstack', (['[colors, alpha]'], {}), '([colors, alpha])\n', (5512, 5529), True, 'import numpy as np\n'), ((5619, 5661), 'plot.Canvas', 'Canvas', (['self._points[:, :3]', 'colors', 'sizes'], {}), '(self._points[:, :3], colors, sizes)\n', (5625, 5661), False, 'from plot import Canvas\n'), ((5667, 5676), 'vispy.app.run', 'app.run', ([], {}), '()\n', (5674, 5676), False, 'from vispy import app\n'), ((5147, 5174), 'numpy.abs', 'np.abs', (['self._points[:, :3]'], {}), '(self._points[:, :3])\n', (5153, 5174), True, 'import numpy as np\n'), ((2961, 2985), 'numpy.random.random', 'np.random.random', ([], {'size': 'l'}), '(size=l)\n', (2977, 2985), True, 'import numpy as np\n'), ((2802, 2815), 'numpy.float32', 'np.float32', (['v'], {}), '(v)\n', (2812, 2815), True, 'import numpy as np\n'), ((2816, 2837), 'numpy.float32', 'np.float32', (['self._max'], {}), '(self._max)\n', (2826, 2837), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import netCDF4 as nc
import pandas as pd
import multiprocessing
import textwrap
import matplotlib.pyplot as plt
import lhsmdu
import glob
import json
import os
import ast
import shutil
import subprocess
from contextlib import contextmanager
import param_util as pu
import output_utils as ou
@contextmanager
def log_wrapper(message,tag=''):
  """Print a '[SA:<tag>] <message>' banner on entry and a blank line on exit.

  The trailing blank line is printed even if the wrapped body raises.
  """
  banner = '[SA:{}] {}'.format(tag, message)
  print(banner)
  try:
    yield
  finally:
    print()
def generate_uniform(N, param_props):
  '''
  Generate sample matrix using uniform method.

  Sample matrix will have one row for each "sample" of the
  parameters. There will be one column for each parameter in
  the `param_props` list.

  Parameters
  ----------
  N : int
    number of samples (rows) to create
  param_props : list of dicts
    Each item in `param_props` list will be a dictionary
    with at least the following:
    >>> param_props = {
    ...   'name': 'rhq10',        # name in dvmdostem parameter file (cmt_*.txt)
    ...   'bounds': [5.2, 6.4],   # the min and max values the parameter can have
    ... }

  Returns
  -------
  df : pandas.DataFrame, shape (N, len(param_props))
    There will be one column for each parameter in the
    `param_props` list and N rows (samples).
  '''
  # Fix: removed stray debug `print(param_props)` left in the function body.
  # Draw N samples per parameter, uniform in [0, 1).
  l = np.random.uniform(size=(N, len(param_props)))
  # Generate bounds, based on specification in params list
  lows = np.array([p['bounds'][0] for p in param_props])
  highs = np.array([p['bounds'][1] for p in param_props])
  # Figure out the spread, or difference between bounds
  spreads = highs - lows
  # Scale the unit samples into [low, high) per parameter.
  sm = l * spreads + lows
  return pd.DataFrame(sm, columns=[p['name'] for p in param_props])
def generate_lhc(N, param_props):
  '''
  Generate sample matrix using Latin Hyper Cube method.

  Sample matrix will have one row for each "sample" of the
  parameters. There will be one column for each parameter in
  the `param_props` list.

  Parameters
  ----------
  N : int
    number of samples (rows) to create
  param_props : list of dicts
    Each item in `param_props` list will be a dictionary
    with at least the following:
    >>> param_props = {
    ...   'name': 'cmax',               # name in dvmdostem parameter file (cmt_*.txt)
    ...   'bounds': [100.1, 105.1],     # the min and max values the parameter can have
    ... }

  Returns
  -------
  df : pandas.DataFrame, shape (N, len(param_props))
    There will be one column for each parameter in the
    `param_props` list and N rows (samples).
  '''
  # Generate bounds, based on specification in params list
  lo_bounds = np.array([p['bounds'][0] for p in param_props])
  hi_bounds = np.array([p['bounds'][1] for p in param_props])
  # Figure out the spread, or difference between bounds
  spreads = hi_bounds - lo_bounds
  # Draw a Latin Hypercube sample on the unit cube:
  # len(param_props) dimensions x N samples.
  l = lhsmdu.sample(len(param_props), N)
  # NOTE(review): lhsmdu.resample() presumably redraws points within the
  # strata established by the sample() call above, and the result replaces
  # it; transpose to (N, n_params). TODO: confirm against lhsmdu docs.
  l = lhsmdu.resample().T
  # Diagonal matrix of per-parameter spreads; since `l` is a numpy matrix,
  # `*` below is matrix multiplication, scaling each column by its spread.
  mat_diff = np.diag(spreads)
  # Shift the scaled unit samples up to the lower bounds.
  sample_matrix = l * mat_diff + lo_bounds
  return pd.DataFrame(sample_matrix, columns=[p['name'] for p in param_props])
class SensitivityDriver(object):
    '''
    Sensitivity Analysis Driver class.
    Driver class for conducting dvmdostem SensitivityAnalysis.
    Methods for cleaning, setup, running model, collecting outputs.
    Basic overview of use is like this:
    1. Instantiate driver object.
    2. Setup/design the experiment (parameters, to use,
       number of samples, etc)
    3. Use driver object to setup the run folders.
    4. Use driver object to carry out model runs.
    5. Use driver object to summarize/collect outputs.
    6. Use driver object to make plots, do analysis.
    Parameters
    ----------
    See Also
    --------
    Examples
    --------
    Instantiate object, sets pixel, outputs, working directory,
    site selection (input data path)
    >>> driver = SensitivityDriver()
    Show info about the driver object:
    >>> driver.design_experiment(5, 4, params=['cmax','rhq10','nfall(1)'], pftnums=[2,None,2])
    >>> driver.sample_matrix
    cmax rhq10 nfall(1)
    0 63.536594 1.919504 0.000162
    1 62.528847 2.161819 0.000159
    2 67.606747 1.834203 0.000145
    3 59.671967 2.042034 0.000171
    4 57.711999 1.968631 0.000155
    '''
    def __init__(self, clean=False):
        '''
        Constructor
        Hard code a bunch of stuff for now...
        '''
        # Made this one private because I don't want it to get confused with
        # the later params directories that will be created in each run folder.
        self.__initial_params = '/work/parameters'
        # All run folders and summary files are created under this directory.
        self.work_dir = '/data/workflows/sensitivity_analysis'
        # Input data catalog entry used for every run.
        self.site = '/data/input-catalog/cru-ts40_ar5_rcp85_ncar-ccsm4_CALM_Toolik_LTER_10x10/'
        # Pixel (x, y) coordinates selected out of the input data.
        self.PXx = 0
        self.PXy = 0
        # Output variables to enable and later summarize; 'type' controls
        # whether monthly data is averaged (pool) or summed (flux) to yearly.
        self.outputs = [
            { 'name': 'GPP', 'type': 'flux',},
            { 'name': 'VEGC','type': 'pool',},
        ]
        # Make sure the working directory exists before any runs are set up.
        if not os.path.isdir(self.work_dir):
            os.mkdir(self.work_dir)
        if clean:
            self.clean()
def get_initial_params_dir(self):
'''Read only accessor to private member variable.'''
return self.__initial_params
    def design_experiment(self, Nsamples, cmtnum, params, pftnums,
                          percent_diffs=None, sampling_method='lhc'):
        '''
        Builds bounds based on initial values found in dvmdostem parameter
        files (cmt_*.txt files) and the `percent_diffs` array.
        The `percent_diffs` array gets used to figure out how far
        the bounds should be from the initial value. Defaults to initial
        value +/-10%.
        Sets instance values for `self.params` and `self.sample_matrix`.
        Parameters
        ----------
        Nsamples : int
          How many samples to draw. One sample equates to one run to be done with
          the parameter values in the sample.
        cmtnum : int
          Which community type number to use for initial parameter values, for
          doing runs and analyzing outputs.
        params : list of strings
          List of parameter names to use in the experiment. Each name must be
          in one of the dvmdostem parameter files (cmt_*.txt).
        pftnums : list of ints
          List of PFT numbers, one number for each parameter in `params`. Use
          `None` in the list for any non-pft parameter (i.e. a soil parameter).
        percent_diffs: list of floats
          List values, one for each parameter in `params`. The value is used to
          the bounds with respect to the intial parameter value. I.e. passing
          a value in the percent_diff array of .3 would mean that bounds should
          be +/-30% of the initial value of the parameter.
        Returns
        -------
        None
        '''
        if not percent_diffs:
            percent_diffs = np.ones(len(params)) * 0.1 # use 10% for default perturbation
        assert len(params) == len(pftnums), "params list and pftnums list must be same length!"
        assert len(params) == len(percent_diffs), "params list and percent_diffs list must be same length"
        self.params = []
        # Lookup table mapping parameter names to their cmt_*.txt files.
        plu = pu.build_param_lookup(self.__initial_params)
        for pname, pftnum, perturbation in zip(params, pftnums, percent_diffs):
            original_pdata_file = pu.which_file(self.__initial_params, pname, lookup_struct=plu)
            p_db = pu.get_CMT_datablock(original_pdata_file, cmtnum)
            p_dd = pu.cmtdatablock2dict(p_db)
            # Non-PFT (soil) parameters live at the top level of the dict;
            # PFT parameters are nested under a 'pftN' key.
            if pname in p_dd.keys():
                p_initial = p_dd[pname]
            else:
                p_initial = p_dd['pft{}'.format(pftnum)][pname]
            # Bounds are the initial value +/- the requested percentage.
            p_bounds = [p_initial - (p_initial*perturbation), p_initial + (p_initial*perturbation)]
            self.params.append(dict(name=pname, bounds=p_bounds, initial=p_initial, cmtnum=cmtnum, pftnum=pftnum))
        if sampling_method == 'lhc':
            self.sample_matrix = generate_lhc(Nsamples, self.params)
        elif sampling_method == 'uniform':
            self.sample_matrix = self.generate_uniform(Nsamples, self.params)
    def save_experiment(self, name=''):
        '''Write the parameter properties and sensitivity matrix to files.
        With no `name`, files go in `self.work_dir`; otherwise `name` is used
        as a prefix for files in the current directory.
        '''
        if name == '':
            sm_fname = os.path.join(self.work_dir, 'sample_matrix.csv')
            pp_fname = os.path.join(self.work_dir, 'param_props.csv')
        else:
            sm_fname = "{}_sample_matrix.csv".format(name)
            pp_fname = '{}_param_props.csv'.format(name)
        self.sample_matrix.to_csv(sm_fname, index=False)
        # params is a list of dicts; DataFrame gives a tidy csv with one
        # column per key (name, bounds, initial, cmtnum, pftnum).
        pd.DataFrame(self.params).to_csv(pp_fname, index=False)
    def load_experiment(self, param_props_path, sample_matrix_path):
        '''Load parameter properties and sample matrix from files.
        Inverse of `save_experiment(..)`: restores `self.params` (list of
        dicts) and `self.sample_matrix` (DataFrame) from csv files.
        '''
        self.sample_matrix = pd.read_csv(sample_matrix_path)
        # 'bounds' was written as a stringified list, so parse it back with
        # ast.literal_eval; 'name' is read as bytes ('S10') and decoded below.
        self.params = pd.read_csv(param_props_path,
            dtype={'name':'S10','cmtnum':np.int32,},
            converters={'bounds': ast.literal_eval}
        )
        self.params = self.params.to_dict(orient='records')
        # nan to None so that self.pftnum() function works later
        for x in self.params:
            if 'name' in x.keys():
                x['name'] = x['name'].decode('utf-8')
            if 'pftnum' in x.keys():
                if pd.isna(x['pftnum']): # could try np.isnan
                    x['pftnum'] = None
                else:
                    x['pftnum'] = int(x['pftnum'])
    def clean(self):
        '''
        Remove the entire tree at `self.work_dir` and re-create it empty.
        This function is not careful, so be careful using it!
        '''
        # ignore_errors=True means a missing work_dir is not an error.
        shutil.rmtree(self.work_dir, ignore_errors=True)
        os.makedirs(self.work_dir)
def get_sensitivity_csvs(self):
'''
Looks for all the sensitivity.csv files that are present in
the run directories. The sensitivity.csv files are created
using the extract_data_for_sensitivity_analysis(..) funciton.
Returns
-------
file_list : list of strings
list of paths to sensitivity.csv files, one for each file run folder
'''
pattern = '{}/*/sensitivity.csv'.format(self.work_dir)
file_list = sorted(glob.glob(pattern, recursive=True))
return file_list
    def info(self):
        '''
        Print some summary info about the SensitivityDriver object.
        Not sure how to best handle the summary of outputs yet. Maybe
        a separate method. The problem is that existing outputs may
        be leftover from prior runs and thus may not match the existing
        params and sample_matrix data. But I don't want to be too
        aggressive in cleaning up old outputs incase they are expensive
        to re-create.
        Returns
        -------
        info_str : str
          Multi-line, human readable summary of setup, parameters,
          sample matrix shape and any existing output csv files.
        '''
        try:
            pft_verbose_name = pu.get_pft_verbose_name(
                cmtnum=self.cmtnum(), pftnum=self.pftnum(),
                lookup_path=self.get_initial_params_dir()
            )
        except (AttributeError, ValueError) as e:
            pft_verbose_name = ''
        # Not all class attributes might be initialized, so if an
        # attribute is not set, then print empty string.
        try:
            # DataFrame prints nicely
            df = pd.DataFrame(self.params)
            # prevents printing nan
            # Might want to make this more specific to PFT column,
            # in case there somehow ends up being bad data in one of the
            # number columns that buggers things farther along?
            df = df.where(df.notna(), '')
            ps = df.to_string()
        except AttributeError:
            ps = "[not set]"
        try:
            #sms = self.sample_matrix.head()
            sms = self.sample_matrix.shape
        except AttributeError:
            sms = "[not set]"
        info_str = textwrap.dedent('''\
            --- Setup ---
            work_dir: {}
            site: {}
            pixel(y,x): ({},{})
            cmtnum: {}
            pftnum: {} ({})
            '''.format(
                self.work_dir, self.site, self.PXy, self.PXx, self.cmtnum(),
                self.pftnum(), pft_verbose_name))
        info_str += textwrap.dedent('''\
            --- Parameters ---
            '''.format())
        info_str += '{}\n\n'.format(ps)
        info_str += textwrap.dedent('''\
            --- Sample Matrix ---
            sample_matrix shape: {}
            '''.format(sms))
        info_str += textwrap.dedent('''\
            --- Outputs ---
            > NOTE - these may be leftover from a prior run!
            found {} existing sensitivity csv files.
            '''.format(len(self.get_sensitivity_csvs())))
        return info_str
    def core_setup(self, row, idx, initial=False):
        '''
        Do all the work to setup and configure a model run.
        Uses the `row` parameter (one row of the sample matrix) to
        set the parameter values for the run.
        Currently relies on command line API for various dvmdostem
        helper scripts. Would be nice to transition to using a Python
        API for all these helper scripts (modules).
        Parameters
        ----------
        row : dict
          One row of the sample matrix, in dict form. So like this:
            `{'cmax': 108.2, 'rhq10': 34.24}`
          with one key for each parameter name.
        idx : int
          The row index of the `sample_matrix` being worked on. Gets
          used to set the run specific folder name, i.e. sample_000001.
        initial : bool
          When True, `row` and `idx` are ignored and the run folder is set
          up with the initial parameter values instead of a sample.
        Returns
        -------
        None
        '''
        print("PROC:{} ".format(multiprocessing.current_process()), row)
        if initial:
            print("Ignoring idx, it is not really relevant here.")
            print("Ignoring row dict, not really relevant here.")
            # Build our own row dict, based on initial values in params
            row = {x['name']:x['initial'] for x in self.params}
            sample_specific_folder = os.path.join(self.work_dir, 'inital_value_run')
            if os.path.isdir(sample_specific_folder):
                shutil.rmtree(sample_specific_folder)
        else:
            sample_specific_folder = self._ssrf_name(idx)
        # Create the run folder with input data wired up.
        program = '/work/scripts/setup_working_directory.py'
        opt_str = '--input-data-path {} {}'.format(self.site, sample_specific_folder)
        cmdline = program + ' ' + opt_str
        with log_wrapper(cmdline, tag='setup') as lw:
            comp_proc = subprocess.run(cmdline, shell=True, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
        # Restrict the run mask to the single pixel of interest.
        program = '/work/scripts/runmask-util.py'
        opt_str = '--reset --yx {} {} {}/run-mask.nc'.format(self.PXy, self.PXx, sample_specific_folder)
        cmdline = program + ' ' + opt_str
        with log_wrapper(cmdline, tag='setup') as lw:
            comp_proc = subprocess.run(cmdline, shell=True, check=True)
        # Enable monthly, by-PFT output for each requested variable.
        for output_spec in self.outputs:
            program = '/work/scripts/outspec_utils.py'
            opt_str = '{}/config/output_spec.csv --on {} m p'.format(sample_specific_folder, output_spec['name'])
            cmdline = program + ' ' + opt_str
            with log_wrapper(cmdline, tag='setup') as lw:
                comp_proc = subprocess.run(cmdline, shell=True, check=True)
        # Always turn on yearly CMTNUM output as a sanity check.
        program = '/work/scripts/outspec_utils.py'
        opt_str = '{}/config/output_spec.csv --on CMTNUM y'.format(sample_specific_folder)
        cmdline = program + ' ' + opt_str
        with log_wrapper(cmdline, tag='setup') as lw:
            comp_proc = subprocess.run(cmdline, shell=True, check=True)
        CONFIG_FILE = os.path.join(sample_specific_folder, 'config/config.js')
        # Read the existing data into memory
        with open(CONFIG_FILE, 'r') as f:
            config = json.load(f)
        config['IO']['output_nc_eq'] = 1 # Modify value...
        # Write it back..
        with open(CONFIG_FILE, 'w') as f:
            json.dump(config, f, indent=2)
        # modify parameters according to sample_matrix (param values)
        # and the param_spec (cmtnum, pftnum)
        # Iterating over sample_matrix, which is a pandas.DataFrame, we use
        # itertuples() which coughs up a named tuple. So here we get the
        # name, and the sample value outof the named tuple for use in
        # calling param_utils update function.
        for pname, pval in row.items():
            #for pname, pval in zip(row._fields[1:], row[1:]):
            # Find the matching param spec to recover cmtnum/pftnum.
            pdict = list(filter(lambda x: x['name'] == pname, self.params))[0]
            pu.update_inplace(
                pval, os.path.join(sample_specific_folder, 'parameters'),
                pname, pdict['cmtnum'], pdict['pftnum']
            )
    def setup_multi(self):
        '''
        Makes one run directory for each row in sample matrix.
        This is essentially a wrapper around `core_setup(..)`
        that allows for parallelization.
        Returns
        -------
        None
        '''
        # Start fresh...
        self.clean()
        # Make a special directory for the "initial values" run.
        # row and idx args are ignored when setting up initial value run.
        self.core_setup(row={'ignore this and idx':None}, idx=324234, initial=True)
        # Build (row-dict, index, initial=False) argument tuples, one per sample.
        args = list(zip(self.sample_matrix.to_dict(orient='records'),
               range(0,len(self.sample_matrix)),
               np.zeros(len(self.sample_matrix), dtype=bool)))
        # Leave one core free so the host stays responsive.
        with multiprocessing.Pool(processes=(os.cpu_count()-1)) as pool:
            results = pool.starmap(self.core_setup, args)
        print(results)
        # Still need to make a directory for default case
def cmtnum(self):
'''
Enforces that there is only one cmtnum specified
amongst all the param specifications in `self.params`.
Returns
-------
cmtnum : int or None
The cmtnum specified, or None if cmt not set.
Raises
------
RuntimeError - if there in valid specification of
cmtnum in the params list.
'''
try:
c = set([x['cmtnum'] for x in self.params])
if not (len(c) == 1):
raise RuntimeError("Problem with cmt specification in param_spec!")
c = c.pop()
except AttributeError:
c = None
except KeyError:
c = None
return c
def pftnum(self):
'''
NOTE! Not really sure how this should work long term.
For now assume that all parameters must have the
same pftnum (or None for non-pft params).
So this ensures that all the parameters are set to the same
PFT (for pft params). If there are no PFT params, then
we return None, and if there is a problem (i.e. params
set for different PFTs), then we raise an exception.
This is only a problem for processing the outputs. Presumably
if we are adjusting PFT 3 we want to look at outputs for PFT 3.
Not sure how to handle a case where we have parameters adjusted
for several PFTs??? What outputs would we want??
'''
try:
pftnums = set([x['pftnum'] for x in self.params])
pftnums.discard(None)
except (AttributeError, KeyError) as e:
# AttributeError occurs when params attribute not set yet
# KeyError occurs if params does not have a column for pfts
# Not sure what the use case is for this...
pftnums = ''
if len(pftnums) == 1:
return pftnums.pop()
elif len(pftnums) == 0:
return None
else:
# For now
raise RuntimeError("Invalid pftnum specificaiton in params dictionary!")
def _ssrf_name(self, idx):
'''generate the Sample Specific Run Folder name.'''
return os.path.join(self.work_dir, 'sample_{:09d}'.format(idx))
def _ssrf_names(self):
'''Generate a list of Sample Specific Run Folder names.'''
return [self._ssrf_name(i) for i in range(0,len(self.sample_matrix))]
    def run_all_samples(self):
        '''
        Starts run in each Sample Specific Run Folder.
        Wrapper around `run_model(..)` that allows for parallelization.
        '''
        folders = self._ssrf_names()
        # Leave one core free so the host stays responsive.
        with multiprocessing.Pool(processes=(os.cpu_count()-1)) as pool:
            results = pool.map(self.run_model, folders)
        print()
    def run_model(self, rundirectory):
        '''
        Run the model.
        Assumes everything is setup in a "Sample Specific Run Folder".
        Parameters
        ----------
        rundirectory : str
          Path to the run folder; used as cwd and to locate config.js.
        Returns
        -------
        None
        '''
        program = '/work/dvmdostem'
        ctrl_file = os.path.join(rundirectory, 'config','config.js')
        opt_str = '-p 5 -e 5 -s 5 -t 5 -n 5 -l err --force-cmt {} --ctrl-file {}'.format(self.cmtnum(), ctrl_file)
        cmdline = program + ' ' + opt_str
        with log_wrapper(cmdline, tag='run') as lw:
            completed_process = subprocess.run(
                cmdline, # The program + options
                shell=True, # must be used if passing options as str and not list
                check=True, # raise CalledProcessError on failure
                #capture_output=True,# collect stdout and stderr; causes memory problems I think
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                cwd=rundirectory) # control context
        # NOTE(review): with check=True a non-zero exit raises CalledProcessError
        # before reaching this branch, and stdout/stderr are None here because
        # both streams are sent to DEVNULL -- this block looks unreachable/no-op.
        if not completed_process.returncode == 0:
            print(completed_process.stdout)
            print(completed_process.stderr)
    def first_steps_sensitivity_analysis(self):
        '''
        Grab the summarized sensitivity csvs and glue them
        together, then make correlation matrix. When glued together,
        the data will look like this, with one row for each sample,
        and one column for each parameter followed by one column for
        each output:
          p_cmax,  p_rhq10,  p_micbnup,  o_GPP,  o_VEGC
          1.215,   2.108,    0.432,      0.533,  5.112
          1.315,   3.208,    0.632,      0.721,  8.325
          1.295,   1.949,    0.468,      0.560,  5.201
          1.189,   2.076,    0.420,      0.592,  5.310
          1.138,   2.035,    0.441,      0.537,  5.132
          1.156,   1.911,    0.433,      0.557,  5.192
        Return
        ------
        None
        '''
        # Elchin: please improve or comment on this function.
        # Feel free to change the name of the function as you see fit!
        # Is there more we need to do to collect the data in a meaningful
        # way?
        # pattern = '{}/*/sensitivity.csv'.format(self.work_dir)
        # file_list = sorted(glob.glob(pattern, recursive=True))
        file_list = self.get_sensitivity_csvs()
        # One csv per run folder; concatenate them into a single frame.
        df = pd.concat( map(pd.read_csv, file_list), ignore_index=True)
        #df = df.sort_values('p_cmax')
        #print(df)
        #print()
        corr = df.corr()
        print(corr)
        print("Make some cool plot here....")
    def make_cool_plot_2(self):
        '''
        stitches all run stages together and plots one
        line for each sample run.
        One subplot per output variable; one plotted line per run folder.
        Return
        ------
        None
        '''
        # Elchin please improve or comment on this plot!
        # It is meant mostly as an exmaple of how you might
        # access and process the dvmdostem output data.
        fig, axes = plt.subplots(len(self.outputs))
        for r in os.listdir(self.work_dir):
            for i, o in enumerate(self.outputs):
                pattern = os.path.join(self.work_dir, r, 'output', '{}_monthly_*.nc'.format(o['name']))
                results = glob.glob(pattern, recursive=True)
                all_data = pd.DataFrame({}, columns=[o['name']])
                def sort_stage(x):
                    # Order run-stage files chronologically by their two-letter
                    # stage suffix (pr, eq, sp, tr, sc).
                    STAGE_ORDER = {'pr':0, 'eq':1, 'sp':2, 'tr':3, 'sc':4}
                    stg = os.path.splitext(os.path.basename(x))[0][-2:]
                    return STAGE_ORDER[stg]
                for f in sorted(results, key=sort_stage):
                    stg_data = nc.Dataset(f, 'r')
                    d = stg_data.variables[o['name']][:]
                    # if o['type'] == 'pool':
                    #   d = ou.average_monthly_pool_to_yearly(d)
                    # elif o['type'] == 'flux':
                    #   d = ou.sum_monthly_flux_to_yearly(d)
                    # Select this driver's PFT and pixel, keep the time axis.
                    d = pd.DataFrame(d[:,self.pftnum(),self.PXy,self.PXx], columns=[o['name']])
                    all_data = all_data.append(d, ignore_index=True)
                axes[i].plot(all_data)
                axes[i].set_ylabel(o['name'])
    def extract_data_for_sensitivity_analysis(self, posthoc=True, multi=True):
        '''
        Creates a csv file in each run directory that summarizes
        the run's parameters and outut. The csv file will look
        something like this:
        p_cmax,  p_rhq10,  p_micbnup,  o_GPP,  o_VEGC
        1.215,   2.108,    0.432,      0.533,  5.112
        with one columns for each parameter and one column for
        each output. The
        For each row in the sensitivity matrix (and corresponding
        run folder),
          For each variable specified in self.outputs:
              - opens the NetCDF files that were output from
                dvmdostem
              - grabs the last datapoint
              - writes it to the sensitivity.csv file
        Parameters
        ----------
        posthoc : bool (Not implemented yet)
          Flag for controlling whether this step should be run after the
          model run or as part of the model run
        multi : bool (Not impolemented yet)
          Flag for if the runs are done all in one directory or each in
          its own directory. If all runs are done in one directory
          then paralleization is harder and this step of collating the
          output data must be done at the end of the run before the next run
          overwrites it.
        Returns
        -------
        None
        '''
        for row in self.sample_matrix.itertuples():
            sample_specific_folder = self._ssrf_name(row.Index)
            sensitivity_outfile = os.path.join(sample_specific_folder, 'sensitivity.csv')
            # Not sure if this is how we want to set things up...
            # Seems like each of these files will have only one row, but the
            # advantage of having a sensitivity file per sample directory is that
            # we can leverage the same parallelism for running this function
            # Then we'll need another step to collect all the csv files into one
            # at the end.
            pstr = ','.join(['p_{}'.format(p['name']) for p in self.params])
            ostr = ','.join(['o_{}'.format(o['name']) for o in self.outputs])
            hdrline = pstr + ',' + ostr + '\n'
            with open(sensitivity_outfile, 'w') as f:
                f.write(hdrline)
            # Now do the parameters and output values...
            pstr = ','.join(['{:2.3f}'.format(x) for x in self.sample_matrix.iloc[row.Index]])
            ostr = ''
            for output in self.outputs:
                ds = nc.Dataset(sample_specific_folder + '/output/' + '{}_monthly_eq.nc'.format(output['name']))
                data_m = ds.variables[output['name']][:]
                # Collapse monthly data to yearly according to output type.
                if output['type'] == 'pool':
                    data_y = ou.average_monthly_pool_to_yearly(data_m)
                elif output['type'] == 'flux':
                    data_y = ou.sum_monthly_flux_to_yearly(data_m)
                # TODO: Need to handle non-PFT outputs!
                # Last yearly datapoint for this driver's PFT and pixel.
                ostr += '{:2.3f},'.format(data_y[-1,self.pftnum(),self.PXy,self.PXx])
            ostr = ostr.rstrip(',') # remove trailing comma...
            with open(sensitivity_outfile, 'a') as f:
                f.write(pstr + ',' + ostr + '\n')
    def plot_sensitivity_matrix(self):
        '''
        Make a quick plot showing the properties of the sensitivity matrix.
        One row for each parameter:
          Left column is sample values with bounds marked in red.
          Middle column is histogram of sample values.
          Right column is boxplot of sample values
        '''
        # Elchin: please improve or comment on this plot. I am not sure
        # what the standard, meaningful ways to visualize the sample matrix
        # data are!
        fig, axes = plt.subplots(len(self.params),3)
        for i, (p, ax) in enumerate(zip(self.params, axes)):
            # Left: scatter of the samples with the bounds as red dotted lines.
            ax[0].plot(self.sample_matrix.iloc[:, i], marker='.', linewidth=0)
            ax[0].set_ylabel(p['name'])
            ax[0].hlines(p['bounds'], 0, len(self.sample_matrix)-1, linestyles='dotted', colors='red')
            # Middle: histogram over the parameter's bounded range.
            ax[1].hist(self.sample_matrix.iloc[:, i], range=p['bounds'], orientation='horizontal', alpha=0.75, rwidth=0.8)
            # Right: boxplot sharing the scatter plot's y limits.
            ax[2].boxplot(self.sample_matrix.iloc[:, i])
            ax[2].set_ylim(ax[0].get_ylim())
        plt.tight_layout()
# Running this module directly executes the embedded doctests.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
doctest.testfile("doctests_Sensitivity.md") | [
"pandas.read_csv",
"param_util.get_CMT_datablock",
"numpy.array",
"os.cpu_count",
"os.listdir",
"param_util.cmtdatablock2dict",
"subprocess.run",
"netCDF4.Dataset",
"os.path.isdir",
"doctest.testmod",
"os.mkdir",
"pandas.DataFrame",
"glob.glob",
"param_util.build_param_lookup",
"lhsmdu.r... | [((1521, 1568), 'numpy.array', 'np.array', (["[p['bounds'][0] for p in param_props]"], {}), "([p['bounds'][0] for p in param_props])\n", (1529, 1568), True, 'import numpy as np\n'), ((1579, 1626), 'numpy.array', 'np.array', (["[p['bounds'][1] for p in param_props]"], {}), "([p['bounds'][1] for p in param_props])\n", (1587, 1626), True, 'import numpy as np\n'), ((1746, 1804), 'pandas.DataFrame', 'pd.DataFrame', (['sm'], {'columns': "[p['name'] for p in param_props]"}), "(sm, columns=[p['name'] for p in param_props])\n", (1758, 1804), True, 'import pandas as pd\n'), ((2710, 2757), 'numpy.array', 'np.array', (["[p['bounds'][0] for p in param_props]"], {}), "([p['bounds'][0] for p in param_props])\n", (2718, 2757), True, 'import numpy as np\n'), ((2772, 2819), 'numpy.array', 'np.array', (["[p['bounds'][1] for p in param_props]"], {}), "([p['bounds'][1] for p in param_props])\n", (2780, 2819), True, 'import numpy as np\n'), ((3017, 3033), 'numpy.diag', 'np.diag', (['spreads'], {}), '(spreads)\n', (3024, 3033), True, 'import numpy as np\n'), ((3095, 3164), 'pandas.DataFrame', 'pd.DataFrame', (['sample_matrix'], {'columns': "[p['name'] for p in param_props]"}), "(sample_matrix, columns=[p['name'] for p in param_props])\n", (3107, 3164), True, 'import pandas as pd\n'), ((26811, 26828), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (26826, 26828), False, 'import doctest\n'), ((26831, 26874), 'doctest.testfile', 'doctest.testfile', (['"""doctests_Sensitivity.md"""'], {}), "('doctests_Sensitivity.md')\n", (26847, 26874), False, 'import doctest\n'), ((2976, 2993), 'lhsmdu.resample', 'lhsmdu.resample', ([], {}), '()\n', (2991, 2993), False, 'import lhsmdu\n'), ((6989, 7033), 'param_util.build_param_lookup', 'pu.build_param_lookup', (['self.__initial_params'], {}), '(self.__initial_params)\n', (7010, 7033), True, 'import param_util as pu\n'), ((8503, 8534), 'pandas.read_csv', 'pd.read_csv', (['sample_matrix_path'], {}), '(sample_matrix_path)\n', (8514, 
8534), True, 'import pandas as pd\n'), ((8553, 8670), 'pandas.read_csv', 'pd.read_csv', (['param_props_path'], {'dtype': "{'name': 'S10', 'cmtnum': np.int32}", 'converters': "{'bounds': ast.literal_eval}"}), "(param_props_path, dtype={'name': 'S10', 'cmtnum': np.int32},\n converters={'bounds': ast.literal_eval})\n", (8564, 8670), True, 'import pandas as pd\n'), ((9229, 9277), 'shutil.rmtree', 'shutil.rmtree', (['self.work_dir'], {'ignore_errors': '(True)'}), '(self.work_dir, ignore_errors=True)\n', (9242, 9277), False, 'import shutil\n'), ((9282, 9308), 'os.makedirs', 'os.makedirs', (['self.work_dir'], {}), '(self.work_dir)\n', (9293, 9308), False, 'import os\n'), ((14646, 14702), 'os.path.join', 'os.path.join', (['sample_specific_folder', '"""config/config.js"""'], {}), "(sample_specific_folder, 'config/config.js')\n", (14658, 14702), False, 'import os\n'), ((19267, 19316), 'os.path.join', 'os.path.join', (['rundirectory', '"""config"""', '"""config.js"""'], {}), "(rundirectory, 'config', 'config.js')\n", (19279, 19316), False, 'import os\n'), ((21790, 21815), 'os.listdir', 'os.listdir', (['self.work_dir'], {}), '(self.work_dir)\n', (21800, 21815), False, 'import os\n'), ((26745, 26763), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26761, 26763), True, 'import matplotlib.pyplot as plt\n'), ((4934, 4962), 'os.path.isdir', 'os.path.isdir', (['self.work_dir'], {}), '(self.work_dir)\n', (4947, 4962), False, 'import os\n'), ((4970, 4993), 'os.mkdir', 'os.mkdir', (['self.work_dir'], {}), '(self.work_dir)\n', (4978, 4993), False, 'import os\n'), ((7139, 7201), 'param_util.which_file', 'pu.which_file', (['self.__initial_params', 'pname'], {'lookup_struct': 'plu'}), '(self.__initial_params, pname, lookup_struct=plu)\n', (7152, 7201), True, 'import param_util as pu\n'), ((7216, 7265), 'param_util.get_CMT_datablock', 'pu.get_CMT_datablock', (['original_pdata_file', 'cmtnum'], {}), '(original_pdata_file, cmtnum)\n', (7236, 7265), True, 'import 
param_util as pu\n'), ((7279, 7305), 'param_util.cmtdatablock2dict', 'pu.cmtdatablock2dict', (['p_db'], {}), '(p_db)\n', (7299, 7305), True, 'import param_util as pu\n'), ((8000, 8048), 'os.path.join', 'os.path.join', (['self.work_dir', '"""sample_matrix.csv"""'], {}), "(self.work_dir, 'sample_matrix.csv')\n", (8012, 8048), False, 'import os\n'), ((8066, 8112), 'os.path.join', 'os.path.join', (['self.work_dir', '"""param_props.csv"""'], {}), "(self.work_dir, 'param_props.csv')\n", (8078, 8112), False, 'import os\n'), ((9767, 9801), 'glob.glob', 'glob.glob', (['pattern'], {'recursive': '(True)'}), '(pattern, recursive=True)\n', (9776, 9801), False, 'import glob\n'), ((10719, 10744), 'pandas.DataFrame', 'pd.DataFrame', (['self.params'], {}), '(self.params)\n', (10731, 10744), True, 'import pandas as pd\n'), ((13126, 13173), 'os.path.join', 'os.path.join', (['self.work_dir', '"""inital_value_run"""'], {}), "(self.work_dir, 'inital_value_run')\n", (13138, 13173), False, 'import os\n'), ((13183, 13220), 'os.path.isdir', 'os.path.isdir', (['sample_specific_folder'], {}), '(sample_specific_folder)\n', (13196, 13220), False, 'import os\n'), ((13578, 13682), 'subprocess.run', 'subprocess.run', (['cmdline'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.STDOUT'}), '(cmdline, shell=True, check=True, stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT)\n', (13592, 13682), False, 'import subprocess\n'), ((13935, 13982), 'subprocess.run', 'subprocess.run', (['cmdline'], {'shell': '(True)', 'check': '(True)'}), '(cmdline, shell=True, check=True)\n', (13949, 13982), False, 'import subprocess\n'), ((14579, 14626), 'subprocess.run', 'subprocess.run', (['cmdline'], {'shell': '(True)', 'check': '(True)'}), '(cmdline, shell=True, check=True)\n', (14593, 14626), False, 'import subprocess\n'), ((14797, 14809), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14806, 14809), False, 'import json\n'), ((14937, 14967), 'json.dump', 'json.dump', 
(['config', 'f'], {'indent': '(2)'}), '(config, f, indent=2)\n', (14946, 14967), False, 'import json\n'), ((19539, 19662), 'subprocess.run', 'subprocess.run', (['cmdline'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL', 'cwd': 'rundirectory'}), '(cmdline, shell=True, check=True, stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL, cwd=rundirectory)\n', (19553, 19662), False, 'import subprocess\n'), ((24222, 24277), 'os.path.join', 'os.path.join', (['sample_specific_folder', '"""sensitivity.csv"""'], {}), "(sample_specific_folder, 'sensitivity.csv')\n", (24234, 24277), False, 'import os\n'), ((8287, 8312), 'pandas.DataFrame', 'pd.DataFrame', (['self.params'], {}), '(self.params)\n', (8299, 8312), True, 'import pandas as pd\n'), ((8951, 8971), 'pandas.isna', 'pd.isna', (["x['pftnum']"], {}), "(x['pftnum'])\n", (8958, 8971), True, 'import pandas as pd\n'), ((12792, 12825), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (12823, 12825), False, 'import multiprocessing\n'), ((13230, 13267), 'shutil.rmtree', 'shutil.rmtree', (['sample_specific_folder'], {}), '(sample_specific_folder)\n', (13243, 13267), False, 'import shutil\n'), ((14290, 14337), 'subprocess.run', 'subprocess.run', (['cmdline'], {'shell': '(True)', 'check': '(True)'}), '(cmdline, shell=True, check=True)\n', (14304, 14337), False, 'import subprocess\n'), ((15534, 15584), 'os.path.join', 'os.path.join', (['sample_specific_folder', '"""parameters"""'], {}), "(sample_specific_folder, 'parameters')\n", (15546, 15584), False, 'import os\n'), ((21974, 22008), 'glob.glob', 'glob.glob', (['pattern'], {'recursive': '(True)'}), '(pattern, recursive=True)\n', (21983, 22008), False, 'import glob\n'), ((22029, 22066), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {'columns': "[o['name']]"}), "({}, columns=[o['name']])\n", (22041, 22066), True, 'import pandas as pd\n'), ((22327, 22345), 'netCDF4.Dataset', 'nc.Dataset', 
(['f', '"""r"""'], {}), "(f, 'r')\n", (22337, 22345), True, 'import netCDF4 as nc\n'), ((25319, 25360), 'output_utils.average_monthly_pool_to_yearly', 'ou.average_monthly_pool_to_yearly', (['data_m'], {}), '(data_m)\n', (25352, 25360), True, 'import output_utils as ou\n'), ((16354, 16368), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (16366, 16368), False, 'import os\n'), ((18945, 18959), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (18957, 18959), False, 'import os\n'), ((25419, 25456), 'output_utils.sum_monthly_flux_to_yearly', 'ou.sum_monthly_flux_to_yearly', (['data_m'], {}), '(data_m)\n', (25448, 25456), True, 'import output_utils as ou\n'), ((22192, 22211), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (22208, 22211), False, 'import os\n')] |
import numpy as np
# import matplotlib.pyplot as plt
import pickle
from pathlib import Path
import torch
from google.protobuf import text_format
from second.utils import simplevis
from second.pytorch.train import build_network
from second.protos import pipeline_pb2
from second.utils import config_tool
import time
import cv2
# --- Config and model setup ---------------------------------------------
# config_path = "/home/mayank_sati/pycharm_projects/pytorch/second.pytorch_traveller59_date_9_05/second/configs/car.fhd.config"
# config_path = "/home/mayank_sati/pycharm_projects/pytorch/second.pytorch_traveller59_date_9_05/second/configs/nuscenes/all.fhd_Aws.config"
config_path = "../configs/nuscenes/all.pp.largea.config"
# Parse the protobuf pipeline config (train/eval settings, model def).
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
    proto_str = f.read()
    text_format.Merge(proto_str, config)
input_cfg = config.eval_input_reader
model_cfg = config.model.second
# config_tool.change_detection_range(model_cfg, [-50, -50, 50, 50])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
# print("i was here")
# /home/mayank_sati/pycharm_projects/pytorch/second.pytorch_traveller59_date_9_05/second/mayank_pc_trained/voxelnet-5865.tckpt
# ckpt_path = "/home/mayank_sati/pycharm_projects/pytorch/second.pytorch_traveller59_date_9_05/second/mayank_all_fhpd/voxelnet-29325.tckpt"
ckpt_path = "/home/mayank_sati/codebase/python/lidar/pointpillars/second.pytorch/second/checkpoint/voxelnet-140670.tckpt"
# ckpt_path = "/home/mayank_sati/pycharm_projects/pytorch/second.pytorch_traveller59_date_9_05/second/eval_result/pretrained_models_v1.5/pp_model_for_nuscenes_pretrain/voxelnet-296960.tckpt"
# Build the network from config, load trained weights, switch to eval mode.
net = build_network(model_cfg).to(device).eval()
net.load_state_dict(torch.load(ckpt_path))
target_assigner = net.target_assigner
voxel_generator = net.voxel_generator
# Feature map size derived from the voxel grid and the network's downsampling.
grid_size = voxel_generator.grid_size
feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(model_cfg)
feature_map_size = [*feature_map_size, 1][::-1]
anchors = target_assigner.generate_anchors(feature_map_size)["anchors"]
anchors = torch.tensor(anchors, dtype=torch.float32, device=device)
anchors = anchors.view(1, -1, 7)
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# --- Rebuild anchors per class and assemble an anchor cache -------------
# NOTE(review): this overrides the feature_map_size computed above with a
# hard-coded [1, 50, 50] -- confirm this matches the config's grid.
feature_map_size = [1, 50, 50]
ret = target_assigner.generate_anchors(feature_map_size)
class_names = target_assigner.classes
anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
anchors_list = []
for k, v in anchors_dict.items():
    anchors_list.append(v["anchors"])
# anchors = ret["anchors"]
# Stack per-class anchors; must equal the flat anchor list from ret.
anchors = np.concatenate(anchors_list, axis=0)
anchors = anchors.reshape([-1, target_assigner.box_ndim])
assert np.allclose(anchors, ret["anchors"].reshape(-1, target_assigner.box_ndim))
matched_thresholds = ret["matched_thresholds"]
unmatched_thresholds = ret["unmatched_thresholds"]
# anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]])
# NOTE(review): anchors_bv is stubbed to a scalar since the rbbox2d call
# above is commented out -- the cache entry is a placeholder.
anchors_bv=2
anchor_cache = {
    "anchors": anchors,
    "anchors_bv": anchors_bv,
    "matched_thresholds": matched_thresholds,
    "unmatched_thresholds": unmatched_thresholds,
    "anchors_dict": anchors_dict,
}
anchors = torch.tensor(anchors, dtype=torch.float32, device=device)
anchors = anchors.view(1, -1, 7)
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# --- Load one point cloud, voxelize, run inference, save detections -----
info_path = input_cfg.dataset.kitti_info_path
root_path = Path(input_cfg.dataset.kitti_root_path)
with open(info_path, 'rb') as f:
    infos = pickle.load(f)
# points = np.fromfile(
#     '/home/mayank_sati/Documents/point_clouds/nucene_pickle/nuscene/n008-2018-05-21-11-06-59-0400__LIDAR_TOP__1526915243047392.pcd.bin',
#     dtype=np.float32)
t = time.time()
# v_path ="/home/mayank_sati/Documents/point_clouds/000000.bin"
# v_path='/home/mayank_sati/Documents/point_clouds/nuscene_v1.0-mini/samples/LIDAR_TOP/n008-2018-08-01-15-16-36-0400__LIDAR_TOP__1533151622448916.pcd.bin'
v_path='/home/mayank_sati/Downloads/v1.0-mini/samples/LIDAR_TOP/n008-2018-08-01-15-16-36-0400__LIDAR_TOP__1533151605548192.pcd.bin'
#points = np.fromfile(v_path, dtype=np.float32, count=-1).reshape(-1, 5])
# nuScenes lidar bins store 5 floats per point; keep x, y, z, intensity.
points = np.fromfile(v_path, dtype=np.float32)
points = points.reshape((-1, 5))[:, :4]
# points = points.reshape((-1, 4))
# points = points.reshape((-1, 5))[:, :4]
# voxels, coords, num_points,voxel_num = voxel_generator.generate(points, max_voxels=20000)
####################################################3
# points = np.fromfile(str(v_path), dtype=np.float32, count=-1).reshape([-1, 5])
# Scale intensity from [0, 255] to [0, 1].
points[:, 3] /= 255
# points[:, 4] = 0
#########################################################333
res = voxel_generator.generate(points, max_voxels=80000)
voxels = res["voxels"]
coords = res["coordinates"]
num_points = res["num_points_per_voxel"]
num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
print("voxel_generator_time",(time.time() - t)*1000)
###############################################################
# print(voxels.shape)
# add batch idx to coords
coords = np.pad(coords, ((0, 0), (1, 0)), mode='constant', constant_values=0)
voxels = torch.tensor(voxels, dtype=torch.float32, device=device)
coords = torch.tensor(coords, dtype=torch.int32, device=device)
num_points = torch.tensor(num_points, dtype=torch.int32, device=device)
print("conversion time",(time.time() - t)*1000)
example = {"anchors": anchors, "voxels": voxels, "num_points": num_points, "coordinates": coords,}
t2 = time.time()
# Single forward pass; the network returns one prediction dict per batch item.
pred = net(example)[0]
print("prediction",(time.time() - t2)*1000)
print("total_time",(time.time() - t)*1000)
boxes_lidar = pred["box3d_lidar"].detach().cpu().numpy()
scores_lidar = pred["scores"].detach().cpu().numpy()
##############################3333
# Keep only detections at or above the score threshold.
threshold=.3
keep = np.where((scores_lidar >= threshold))[0]
scores_lidar = scores_lidar[keep]
boxes_lidar = boxes_lidar[keep]
#######################################
# Persist filtered detections for later visualization/analysis.
with open("result_nu1_scores.pkl", 'wb') as f:
    pickle.dump(scores_lidar, f)
with open("result_nu1.pkl", 'wb') as f:
    pickle.dump(boxes_lidar, f)
vis_voxel_size = [0.1, 0.1, 0.1]
vis_point_range = [-50, -30, -3, 50, 30, 1]
bev_map = simplevis.point_to_vis_bev(points, vis_voxel_size, vis_point_range)
bev_map = simplevis.draw_box_in_bev(bev_map, vis_point_range, boxes_lidar, [0, 255, 0], 2)
print(bev_map)
cv2.imshow('color image',bev_map)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"numpy.fromfile",
"second.utils.simplevis.draw_box_in_bev",
"cv2.imshow",
"numpy.array",
"torch.cuda.is_available",
"cv2.destroyAllWindows",
"pathlib.Path",
"numpy.where",
"second.protos.pipeline_pb2.TrainEvalPipelineConfig",
"numpy.concatenate",
"cv2.waitKey",
"pickle.load",
"second.pytorch... | [((661, 699), 'second.protos.pipeline_pb2.TrainEvalPipelineConfig', 'pipeline_pb2.TrainEvalPipelineConfig', ([], {}), '()\n', (697, 699), False, 'from second.protos import pipeline_pb2\n'), ((2060, 2117), 'torch.tensor', 'torch.tensor', (['anchors'], {'dtype': 'torch.float32', 'device': 'device'}), '(anchors, dtype=torch.float32, device=device)\n', (2072, 2117), False, 'import torch\n'), ((2551, 2587), 'numpy.concatenate', 'np.concatenate', (['anchors_list'], {'axis': '(0)'}), '(anchors_list, axis=0)\n', (2565, 2587), True, 'import numpy as np\n'), ((3128, 3185), 'torch.tensor', 'torch.tensor', (['anchors'], {'dtype': 'torch.float32', 'device': 'device'}), '(anchors, dtype=torch.float32, device=device)\n', (3140, 3185), False, 'import torch\n'), ((3355, 3394), 'pathlib.Path', 'Path', (['input_cfg.dataset.kitti_root_path'], {}), '(input_cfg.dataset.kitti_root_path)\n', (3359, 3394), False, 'from pathlib import Path\n'), ((3655, 3666), 'time.time', 'time.time', ([], {}), '()\n', (3664, 3666), False, 'import time\n'), ((4101, 4138), 'numpy.fromfile', 'np.fromfile', (['v_path'], {'dtype': 'np.float32'}), '(v_path, dtype=np.float32)\n', (4112, 4138), True, 'import numpy as np\n'), ((4746, 4789), 'numpy.array', 'np.array', (['[voxels.shape[0]]'], {'dtype': 'np.int64'}), '([voxels.shape[0]], dtype=np.int64)\n', (4754, 4789), True, 'import numpy as np\n'), ((4964, 5032), 'numpy.pad', 'np.pad', (['coords', '((0, 0), (1, 0))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(coords, ((0, 0), (1, 0)), mode='constant', constant_values=0)\n", (4970, 5032), True, 'import numpy as np\n'), ((5042, 5098), 'torch.tensor', 'torch.tensor', (['voxels'], {'dtype': 'torch.float32', 'device': 'device'}), '(voxels, dtype=torch.float32, device=device)\n', (5054, 5098), False, 'import torch\n'), ((5108, 5162), 'torch.tensor', 'torch.tensor', (['coords'], {'dtype': 'torch.int32', 'device': 'device'}), '(coords, dtype=torch.int32, device=device)\n', (5120, 5162), 
False, 'import torch\n'), ((5176, 5234), 'torch.tensor', 'torch.tensor', (['num_points'], {'dtype': 'torch.int32', 'device': 'device'}), '(num_points, dtype=torch.int32, device=device)\n', (5188, 5234), False, 'import torch\n'), ((5387, 5398), 'time.time', 'time.time', ([], {}), '()\n', (5396, 5398), False, 'import time\n'), ((6061, 6128), 'second.utils.simplevis.point_to_vis_bev', 'simplevis.point_to_vis_bev', (['points', 'vis_voxel_size', 'vis_point_range'], {}), '(points, vis_voxel_size, vis_point_range)\n', (6087, 6128), False, 'from second.utils import simplevis\n'), ((6139, 6224), 'second.utils.simplevis.draw_box_in_bev', 'simplevis.draw_box_in_bev', (['bev_map', 'vis_point_range', 'boxes_lidar', '[0, 255, 0]', '(2)'], {}), '(bev_map, vis_point_range, boxes_lidar, [0, 255, 0], 2\n )\n', (6164, 6224), False, 'from second.utils import simplevis\n'), ((6235, 6269), 'cv2.imshow', 'cv2.imshow', (['"""color image"""', 'bev_map'], {}), "('color image', bev_map)\n", (6245, 6269), False, 'import cv2\n'), ((6269, 6283), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6280, 6283), False, 'import cv2\n'), ((6284, 6307), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6305, 6307), False, 'import cv2\n'), ((763, 799), 'google.protobuf.text_format.Merge', 'text_format.Merge', (['proto_str', 'config'], {}), '(proto_str, config)\n', (780, 799), False, 'from google.protobuf import text_format\n'), ((1709, 1730), 'torch.load', 'torch.load', (['ckpt_path'], {}), '(ckpt_path)\n', (1719, 1730), False, 'import torch\n'), ((1884, 1928), 'second.utils.config_tool.get_downsample_factor', 'config_tool.get_downsample_factor', (['model_cfg'], {}), '(model_cfg)\n', (1917, 1928), False, 'from second.utils import config_tool\n'), ((3440, 3454), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3451, 3454), False, 'import pickle\n'), ((5674, 5709), 'numpy.where', 'np.where', (['(scores_lidar >= threshold)'], {}), '(scores_lidar >= threshold)\n', (5682, 5709), 
True, 'import numpy as np\n'), ((5872, 5900), 'pickle.dump', 'pickle.dump', (['scores_lidar', 'f'], {}), '(scores_lidar, f)\n', (5883, 5900), False, 'import pickle\n'), ((5945, 5972), 'pickle.dump', 'pickle.dump', (['boxes_lidar', 'f'], {}), '(boxes_lidar, f)\n', (5956, 5972), False, 'import pickle\n'), ((969, 994), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (992, 994), False, 'import torch\n'), ((4820, 4831), 'time.time', 'time.time', ([], {}), '()\n', (4829, 4831), False, 'import time\n'), ((5260, 5271), 'time.time', 'time.time', ([], {}), '()\n', (5269, 5271), False, 'import time\n'), ((5442, 5453), 'time.time', 'time.time', ([], {}), '()\n', (5451, 5453), False, 'import time\n'), ((5486, 5497), 'time.time', 'time.time', ([], {}), '()\n', (5495, 5497), False, 'import time\n'), ((1646, 1670), 'second.pytorch.train.build_network', 'build_network', (['model_cfg'], {}), '(model_cfg)\n', (1659, 1670), False, 'from second.pytorch.train import build_network\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import numpy as np
import math
def ClassifyColor(BGR, width, height):
    """Binarize a BGR image in place based on dominant color.

    A pixel is classified as red when its R channel exceeds BOTH its B
    and G channels by more than the threshold; red pixels become white
    ([255, 255, 255]) and every other pixel becomes black ([0, 0, 0]).
    (The original per-pixel loop also detected "blue" pixels, but mapped
    them to black — the same as the fall-through case — so blue
    detection collapses into "everything non-red is black".)

    This is a vectorized NumPy rewrite of the original per-pixel Python
    loop; results are identical but it runs in native code.

    Parameters
    ----------
    BGR : numpy.ndarray
        uint8 image of shape (height, width, 3), channels in BGR order.
        Modified in place.
    width, height : int
        Extent of the region (from the top-left corner) to classify.

    Returns
    -------
    numpy.ndarray
        The same `BGR` array, modified in place.
    """
    r_threshold = 20  # how much R must exceed B and G to count as "red"
    region = BGR[:height, :width]
    # Promote to a signed type so uint8 subtraction cannot wrap around.
    b = region[:, :, 0].astype(np.int32)
    g = region[:, :, 1].astype(np.int32)
    r = region[:, :, 2].astype(np.int32)
    red_mask = (r - b > r_threshold) & (r - g > r_threshold)
    # Everything becomes black, then clearly-red pixels become white.
    region[:] = 0
    region[red_mask] = 255
    return BGR
# Capture frames from the default webcam, denoise them with a
# dilate/erode pass and display the red/non-red classification until
# the user presses ESC.
cap = cv2.VideoCapture(0)
while(1):
    # NOTE(review): an `if __name__ == '__main__'` guard INSIDE the loop
    # is unusual — it normally wraps the whole script; confirm intent.
    if __name__ == '__main__':
        ret, frame = cap.read()
        # NOTE(review): `frame.size == 0` raises AttributeError when the
        # read fails (frame is None); `if not ret:` would be the robust
        # check (see the commented-out alternative below).
        if frame.size == 0:
        ##if ret == False:
            print(f"Fail to read image (unknown)")
        else:
            cv2.imshow('Original frame',frame)
            (height, width, channels) = frame.shape
            print(f"frame dimension ( {height} , {width}, {channels})\n" )
            # Warn (but continue) on unexpected frame formats.
            if channels != 3:
                print("Image is not a color image ##################")
            if frame.dtype != "uint8":
                print("Image is not of type uint8 #################")
            ms = frame.copy()
            # Morphological smoothing: dilate then erode (a closing-like
            # pass) to suppress small speckles before classification.
            kernel = np.ones((5,5), np.uint8)
            ##dilation = cv2.dilate(test, kernel, iterations = 3)
            dilation = cv2.dilate(ms, kernel, iterations = 7)
            cv2.imshow('dilation', dilation)
            kernel = np.ones((7,7), np.uint8)
            ##erosion = cv2.erode(dilation, kernel, iterations = 3)
            erosion = cv2.erode(dilation, kernel, iterations = 9)
            cv2.imshow('erosion', erosion)
            # Convert the image to a (new) NumPy array.
            # Note: channel order is BGR, as produced by OpenCV.
            BGR_array = np.array( erosion )
            # Classify red vs. everything else (white/black output).
            Result_array = ClassifyColor( BGR_array, width, height )
            cv2.imshow('BGR',Result_array)
        # ESC (keycode 27) quits the loop.
        k = cv2.waitKey(5) & 0xFF
        if k == 27:
            break
# Close the window
cap.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
| [
"numpy.ones",
"cv2.erode",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.dilate",
"cv2.waitKey"
] | [((1180, 1199), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1196, 1199), False, 'import cv2\n'), ((2914, 2937), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2935, 2937), False, 'import cv2\n'), ((1440, 1475), 'cv2.imshow', 'cv2.imshow', (['"""Original frame"""', 'frame'], {}), "('Original frame', frame)\n", (1450, 1475), False, 'import cv2\n'), ((1887, 1912), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (1894, 1912), True, 'import numpy as np\n'), ((2003, 2039), 'cv2.dilate', 'cv2.dilate', (['ms', 'kernel'], {'iterations': '(7)'}), '(ms, kernel, iterations=7)\n', (2013, 2039), False, 'import cv2\n'), ((2057, 2089), 'cv2.imshow', 'cv2.imshow', (['"""dilation"""', 'dilation'], {}), "('dilation', dilation)\n", (2067, 2089), False, 'import cv2\n'), ((2116, 2141), 'numpy.ones', 'np.ones', (['(7, 7)', 'np.uint8'], {}), '((7, 7), np.uint8)\n', (2123, 2141), True, 'import numpy as np\n'), ((2233, 2274), 'cv2.erode', 'cv2.erode', (['dilation', 'kernel'], {'iterations': '(9)'}), '(dilation, kernel, iterations=9)\n', (2242, 2274), False, 'import cv2\n'), ((2290, 2320), 'cv2.imshow', 'cv2.imshow', (['"""erosion"""', 'erosion'], {}), "('erosion', erosion)\n", (2300, 2320), False, 'import cv2\n'), ((2504, 2521), 'numpy.array', 'np.array', (['erosion'], {}), '(erosion)\n', (2512, 2521), True, 'import numpy as np\n'), ((2687, 2718), 'cv2.imshow', 'cv2.imshow', (['"""BGR"""', 'Result_array'], {}), "('BGR', Result_array)\n", (2697, 2718), False, 'import cv2\n'), ((2737, 2751), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (2748, 2751), False, 'import cv2\n')] |
#!/usr/bin/env python
#
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the pyxir TF executor"""
import unittest
import numpy as np
import pyxir as px
from pyxir.shapes import TensorShape
from pyxir.runtime import base
from pyxir.graph.layer import xlayer
from pyxir.graph.io import xlayer_io
try:
from pyxir.runtime.tensorflow.x_2_tf_registry import X_2_TF
from pyxir.runtime.tensorflow.ops.tf_l0_input_and_other import *
from pyxir.runtime.tensorflow.ops.tf_l3_math_and_transform import *
except ModuleNotFoundError:
raise unittest.SkipTest("Skipping Tensorflow related test because Tensorflow is not available")
from .ops_infra import build_exec_layers, execute_layers
class TestTfL3MathAndTransform(unittest.TestCase):
    """Tests for the TF runtime translation of L3 math/transform XLayers."""

    def test_prelu(self):
        """PReLU keeps positive values and scales negatives by `alpha`."""

        def _check_prelu(data, alpha, expected, axis=1):
            # Build a minimal graph (input -> constant alpha -> prelu),
            # execute it and compare against the expected array.
            tf.compat.v1.reset_default_graph()
            in_name = "x"
            shape = list(data.shape)
            in_layer = px.ops.input(in_name, shape=shape)
            alpha_layer = px.ops.constant("alpha", alpha)
            prelu_layer = px.ops.prelu(
                "prelu", [in_layer, alpha_layer], axis=axis)
            exec_layers = build_exec_layers(
                [in_layer, alpha_layer, prelu_layer],
                {in_name: TensorShape(shape)}, {})
            result = execute_layers(exec_layers, {in_name: data})
            np.testing.assert_array_equal(result, expected)

        nchw_ones = np.ones((1, 2, 4, 4), dtype=np.float32)
        nhwc_ones = np.ones((1, 4, 4, 2), dtype=np.float32)
        small_alpha = np.array([.1, .1], dtype=np.float32)
        big_alpha = np.array([1.1, 1.1], dtype=np.float32)

        # Negative input is scaled by alpha.
        _check_prelu(-1. * nchw_ones, small_alpha, -0.1 * nchw_ones)
        # Positive input passes through unchanged.
        _check_prelu(nchw_ones, small_alpha, nchw_ones)
        # Distinct per-channel alpha values.
        _check_prelu(
            np.array([1., -1.], dtype=np.float32).reshape(1, 2, 1, 1),
            np.array([.1, .2], dtype=np.float32),
            np.array([1., -.2], dtype=np.float32).reshape(1, 2, 1, 1))
        # Alpha larger than one.
        _check_prelu(-1. * nchw_ones, big_alpha, -1.1 * nchw_ones)
        # Channels-last layout, positive axis index.
        _check_prelu(-2. * nhwc_ones, big_alpha, -2.2 * nhwc_ones, axis=3)
        # Channels-last layout, negative axis index.
        _check_prelu(-2. * nhwc_ones, big_alpha, -2.2 * nhwc_ones, axis=-1)

    def test_split_int(self):
        """An integer `indices` splits axis 1 into that many equal parts."""
        split_layer = xlayer.XLayer(
            type=['Split'],
            name='split1',
            shapes=[[-1, 2, 4, 4], [-1, 2, 4, 4], [-1, 2, 4, 4]],
            sizes=[32, 32, 32],
            bottoms=['in1'],
            tops=[],
            targets=[],
            attrs={'axis': 1, 'indices': 3})
        shapes = {'in1': TensorShape([1, 6, 4, 4])}
        tf_layers = X_2_TF['Split'](split_layer, shapes, {})
        self.assertEqual(len(tf_layers), 1)
        parts = tf_layers[0].forward_exec([np.ones((1, 6, 4, 4))])
        self.assertIsInstance(parts, list)
        self.assertEqual(len(parts), 3)
        for part in parts:
            self.assertEqual(part.shape, (1, 2, 4, 4))

    def test_split_tuple(self):
        """A list of `indices` gives split points; [1, 4] -> sizes 1/3/1."""
        split_layer = xlayer.XLayer(
            type=['Split'],
            name='split1',
            shapes=[[-1, 1, 4, 4], [-1, 3, 4, 4], [-1, 1, 4, 4]],
            sizes=[32, 32, 32],
            bottoms=['in1'],
            tops=[],
            targets=[],
            attrs={'axis': 1, 'indices': [1, 4]})
        shapes = {'in1': TensorShape([1, 5, 4, 4])}
        tf_layers = X_2_TF['Split'](split_layer, shapes, {})
        self.assertEqual(len(tf_layers), 1)
        parts = tf_layers[0].forward_exec([np.ones((1, 5, 4, 4))])
        self.assertIsInstance(parts, list)
        self.assertEqual(len(parts), 3)
        expected_shapes = [(1, 1, 4, 4), (1, 3, 4, 4), (1, 1, 4, 4)]
        for part, expected in zip(parts, expected_shapes):
            self.assertEqual(part.shape, expected)

    def test_take(self):
        """Take selects index 0 along axis 1, dropping that axis."""
        take_layer = xlayer.XLayer(
            type=['Take'],
            name='take1',
            shapes=[-1, 1, 4],
            sizes=[4],
            bottoms=['in1', 'indices'],
            tops=[],
            targets=[],
            attrs={'axis': 1, 'mode': 'clip'})
        shapes = {'in1': TensorShape([1, 3, 4]),
                  'indices': TensorShape([])}
        tf_layers = X_2_TF['Take'](take_layer, shapes, {})
        self.assertEqual(len(tf_layers), 1)
        sample = np.array(
            [[[1, 1], [1, 1]],
             [[2, 2], [2, 2]],
             [[2, 2], [2, 2]]], dtype=np.float32).reshape((1, 3, 4))
        result = tf_layers[0].forward_exec([sample, np.array(0, np.int32)])
        self.assertEqual(result.shape, (1, 4))
        np.testing.assert_array_equal(
            result, np.array([[1, 1, 1, 1]], dtype=np.float32))
| [
"pyxir.ops.input",
"numpy.ones",
"pyxir.ops.prelu",
"pyxir.graph.layer.xlayer.XLayer",
"pyxir.shapes.TensorShape",
"pyxir.ops.constant",
"numpy.array",
"unittest.SkipTest",
"numpy.testing.assert_array_equal"
] | [((1095, 1189), 'unittest.SkipTest', 'unittest.SkipTest', (['"""Skipping Tensorflow related test because Tensorflow is not available"""'], {}), "(\n 'Skipping Tensorflow related test because Tensorflow is not available')\n", (1112, 1189), False, 'import unittest\n'), ((3407, 3604), 'pyxir.graph.layer.xlayer.XLayer', 'xlayer.XLayer', ([], {'type': "['Split']", 'name': '"""split1"""', 'shapes': '[[-1, 2, 4, 4], [-1, 2, 4, 4], [-1, 2, 4, 4]]', 'sizes': '[32, 32, 32]', 'bottoms': "['in1']", 'tops': '[]', 'targets': '[]', 'attrs': "{'axis': 1, 'indices': 3}"}), "(type=['Split'], name='split1', shapes=[[-1, 2, 4, 4], [-1, 2,\n 4, 4], [-1, 2, 4, 4]], sizes=[32, 32, 32], bottoms=['in1'], tops=[],\n targets=[], attrs={'axis': 1, 'indices': 3})\n", (3420, 3604), False, 'from pyxir.graph.layer import xlayer\n'), ((3865, 3886), 'numpy.ones', 'np.ones', (['(1, 6, 4, 4)'], {}), '((1, 6, 4, 4))\n', (3872, 3886), True, 'import numpy as np\n'), ((4203, 4405), 'pyxir.graph.layer.xlayer.XLayer', 'xlayer.XLayer', ([], {'type': "['Split']", 'name': '"""split1"""', 'shapes': '[[-1, 1, 4, 4], [-1, 3, 4, 4], [-1, 1, 4, 4]]', 'sizes': '[32, 32, 32]', 'bottoms': "['in1']", 'tops': '[]', 'targets': '[]', 'attrs': "{'axis': 1, 'indices': [1, 4]}"}), "(type=['Split'], name='split1', shapes=[[-1, 1, 4, 4], [-1, 3,\n 4, 4], [-1, 1, 4, 4]], sizes=[32, 32, 32], bottoms=['in1'], tops=[],\n targets=[], attrs={'axis': 1, 'indices': [1, 4]})\n", (4216, 4405), False, 'from pyxir.graph.layer import xlayer\n'), ((4666, 4687), 'numpy.ones', 'np.ones', (['(1, 5, 4, 4)'], {}), '((1, 5, 4, 4))\n', (4673, 4687), True, 'import numpy as np\n'), ((4997, 5161), 'pyxir.graph.layer.xlayer.XLayer', 'xlayer.XLayer', ([], {'type': "['Take']", 'name': '"""take1"""', 'shapes': '[-1, 1, 4]', 'sizes': '[4]', 'bottoms': "['in1', 'indices']", 'tops': '[]', 'targets': '[]', 'attrs': "{'axis': 1, 'mode': 'clip'}"}), "(type=['Take'], name='take1', shapes=[-1, 1, 4], sizes=[4],\n bottoms=['in1', 'indices'], tops=[], 
targets=[], attrs={'axis': 1,\n 'mode': 'clip'})\n", (5010, 5161), False, 'from pyxir.graph.layer import xlayer\n'), ((5668, 5689), 'numpy.array', 'np.array', (['(0)', 'np.int32'], {}), '(0, np.int32)\n', (5676, 5689), True, 'import numpy as np\n'), ((1453, 1490), 'pyxir.ops.input', 'px.ops.input', (['in_name'], {'shape': 'in_shape'}), '(in_name, shape=in_shape)\n', (1465, 1490), True, 'import pyxir as px\n'), ((1512, 1543), 'pyxir.ops.constant', 'px.ops.constant', (['"""alpha"""', 'alpha'], {}), "('alpha', alpha)\n", (1527, 1543), True, 'import pyxir as px\n'), ((1560, 1607), 'pyxir.ops.prelu', 'px.ops.prelu', (['"""prelu"""', '[inX, alphaX]'], {'axis': 'axis'}), "('prelu', [inX, alphaX], axis=axis)\n", (1572, 1607), True, 'import pyxir as px\n'), ((2062, 2106), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['out', 'expected'], {}), '(out, expected)\n', (2091, 2106), True, 'import numpy as np\n'), ((2200, 2238), 'numpy.array', 'np.array', (['[0.1, 0.1]'], {'dtype': 'np.float32'}), '([0.1, 0.1], dtype=np.float32)\n', (2208, 2238), True, 'import numpy as np\n'), ((2341, 2380), 'numpy.ones', 'np.ones', (['(1, 2, 4, 4)'], {'dtype': 'np.float32'}), '((1, 2, 4, 4), dtype=np.float32)\n', (2348, 2380), True, 'import numpy as np\n'), ((2394, 2432), 'numpy.array', 'np.array', (['[0.1, 0.1]'], {'dtype': 'np.float32'}), '([0.1, 0.1], dtype=np.float32)\n', (2402, 2432), True, 'import numpy as np\n'), ((2444, 2483), 'numpy.ones', 'np.ones', (['(1, 2, 4, 4)'], {'dtype': 'np.float32'}), '((1, 2, 4, 4), dtype=np.float32)\n', (2451, 2483), True, 'import numpy as np\n'), ((2599, 2637), 'numpy.array', 'np.array', (['[0.1, 0.2]'], {'dtype': 'np.float32'}), '([0.1, 0.2], dtype=np.float32)\n', (2607, 2637), True, 'import numpy as np\n'), ((2810, 2848), 'numpy.array', 'np.array', (['[1.1, 1.1]'], {'dtype': 'np.float32'}), '([1.1, 1.1], dtype=np.float32)\n', (2818, 2848), True, 'import numpy as np\n'), ((3012, 3050), 'numpy.array', 'np.array', (['[1.1, 1.1]'], 
{'dtype': 'np.float32'}), '([1.1, 1.1], dtype=np.float32)\n', (3020, 3050), True, 'import numpy as np\n'), ((3233, 3271), 'numpy.array', 'np.array', (['[1.1, 1.1]'], {'dtype': 'np.float32'}), '([1.1, 1.1], dtype=np.float32)\n', (3241, 3271), True, 'import numpy as np\n'), ((3735, 3760), 'pyxir.shapes.TensorShape', 'TensorShape', (['[1, 6, 4, 4]'], {}), '([1, 6, 4, 4])\n', (3746, 3760), False, 'from pyxir.shapes import TensorShape\n'), ((4536, 4561), 'pyxir.shapes.TensorShape', 'TensorShape', (['[1, 5, 4, 4]'], {}), '([1, 5, 4, 4])\n', (4547, 4561), False, 'from pyxir.shapes import TensorShape\n'), ((5292, 5314), 'pyxir.shapes.TensorShape', 'TensorShape', (['[1, 3, 4]'], {}), '([1, 3, 4])\n', (5303, 5314), False, 'from pyxir.shapes import TensorShape\n'), ((5351, 5366), 'pyxir.shapes.TensorShape', 'TensorShape', (['[]'], {}), '([])\n', (5362, 5366), False, 'from pyxir.shapes import TensorShape\n'), ((5494, 5581), 'numpy.array', 'np.array', (['[[[1, 1], [1, 1]], [[2, 2], [2, 2]], [[2, 2], [2, 2]]]'], {'dtype': 'np.float32'}), '([[[1, 1], [1, 1]], [[2, 2], [2, 2]], [[2, 2], [2, 2]]], dtype=np.\n float32)\n', (5502, 5581), True, 'import numpy as np\n'), ((5863, 5905), 'numpy.array', 'np.array', (['[[1, 1, 1, 1]]'], {'dtype': 'np.float32'}), '([[1, 1, 1, 1]], dtype=np.float32)\n', (5871, 5905), True, 'import numpy as np\n'), ((1645, 1666), 'pyxir.shapes.TensorShape', 'TensorShape', (['in_shape'], {}), '(in_shape)\n', (1656, 1666), False, 'from pyxir.shapes import TensorShape\n'), ((2147, 2186), 'numpy.ones', 'np.ones', (['(1, 2, 4, 4)'], {'dtype': 'np.float32'}), '((1, 2, 4, 4), dtype=np.float32)\n', (2154, 2186), True, 'import numpy as np\n'), ((2257, 2296), 'numpy.ones', 'np.ones', (['(1, 2, 4, 4)'], {'dtype': 'np.float32'}), '((1, 2, 4, 4), dtype=np.float32)\n', (2264, 2296), True, 'import numpy as np\n'), ((2757, 2796), 'numpy.ones', 'np.ones', (['(1, 2, 4, 4)'], {'dtype': 'np.float32'}), '((1, 2, 4, 4), dtype=np.float32)\n', (2764, 2796), True, 'import numpy as 
np\n'), ((2869, 2908), 'numpy.ones', 'np.ones', (['(1, 2, 4, 4)'], {'dtype': 'np.float32'}), '((1, 2, 4, 4), dtype=np.float32)\n', (2876, 2908), True, 'import numpy as np\n'), ((2959, 2998), 'numpy.ones', 'np.ones', (['(1, 4, 4, 2)'], {'dtype': 'np.float32'}), '((1, 4, 4, 2), dtype=np.float32)\n', (2966, 2998), True, 'import numpy as np\n'), ((3071, 3110), 'numpy.ones', 'np.ones', (['(1, 4, 4, 2)'], {'dtype': 'np.float32'}), '((1, 4, 4, 2), dtype=np.float32)\n', (3078, 3110), True, 'import numpy as np\n'), ((3180, 3219), 'numpy.ones', 'np.ones', (['(1, 4, 4, 2)'], {'dtype': 'np.float32'}), '((1, 4, 4, 2), dtype=np.float32)\n', (3187, 3219), True, 'import numpy as np\n'), ((3292, 3331), 'numpy.ones', 'np.ones', (['(1, 4, 4, 2)'], {'dtype': 'np.float32'}), '((1, 4, 4, 2), dtype=np.float32)\n', (3299, 3331), True, 'import numpy as np\n'), ((2528, 2567), 'numpy.array', 'np.array', (['[1.0, -1.0]'], {'dtype': 'np.float32'}), '([1.0, -1.0], dtype=np.float32)\n', (2536, 2567), True, 'import numpy as np\n'), ((2649, 2688), 'numpy.array', 'np.array', (['[1.0, -0.2]'], {'dtype': 'np.float32'}), '([1.0, -0.2], dtype=np.float32)\n', (2657, 2688), True, 'import numpy as np\n')] |
##########################################################################
# NSAp - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Registration utilities.
"""
# System import
import os
import glob
# Package import
from pyconnectome import DEFAULT_FSL_PATH
from pyconnectome.wrapper import FSLWrapper
# Third patry
import numpy
import nibabel
from pyfreesurfer import DEFAULT_FREESURFER_PATH
from pyfreesurfer.wrapper import FSWrapper
from pyfreesurfer.utils.filetools import get_or_check_freesurfer_subjects_dir
def freesurfer_bbregister_t1todif(
        outdir,
        subject_id,
        nodif_brain,
        subjects_dir=None,
        fs_sh=DEFAULT_FREESURFER_PATH,
        fsl_sh=DEFAULT_FSL_PATH):
    """ Register a DWI volume to the FreeSurfer T1 and project the T1
    brain into the diffusion space without resampling.

    Parameters
    ----------
    outdir: str
        Destination folder.
    subject_id: str
        The subject identifier used by the FreeSurfer 'recon-all' command.
    nodif_brain: str
        Path to the preprocessed brain-only DWI volume.
    subjects_dir: str or None, default None
        FreeSurfer subjects directory; required when the $SUBJECTS_DIR
        environment variable is not set.
    fs_sh: str, default NeuroSpin path
        Bash script that sets up the FreeSurfer environment.
    fsl_sh: str, default NeuroSpin path
        Bash script that sets up the FSL environment.

    Returns
    -------
    t1_brain_to_dif: str
        The anatomical brain image in the diffusion space (no resampling).
    dif2anat_dat, dif2anat_mat: str
        The DWI to T1 rigid transform in FreeSurfer (.dat) and FSL (.mat)
        formats respectively.
    """
    # Resolve the FreeSurfer subjects directory (argument or $SUBJECTS_DIR).
    subjects_dir = get_or_check_freesurfer_subjects_dir(subjects_dir)
    # Fail early on missing inputs.
    for path in (nodif_brain, fs_sh, fsl_sh):
        if not os.path.exists(path):
            raise ValueError("File or directory does not exist: %s" % path)
    # Rigid DWI -> T1 registration with FreeSurfer's bbregister.
    dif2anat_dat = os.path.join(outdir, "dif2anat.dat")
    dif2anat_mat = os.path.join(outdir, "dif2anat.mat")
    bbregister_cmd = [
        "bbregister",
        "--s", subject_id,
        "--mov", nodif_brain,
        "--reg", dif2anat_dat,
        "--fslmat", dif2anat_mat,
        "--dti",
        "--init-fsl"]
    FSWrapper(bbregister_cmd, subjects_dir=subjects_dir, shfile=fs_sh,
              add_fsl_env=True, fsl_sh=fsl_sh)()
    # Bring the FreeSurfer T1 brain into diffusion space, no resampling.
    fs_t1_brain = os.path.join(subjects_dir, subject_id, "mri", "brain.mgz")
    t1_brain_to_dif = os.path.join(outdir, "fs_t1_brain_to_dif.nii.gz")
    vol2vol_cmd = [
        "mri_vol2vol",
        "--mov", nodif_brain,
        "--targ", fs_t1_brain,
        "--inv",
        "--no-resample",
        "--o", t1_brain_to_dif,
        "--reg", dif2anat_dat,
        "--no-save-reg"]
    FSWrapper(vol2vol_cmd, shfile=fs_sh)()
    return t1_brain_to_dif, dif2anat_dat, dif2anat_mat
def mcflirt(in_file, out_fileroot, cost="normcorr", bins=256, dof=6,
            refvol=None, reffile=None, reg_to_mean=True, mats=False,
            plots=True, verbose=0, shfile=DEFAULT_FSL_PATH):
    """ Motion-correct a 4D serie with FSL's mcflirt.

    MCFLIRT is FSL's intra-modal motion correction tool for fMRI time
    series, built on the FLIRT registration machinery.

    Parameters
    ----------
    in_file: str (mandatory)
        Input serie file path.
    out_fileroot: str (mandatory)
        Output serie file path without extension.
    cost: str (optional, default "normcorr")
        Optimization cost function, one of "mutualinfo", "woods",
        "corratio", "normcorr", "normmi", "leastsquares".
    bins: int (optional, default 256)
        Number of histogram bins.
    dof: int (optional, default 6)
        Number of transform degrees of freedom.
    refvol: int (optional, default None)
        Index of the reference volume (mcflirt defaults to the middle one).
    reffile: str (optional, default None)
        Separate 3D image file used as the registration target.
    reg_to_mean: bool (optional, default True)
        If set, register to the mean, otherwise to the middle volume.
    mats: bool (optional, default False)
        If set, save the transformation matrices in <out_fileroot>.mat.
    plots: bool (optional, default True)
        If set, save the transformation parameters in <out_fileroot>.par.
    verbose: int (optional, default 0)
        Verbosity level, 0 is least.
    shfile: str (optional, default DEFAULT_FSL_PATH)
        The FSL configuration batch.

    Returns
    -------
    func_file: str
        Output realigned serie.
    mean_file: str
        Mean serie template.
    par_file: str or None
        The motion correction parameters (None when `plots` is False).
    """
    # Validate the inputs.
    if not os.path.isfile(in_file):
        raise ValueError(
            "'{0}' is not a valid input file.".format(in_file))
    valid_costs = ("mutualinfo", "woods", "corratio", "normcorr", "normmi",
                   "leastsquares")
    if cost not in valid_costs:
        raise ValueError(
            "'{0}' is not a valid optimization cost function.".format(cost))
    # Assemble the mcflirt command line.
    cmd = ["mcflirt",
           "-in", in_file,
           "-out", out_fileroot,
           "-cost", cost,
           "-bins", str(bins),
           "-dof", str(dof),
           "-verbose", str(verbose)]
    if refvol is not None:
        cmd += ["-refvol", str(refvol)]
    if reffile is not None:
        cmd += ["-reffile", reffile]
    if reg_to_mean:
        cmd += ["-meanvol"]
    if mats:
        cmd += ["-mats"]
    if plots:
        cmd += ["-plots"]
    # Execute mcflirt in the configured FSL environment.
    FSLWrapper(shfile=shfile)(cmd=cmd)
    # Locate the realigned serie among the generated files, excluding the
    # motion parameter file (.par) and the .mat matrices directory.
    candidates = [path for path in glob.glob(out_fileroot + ".*")
                  if not path.endswith(".par") and os.path.isfile(path)]
    if len(candidates) != 1:
        raise ValueError(
            "Expect only one mcflirt output file, not {0}.".format(
                candidates))
    func_file = candidates[0]
    if reg_to_mean:
        # mcflirt already wrote the mean volume.
        mean_file = glob.glob(out_fileroot + "_mean_reg.*")[0]
    else:
        # No mean was produced by mcflirt: compute and save it ourselves.
        serie = nibabel.load(func_file)
        template = numpy.mean(serie.get_data(), axis=-1)
        mean_file = out_fileroot + "_mean_reg.nii.gz"
        nibabel.save(nibabel.Nifti1Image(template, serie.affine), mean_file)
    par_file = out_fileroot + ".par" if plots else None
    return func_file, mean_file, par_file
def flirt(in_file, ref_file, omat=None, out=None, init=None, cost="corratio",
          usesqform=False, displayinit=False, anglerep="euler", bins=256,
          interp="trilinear", dof=12, applyxfm=False, applyisoxfm=None,
          nosearch=False, wmseg=None, verbose=0, shfile=DEFAULT_FSL_PATH):
    """ Linear (affine) registration with FSL's flirt.

    Basic usages:
        flirt [options] -in <inputvol> -ref <refvol> -out <outputvol>
        flirt [options] -in <inputvol> -ref <refvol> -omat <outputmatrix>
        flirt [options] -in <inputvol> -ref <refvol> -applyxfm -init <matrix>
              -out <outputvol>

    Parameters
    ----------
    in_file: str (mandatory)
        Input volume.
    ref_file: str (mandatory)
        Reference volume.
    omat: str (optional, default None)
        Output matrix filename, 4x4 ascii format.
    out: str (optional, default None)
        Output volume.
    init: str (optional, default None)
        Input 4x4 affine matrix.
    cost: str (optional, default "corratio")
        One of "mutualinfo", "corratio", "normcorr", "normmi", "leastsq",
        "labeldiff", "bbr".
    usesqform: bool (optional, default False)
        Initialise using the appropriate sform or qform.
    displayinit: bool (optional, default False)
        Display the initial matrix.
    anglerep: str (optional, default "euler")
        One of "quaternion", "euler".
    bins: int (optional, default 256)
        Number of histogram bins.
    interp: str (optional, default "trilinear")
        Final interpolation, one of "trilinear", "nearestneighbour",
        "sinc", "spline".
    dof: int (optional, default 12)
        Number of transform degrees of freedom.
    applyxfm: bool (optional, default False)
        Apply the transform without optimisation - requires `init`.
    applyisoxfm: float (optional, default None)
        As `applyxfm` but forces isotropic resampling at the given scale.
    nosearch: bool (optional, default False)
        If set, perform no search to initialize the optimization.
    wmseg: str (optional, default None)
        White matter segmentation volume, needed by the BBR cost function.
    verbose: int (optional, default 0)
        Verbosity level, 0 is least.
    shfile: str (optional, default DEFAULT_FSL_PATH)
        The FSL configuration batch.

    Returns
    -------
    out: str
        Output volume.
    omat: str or None
        Output matrix filename (None when `applyxfm` is set and no `omat`
        was requested).
    """
    # Validate the input volumes.
    for path in (in_file, ref_file):
        if not os.path.isfile(path):
            raise ValueError(
                "'{0}' is not a valid input file.".format(path))
    # Assemble the flirt command line.
    cmd = ["flirt",
           "-in", in_file,
           "-ref", ref_file,
           "-cost", cost,
           "-searchcost", cost,
           "-anglerep", anglerep,
           "-bins", str(bins),
           "-interp", interp,
           "-dof", str(dof),
           "-verbose", str(verbose)]
    # Boolean switches.
    for enabled, flag in ((usesqform, "-usesqform"),
                          (displayinit, "-displayinit"),
                          (applyxfm, "-applyxfm"),
                          (nosearch, "-nosearch")):
        if enabled:
            cmd.append(flag)
    if init is not None:
        cmd.extend(["-init", init])
    if applyisoxfm is not None:
        cmd.extend(["-applyisoxfm", str(applyisoxfm)])
    if cost == "bbr":
        cmd.extend(["-wmseg", wmseg])
    # Default output locations are derived from the input file name.
    folder = os.path.dirname(in_file)
    base = os.path.basename(in_file).split(".")[0]
    if out is None:
        out = os.path.join(folder, "flirt_out_{0}.nii.gz".format(base))
    cmd.extend(["-out", out])
    # No output matrix is produced in apply-transform mode unless the
    # caller explicitly asked for one.
    if omat is None and not applyxfm:
        omat = os.path.join(folder, "flirt_omat_{0}.txt".format(base))
    if omat is not None:
        cmd.extend(["-omat", omat])
    # Execute flirt in the configured FSL environment.
    FSLWrapper(shfile=shfile)(cmd=cmd)
    return out, omat
def fnirt(in_file, ref_file, affine_file, outdir, inmask_file=None, verbose=0,
          shfile=DEFAULT_FSL_PATH):
    """ Wraps command fnirt.

    Perform a non-linear registration of 'in_file' to 'ref_file', starting
    from the affine initialization in 'affine_file'. All outputs are
    written to 'outdir' with names derived from the input basename.

    Parameters
    ----------
    in_file: str (mandatory)
        Input volume.
    ref_file: str (mandatory)
        Reference volume.
    affine_file: str (optional, default None)
        Affine matrix filename in 4x4 ascii format.
    outdir: str
        The destination folder.
    inmask_file: str (optional, default None)
        Name of file with mask in input image space.
    verbose: int (optional)
        0 is least and default.
    shfile: str (optional, default DEFAULT_FSL_PATH)
        The FSL configuration batch.

    Returns
    -------
    outputs: list of str
        The seven output files, in order:
        cout: name of output file with field coefficients.
        iout: name of output image.
        fout: name of output file with field.
        jout: name of file for writing out the Jacobian of the field.
        refout: name of file for writing out intensity modulated.
        intout: name of files for writing information pertaining to
            intensity mapping.
        logout: name of log-file.
    """
    # Check the input parameters: every provided path must exist.
    for filename in (in_file, ref_file, affine_file, inmask_file):
        if filename is not None and not os.path.isfile(filename):
            raise ValueError(
                "'{0}' is not a valid input file.".format(filename))
    # Define the FSL command
    cmd = ["fnirt",
           "--ref={0}".format(ref_file),
           "--in={0}".format(in_file),
           "--aff={0}".format(affine_file),
           "--verbose={0}".format(verbose)]
    if inmask_file is not None:
        cmd += ["--inmask={0}".format(inmask_file)]
    # Derive output file names from the input basename.
    basename = os.path.basename(in_file).split(".")[0]
    outputs = []
    for param in ("cout", "iout", "fout", "jout", "refout", "intout",
                  "logout"):
        # BUGFIX: the original test was `param in ("logout")`, which is a
        # substring check against the string "logout" (the parentheses do
        # not make a tuple), not a membership test. It worked only by
        # accident because no other param is a substring of "logout".
        # The log is a text file; every other output is a NIfTI volume.
        ext = ".txt" if param == "logout" else ".nii.gz"
        outputs.append(
            os.path.join(outdir, "{0}_{1}{2}".format(param, basename, ext)))
        cmd += ["--{0}={1}".format(param, outputs[-1])]
    # Call fnirt
    fslprocess = FSLWrapper(shfile=shfile)
    fslprocess(cmd=cmd)
    return outputs
def applywarp(in_file, ref_file, out_file, warp_file, pre_affine_file=None,
              post_affine_file=None, interp="trilinear", verbose=0,
              shfile=DEFAULT_FSL_PATH):
    """ Apply FSL deformation field.

    Resamples 'in_file' into the space of 'ref_file' using a warp field
    and optional pre/post affine transforms.

    Parameters
    ----------
    in_file: str
        filename of input image (to be warped).
    ref_file: str
        filename for reference image.
    out_file: str
        filename for output (warped) image.
    warp_file: str
        filename for warp/coefficient (volume).
    pre_affine_file: str
        filename for pre-transform (affine matrix).
    post_affine_file: str
        filename for post-transform (affine matrix).
    interp: str (optional, default "trilinear")
        interpolation method {nn,trilinear,sinc,spline}
    verbose: int, default 0
        the verbosity level.
    shfile: str, default DEFAULT_FSL_PATH
        The FSL configuration batch.
    """
    # Every supplied path must point to an existing file.
    for path in (in_file, ref_file, pre_affine_file, post_affine_file):
        if path is not None and not os.path.isfile(path):
            raise ValueError(
                "'{0}' is not a valid input file.".format(path))
    # Assemble the applywarp command line.
    cmd = [
        "applywarp",
        "-i", in_file,
        "-r", ref_file,
        "-o", out_file,
        "-w", warp_file,
        "--interp={0}".format(interp),
        "--verbose={0}".format(verbose),
    ]
    if pre_affine_file is not None:
        cmd += ["--premat={0}".format(pre_affine_file)]
    if post_affine_file is not None:
        cmd += ["--postmat={0}".format(post_affine_file)]
    # Run applywarp through the FSL wrapper.
    fslprocess = FSLWrapper(shfile=shfile)
    fslprocess(cmd=cmd)
def flirt2aff(mat_file, in_file, ref_file):
    """ Map from 'in_file' image voxels to 'ref_file' voxels given `omat` FSL
    affine transformation.

    Parameters
    ------------
    mat_file: str (mandatory)
        filename of output '-omat' transformation file from FSL flirt.
    in_file: str (mandatory)
        filename of the image passed to flirt as the '-in' image.
    ref_file: str (mandatory)
        filename of the image passed to flirt as the '-ref' image.

    Returns
    -------
    omat: array (4, 4)
        array containing the transform from voxel coordinates in image
        for 'in_file' to voxel coordinates in image for 'ref_file'.
    """
    # All three inputs must be existing files.
    for path in (mat_file, in_file, ref_file):
        if not os.path.isfile(path):
            raise ValueError("'{0}' is not a valid input "
                             "file.".format(path))
    # Load the FLIRT affine and both images.
    flirt_affine = numpy.loadtxt(mat_file)
    in_img = nibabel.load(in_file)
    ref_img = nibabel.load(ref_file)
    in_hdr = in_img.get_header()
    ref_hdr = ref_img.get_header()

    def _x_flipper(n):
        # Affine that mirrors the x axis of an image with n voxels along x.
        flip = numpy.diag([-1, 1, 1, 1])
        flip[0, 3] = n - 1
        return flip

    # Build the voxel -> scaled-voxel transform for each image; mirror the
    # x axis whenever the image affine has a non-negative determinant.
    inspace = numpy.diag(in_hdr.get_zooms()[:3] + (1, ))
    refspace = numpy.diag(ref_hdr.get_zooms()[:3] + (1, ))
    if numpy.linalg.det(in_img.get_affine()) >= 0:
        inspace = inspace.dot(_x_flipper(in_hdr.get_data_shape()[0]))
    if numpy.linalg.det(ref_img.get_affine()) >= 0:
        refspace = refspace.dot(_x_flipper(ref_hdr.get_data_shape()[0]))
    # Compose the voxel-to-voxel mapping: refspace^-1 . (flirt . inspace).
    return numpy.linalg.inv(refspace).dot(numpy.dot(flirt_affine, inspace))
| [
"os.path.exists",
"nibabel.save",
"nibabel.load",
"pyfreesurfer.utils.filetools.get_or_check_freesurfer_subjects_dir",
"pyfreesurfer.wrapper.FSWrapper",
"os.path.join",
"numpy.diag",
"os.path.isfile",
"os.path.dirname",
"numpy.loadtxt",
"numpy.linalg.inv",
"numpy.dot",
"os.path.basename",
... | [((2114, 2164), 'pyfreesurfer.utils.filetools.get_or_check_freesurfer_subjects_dir', 'get_or_check_freesurfer_subjects_dir', (['subjects_dir'], {}), '(subjects_dir)\n', (2150, 2164), False, 'from pyfreesurfer.utils.filetools import get_or_check_freesurfer_subjects_dir\n'), ((2563, 2599), 'os.path.join', 'os.path.join', (['outdir', '"""dif2anat.dat"""'], {}), "(outdir, 'dif2anat.dat')\n", (2575, 2599), False, 'import os\n'), ((2619, 2655), 'os.path.join', 'os.path.join', (['outdir', '"""dif2anat.mat"""'], {}), "(outdir, 'dif2anat.mat')\n", (2631, 2655), False, 'import os\n'), ((3089, 3147), 'os.path.join', 'os.path.join', (['subjects_dir', 'subject_id', '"""mri"""', '"""brain.mgz"""'], {}), "(subjects_dir, subject_id, 'mri', 'brain.mgz')\n", (3101, 3147), False, 'import os\n'), ((3170, 3219), 'os.path.join', 'os.path.join', (['outdir', '"""fs_t1_brain_to_dif.nii.gz"""'], {}), "(outdir, 'fs_t1_brain_to_dif.nii.gz')\n", (3182, 3219), False, 'import os\n'), ((6470, 6495), 'pyconnectome.wrapper.FSLWrapper', 'FSLWrapper', ([], {'shfile': 'shfile'}), '(shfile=shfile)\n', (6480, 6495), False, 'from pyconnectome.wrapper import FSLWrapper\n'), ((10702, 10726), 'os.path.dirname', 'os.path.dirname', (['in_file'], {}), '(in_file)\n', (10717, 10726), False, 'import os\n'), ((11185, 11210), 'pyconnectome.wrapper.FSLWrapper', 'FSLWrapper', ([], {'shfile': 'shfile'}), '(shfile=shfile)\n', (11195, 11210), False, 'from pyconnectome.wrapper import FSLWrapper\n'), ((13433, 13458), 'pyconnectome.wrapper.FSLWrapper', 'FSLWrapper', ([], {'shfile': 'shfile'}), '(shfile=shfile)\n', (13443, 13458), False, 'from pyconnectome.wrapper import FSLWrapper\n'), ((15161, 15186), 'pyconnectome.wrapper.FSLWrapper', 'FSLWrapper', ([], {'shfile': 'shfile'}), '(shfile=shfile)\n', (15171, 15186), False, 'from pyconnectome.wrapper import FSLWrapper\n'), ((16161, 16184), 'numpy.loadtxt', 'numpy.loadtxt', (['mat_file'], {}), '(mat_file)\n', (16174, 16184), False, 'import numpy\n'), ((16198, 16219), 
'nibabel.load', 'nibabel.load', (['in_file'], {}), '(in_file)\n', (16210, 16219), False, 'import nibabel\n'), ((16234, 16256), 'nibabel.load', 'nibabel.load', (['ref_file'], {}), '(ref_file)\n', (16246, 16256), False, 'import nibabel\n'), ((2896, 2991), 'pyfreesurfer.wrapper.FSWrapper', 'FSWrapper', (['cmd_1a'], {'subjects_dir': 'subjects_dir', 'shfile': 'fs_sh', 'add_fsl_env': '(True)', 'fsl_sh': 'fsl_sh'}), '(cmd_1a, subjects_dir=subjects_dir, shfile=fs_sh, add_fsl_env=True,\n fsl_sh=fsl_sh)\n', (2905, 2991), False, 'from pyfreesurfer.wrapper import FSWrapper\n'), ((3486, 3517), 'pyfreesurfer.wrapper.FSWrapper', 'FSWrapper', (['cmd_1b'], {'shfile': 'fs_sh'}), '(cmd_1b, shfile=fs_sh)\n', (3495, 3517), False, 'from pyfreesurfer.wrapper import FSWrapper\n'), ((5589, 5612), 'os.path.isfile', 'os.path.isfile', (['in_file'], {}), '(in_file)\n', (5603, 5612), False, 'import os\n'), ((6959, 6982), 'nibabel.load', 'nibabel.load', (['func_file'], {}), '(func_file)\n', (6971, 6982), False, 'import nibabel\n'), ((7056, 7097), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['mean_data', 'im.affine'], {}), '(mean_data, im.affine)\n', (7075, 7097), False, 'import nibabel\n'), ((7160, 7192), 'nibabel.save', 'nibabel.save', (['im_mean', 'mean_file'], {}), '(im_mean, mean_file)\n', (7172, 7192), False, 'import nibabel\n'), ((16399, 16424), 'numpy.diag', 'numpy.diag', (['[-1, 1, 1, 1]'], {}), '([-1, 1, 1, 1])\n', (16409, 16424), False, 'import numpy\n'), ((16949, 16975), 'numpy.linalg.inv', 'numpy.linalg.inv', (['refspace'], {}), '(refspace)\n', (16965, 16975), False, 'import numpy\n'), ((16977, 17009), 'numpy.dot', 'numpy.dot', (['flirt_affine', 'inspace'], {}), '(flirt_affine, inspace)\n', (16986, 17009), False, 'import numpy\n'), ((2284, 2301), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (2298, 2301), False, 'import os\n'), ((6584, 6614), 'glob.glob', 'glob.glob', (["(out_fileroot + '.*')"], {}), "(out_fileroot + '.*')\n", (6593, 6614), False, 'import glob\n'), 
((6893, 6932), 'glob.glob', 'glob.glob', (["(out_fileroot + '_mean_reg.*')"], {}), "(out_fileroot + '_mean_reg.*')\n", (6902, 6932), False, 'import glob\n'), ((9822, 9846), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (9836, 9846), False, 'import os\n'), ((15982, 16006), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (15996, 16006), False, 'import os\n'), ((6666, 6686), 'os.path.isfile', 'os.path.isfile', (['elem'], {}), '(elem)\n', (6680, 6686), False, 'import os\n'), ((10742, 10767), 'os.path.basename', 'os.path.basename', (['in_file'], {}), '(in_file)\n', (10758, 10767), False, 'import os\n'), ((12562, 12586), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (12576, 12586), False, 'import os\n'), ((13004, 13029), 'os.path.basename', 'os.path.basename', (['in_file'], {}), '(in_file)\n', (13020, 13029), False, 'import os\n'), ((14560, 14584), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (14574, 14584), False, 'import os\n')] |
# np39.py
# 39. Zipf's law
# Task: plot a log-log graph with word frequency rank on the x axis and
# word frequency on the y axis.
"「単語の出現頻度順位を横軸,その出現頻度を縦軸として,両対数グラフをプロットせよ.」"
cat = 'neko.txt.mecab'  # path to the MeCab-analyzed corpus
with open(cat)as f:
    # read the whole file and split it into lines (one morpheme per line)
    text = f.read().splitlines()
import re
# split each line on tab and comma characters into the MeCab output fields
nlist = [re.split("[\t|,]", lines) for lines in text]
catlist = []
for line in nlist:
    linelist = []
    if line[0] != "EOS":
        # exclude full-width spaces
        #if line[0] != "\u3000":
        # build a dict with surface form, base form, POS and POS subtype
        dic = {"surface": line[0],
            "base": line[7].replace('\n',''),
            "pos": line[1],
            "pos1": line[2]}
        #catlist.append(dic)
        linelist.append(dic)
        catlist.append(linelist)
dolist = []
# flatten the nested list, collecting the base form of every morpheme
for zip_ in catlist:
    for d in zip_:
        dolist.append(d['base'])
# collections.Counter() counts the occurrences of each word in the list
# and most_common() returns the elements ordered by frequency.
from collections import Counter
f_most_common = [f[1] for f in Counter(dolist).most_common()]
# Draw the log-log scatter plot (rank vs. frequency).
import matplotlib.pyplot as plt
import numpy as np
plt.scatter(np.log(range(1, len(f_most_common)+1)), np.log(f_most_common))
plt.show(); | [
"collections.Counter",
"re.split",
"numpy.log",
"matplotlib.pyplot.show"
] | [((999, 1009), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1007, 1009), True, 'import matplotlib.pyplot as plt\n'), ((201, 226), 're.split', 're.split', (['"""[\t|,]"""', 'lines'], {}), "('[\\t|,]', lines)\n", (209, 226), False, 'import re\n'), ((976, 997), 'numpy.log', 'np.log', (['f_most_common'], {}), '(f_most_common)\n', (982, 997), True, 'import numpy as np\n'), ((828, 843), 'collections.Counter', 'Counter', (['dolist'], {}), '(dolist)\n', (835, 843), False, 'from collections import Counter\n')] |
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example that infers a 2D embedding using real human similarity data.
Similarity judgment data comes from an experiment using images
from 16 bird species, with 13 images per species (208 total).
Results are saved in the directory specified by `fp_example`. By
default, the beginning of this path is `~/psiz_examples` where `~`
is determined by `Path.home()`.
"""
import copy
import os
from pathlib import Path
import shutil
import imageio
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import psiz
# Uncomment the following line to force eager execution.
# tf.config.run_functions_eagerly(True)
# Uncomment and edit the following to control GPU visibility.
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def main():
    """Run script.

    Loads the hosted 'birds-16' similarity judgment dataset, infers a 2D
    variational embedding (with restarts), evaluates it on a held-out test
    set, and writes a visualization of the posterior to the example folder.
    """
    # Settings.
    fp_example = Path.home() / Path('psiz_examples', 'rank', 'vi_birds_16')
    fp_board = fp_example / Path('logs', 'fit', 'r0')
    n_dim = 2
    n_restart = 1
    epochs = 1000
    batch_size = 128
    # Directory preparation.
    fp_example.mkdir(parents=True, exist_ok=True)
    # Remove existing TensorBoard logs.
    if fp_board.exists():
        shutil.rmtree(fp_board)
    # Plot settings.
    small_size = 6
    medium_size = 8
    large_size = 10
    plt.rc('font', size=small_size)
    plt.rc('axes', titlesize=medium_size)
    plt.rc('axes', labelsize=small_size)
    plt.rc('xtick', labelsize=small_size)
    plt.rc('ytick', labelsize=small_size)
    plt.rc('legend', fontsize=small_size)
    plt.rc('figure', titlesize=large_size)
    # Import hosted rank dataset of 16 bird species.
    (obs, catalog) = psiz.datasets.load('birds-16', verbose=1)
    # Partition observations into 80% train, 10% validation and 10% test set.
    obs_train, obs_val, obs_test = psiz.utils.standard_split(obs)
    print(
        '\nData Split\n obs_train:'
        ' {0}\n obs_val: {1}\n obs_test: {2}'.format(
            obs_train.n_trial, obs_val.n_trial, obs_test.n_trial
        )
    )
    # Convert observations to TF dataset.
    # Only the training split is shuffled; validation/test order is fixed.
    ds_obs_train = obs_train.as_dataset().shuffle(
        buffer_size=obs_train.n_trial, reshuffle_each_iteration=True
    ).batch(batch_size, drop_remainder=False)
    ds_obs_val = obs_val.as_dataset().batch(
        batch_size, drop_remainder=False
    )
    ds_obs_test = obs_test.as_dataset().batch(
        batch_size, drop_remainder=False
    )
    # Build VI model.
    model = build_model(catalog.n_stimuli, n_dim, obs_train.n_trial)
    # Compile settings.
    compile_kwargs = {
        'loss': tf.keras.losses.CategoricalCrossentropy(),
        'optimizer': tf.keras.optimizers.Adam(learning_rate=.001),
        'weighted_metrics': [
            tf.keras.metrics.CategoricalCrossentropy(name='cce')
        ]
    }
    # Define callbacks.
    cb_board = psiz.keras.callbacks.TensorBoardRe(
        log_dir=fp_board, histogram_freq=0,
        write_graph=False, write_images=False, update_freq='epoch',
        profile_batch=0, embeddings_freq=0, embeddings_metadata=None
    )
    cb_early = psiz.keras.callbacks.EarlyStoppingRe(
        'loss', patience=100, mode='min', restore_best_weights=False,
        verbose=1
    )
    callbacks = [cb_board, cb_early]
    # Infer embedding with restarts.
    restarter = psiz.keras.Restarter(
        model, compile_kwargs=compile_kwargs, monitor='val_loss',
        n_restart=n_restart
    )
    restart_record = restarter.fit(
        x=ds_obs_train, validation_data=ds_obs_val, epochs=epochs,
        callbacks=callbacks, verbose=0
    )
    # Keep the best model found across restarts.
    model = restarter.model
    train_loss = restart_record.record['loss'][0]
    # NOTE(review): train_time is computed but never used below.
    train_time = restart_record.record['ms_per_epoch'][0]
    val_loss = restart_record.record['val_loss'][0]
    # Evaluate test set by taking multiple samples.
    tf.keras.backend.clear_session()
    model.n_sample = 100
    model.compile(**compile_kwargs)
    test_metrics = model.evaluate(ds_obs_test, verbose=0, return_dict=True)
    test_loss = test_metrics['loss']
    print(
        ' train_loss: {0:.2f} | val_loss: {1:.2f} | '
        'test_loss: {2:.2f} | '.format(train_loss, val_loss, test_loss)
    )
    # Create visual.
    fig = plt.figure(figsize=(6.5, 4), dpi=200)
    draw_figure(
        fig, model, catalog
    )
    fname = fp_example / Path('visual.tiff')
    plt.savefig(
        os.fspath(fname), format='tiff', bbox_inches="tight", dpi=300
    )
def build_model(n_stimuli, n_dim, n_obs_train):
    """Build model.

    Arguments:
        n_stimuli: Integer indicating the number of stimuli in the
            embedding.
        n_dim: Integer indicating the dimensionality of the embedding.
        n_obs_train: Integer indicating the number of training
            observations. Used to determine KL weight for variational
            inference.

    Returns:
        model: A TensorFlow Keras model.

    """
    kl_weight = 1. / n_obs_train
    # Note that scale of the prior can be misspecified. The true scale
    # is .17, but halving (.085) or doubling (.34) still works well. When
    # the prior scale is much smaller than appropriate and there is
    # little data, the posterior will be driven by an incorrect prior.
    prior_scale = .2  # Mispecified to demonstrate robustness.

    def _scale_init():
        # Initializer placing the (softplus-parameterized) scale at
        # prior_scale.
        return tf.keras.initializers.Constant(
            tfp.math.softplus_inverse(prior_scale).numpy()
        )

    # Variational posterior: one diagonal normal per stimulus.
    posterior = psiz.keras.layers.EmbeddingNormalDiag(
        n_stimuli+1, n_dim, mask_zero=True,
        scale_initializer=_scale_init()
    )
    # Prior: a single normal shared across all stimuli and dimensions,
    # with a fixed (non-trainable) zero mean.
    shared_normal = psiz.keras.layers.EmbeddingNormalDiag(
        1, 1,
        loc_initializer=tf.keras.initializers.Constant(0.),
        scale_initializer=_scale_init(),
        loc_trainable=False,
    )
    prior = psiz.keras.layers.EmbeddingShared(
        n_stimuli+1, n_dim, mask_zero=True,
        embedding=shared_normal
    )
    stimuli = psiz.keras.layers.EmbeddingVariational(
        posterior=posterior, prior=prior,
        kl_weight=kl_weight, kl_n_sample=30
    )
    # Fixed Euclidean distance (rho=2, unit weights).
    distance = psiz.keras.layers.Minkowski(
        rho_initializer=tf.keras.initializers.Constant(2.),
        w_initializer=tf.keras.initializers.Constant(1.),
        trainable=False
    )
    # Fixed exponential similarity kernel.
    similarity = psiz.keras.layers.ExponentialSimilarity(
        trainable=False,
        beta_initializer=tf.keras.initializers.Constant(10.),
        tau_initializer=tf.keras.initializers.Constant(1.),
        gamma_initializer=tf.keras.initializers.Constant(0.),
    )
    kernel = psiz.keras.layers.DistanceBased(
        distance=distance, similarity=similarity
    )
    return psiz.keras.models.Rank(stimuli=stimuli, kernel=kernel, n_sample=1)
def draw_figure(fig, model, catalog):
    """Draw the posterior embedding with 95% HDI ellipses onto `fig`."""
    # Settings.
    marker_size = 5
    line_width = .5
    ellipse_alpha = .5
    gs = fig.add_gridspec(1, 1)
    class_arr = catalog.stimuli.class_id.values
    unique_class_arr = np.unique(class_arr)
    # One color per bird species.
    n_class = len(unique_class_arr)
    cmap = matplotlib.cm.get_cmap('jet')
    norm = matplotlib.colors.Normalize(vmin=0., vmax=n_class)
    class_color_array = cmap(norm(range(n_class)))
    # Plot embeddings.
    ax = fig.add_subplot(gs[0, 0])
    # Posterior location and covariance of every stimulus.
    loc, cov = unpack_mvn(model.stimuli.embeddings)
    if model.stimuli.mask_zero:
        # Drop placeholder stimulus.
        loc = loc[1:]
        cov = cov[1:]
    # Symmetric axis limits with a 30% margin.
    z_max = 1.3 * np.max(np.abs(loc))
    z_limits = [-z_max, z_max]
    # Draw stimuli 95% HDI ellipses.
    exemplar_color_array = class_color_array[squeeze_indices(class_arr)]
    psiz.mplot.hdi_bvn(
        loc, cov, ax, p=.95, edgecolor=exemplar_color_array, lw=line_width,
        alpha=ellipse_alpha, fill=False
    )
    # Draw stimuli modes, one scatter per species so legends are labeled.
    for idx_class in unique_class_arr:
        is_member = np.equal(class_arr, idx_class)
        ax.scatter(
            loc[is_member, 0], loc[is_member, 1], s=marker_size,
            c=exemplar_color_array[is_member], marker='o',
            edgecolors='none', zorder=100,
            label=catalog.class_label[idx_class]
        )
    ax.set_xlim(z_limits)
    ax.set_ylim(z_limits)
    ax.set_aspect('equal')
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title('Embeddings (95% HDI)')
    ax.legend(bbox_to_anchor=(1.35, 0.9), shadow=True, title="Bird Species")
    gs.tight_layout(fig)
def squeeze_indices(idx_arr):
    """Squeeze indices of array.

    Maps the (possibly sparse) values in `idx_arr` onto contiguous
    indices 0..k-1, preserving value order: the smallest value maps to 0,
    the next smallest to 1, and so on.

    Arguments:
        idx_arr: An integer array of indices.

    Returns:
        An integer array with the same shape as `idx_arr`.

    """
    # `np.unique(..., return_inverse=True)` computes exactly this mapping
    # in one vectorized pass instead of one Python loop per unique value.
    # The inverse array is flattened, so restore the input shape.
    _, inverse = np.unique(idx_arr, return_inverse=True)
    return inverse.reshape(np.shape(idx_arr))
def unpack_mvn(dist):
    """Unpack multivariate normal distribution.

    Arguments:
        dist: A batched diagonal-normal distribution exposing `mean()`
            and `variance()` that return tensors (with a `numpy()`
            method) of shape (n_stimuli, n_dim). Assumes the variance
            holds diagonal elements only.

    Returns:
        loc: Array of means, shape (n_stimuli, n_dim).
        cov: Array of full covariance matrices, shape
            (n_stimuli, n_dim, n_dim), with the variances on the
            diagonal and zeros elsewhere.

    """
    loc = dist.mean().numpy()
    v = dist.variance().numpy()
    # Expand the diagonal variances into full covariance matrices with a
    # single broadcast (cov[i, j, k] = v[i, k] * eye[j, k]) instead of a
    # Python loop over stimuli.
    n_dim = v.shape[1]
    cov = v[:, np.newaxis, :] * np.eye(n_dim)
    return loc, cov
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
| [
"psiz.utils.standard_split",
"tensorflow_probability.math.softplus_inverse",
"pathlib.Path.home",
"psiz.keras.Restarter",
"numpy.equal",
"tensorflow.keras.backend.clear_session",
"psiz.keras.callbacks.EarlyStoppingRe",
"tensorflow.keras.losses.CategoricalCrossentropy",
"os.fspath",
"pathlib.Path",... | [((2063, 2094), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'small_size'}), "('font', size=small_size)\n", (2069, 2094), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2136), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'medium_size'}), "('axes', titlesize=medium_size)\n", (2105, 2136), True, 'import matplotlib.pyplot as plt\n'), ((2141, 2177), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'small_size'}), "('axes', labelsize=small_size)\n", (2147, 2177), True, 'import matplotlib.pyplot as plt\n'), ((2182, 2219), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'small_size'}), "('xtick', labelsize=small_size)\n", (2188, 2219), True, 'import matplotlib.pyplot as plt\n'), ((2224, 2261), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'small_size'}), "('ytick', labelsize=small_size)\n", (2230, 2261), True, 'import matplotlib.pyplot as plt\n'), ((2266, 2303), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'small_size'}), "('legend', fontsize=small_size)\n", (2272, 2303), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2346), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'large_size'}), "('figure', titlesize=large_size)\n", (2314, 2346), True, 'import matplotlib.pyplot as plt\n'), ((2422, 2463), 'psiz.datasets.load', 'psiz.datasets.load', (['"""birds-16"""'], {'verbose': '(1)'}), "('birds-16', verbose=1)\n", (2440, 2463), False, 'import psiz\n'), ((2578, 2608), 'psiz.utils.standard_split', 'psiz.utils.standard_split', (['obs'], {}), '(obs)\n', (2603, 2608), False, 'import psiz\n'), ((3606, 3806), 'psiz.keras.callbacks.TensorBoardRe', 'psiz.keras.callbacks.TensorBoardRe', ([], {'log_dir': 'fp_board', 'histogram_freq': '(0)', 'write_graph': '(False)', 'write_images': '(False)', 'update_freq': '"""epoch"""', 'profile_batch': '(0)', 'embeddings_freq': '(0)', 'embeddings_metadata': 'None'}), "(log_dir=fp_board, 
histogram_freq=0,\n write_graph=False, write_images=False, update_freq='epoch',\n profile_batch=0, embeddings_freq=0, embeddings_metadata=None)\n", (3640, 3806), False, 'import psiz\n'), ((3844, 3957), 'psiz.keras.callbacks.EarlyStoppingRe', 'psiz.keras.callbacks.EarlyStoppingRe', (['"""loss"""'], {'patience': '(100)', 'mode': '"""min"""', 'restore_best_weights': '(False)', 'verbose': '(1)'}), "('loss', patience=100, mode='min',\n restore_best_weights=False, verbose=1)\n", (3880, 3957), False, 'import psiz\n'), ((4067, 4171), 'psiz.keras.Restarter', 'psiz.keras.Restarter', (['model'], {'compile_kwargs': 'compile_kwargs', 'monitor': '"""val_loss"""', 'n_restart': 'n_restart'}), "(model, compile_kwargs=compile_kwargs, monitor=\n 'val_loss', n_restart=n_restart)\n", (4087, 4171), False, 'import psiz\n'), ((4583, 4615), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (4613, 4615), True, 'import tensorflow as tf\n'), ((4968, 5005), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.5, 4)', 'dpi': '(200)'}), '(figsize=(6.5, 4), dpi=200)\n', (4978, 5005), True, 'import matplotlib.pyplot as plt\n'), ((6727, 6861), 'psiz.keras.layers.EmbeddingVariational', 'psiz.keras.layers.EmbeddingVariational', ([], {'posterior': 'embedding_posterior', 'prior': 'embedding_prior', 'kl_weight': 'kl_weight', 'kl_n_sample': '(30)'}), '(posterior=embedding_posterior, prior\n =embedding_prior, kl_weight=kl_weight, kl_n_sample=30)\n', (6765, 6861), False, 'import psiz\n'), ((7450, 7516), 'psiz.keras.models.Rank', 'psiz.keras.models.Rank', ([], {'stimuli': 'stimuli', 'kernel': 'kernel', 'n_sample': '(1)'}), '(stimuli=stimuli, kernel=kernel, n_sample=1)\n', (7472, 7516), False, 'import psiz\n'), ((7753, 7773), 'numpy.unique', 'np.unique', (['class_arr'], {}), '(class_arr)\n', (7762, 7773), True, 'import numpy as np\n'), ((7866, 7895), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (7888, 7895), False, 
'import matplotlib\n'), ((7907, 7958), 'matplotlib.colors.Normalize', 'matplotlib.colors.Normalize', ([], {'vmin': '(0.0)', 'vmax': 'n_class'}), '(vmin=0.0, vmax=n_class)\n', (7934, 7958), False, 'import matplotlib\n'), ((8469, 8578), 'psiz.mplot.hdi_bvn', 'psiz.mplot.hdi_bvn', (['loc', 'cov', 'ax'], {'p': '(0.95)', 'edgecolor': 'exemplar_color_array', 'lw': 'lw', 'alpha': 'alpha', 'fill': '(False)'}), '(loc, cov, ax, p=0.95, edgecolor=exemplar_color_array, lw\n =lw, alpha=alpha, fill=False)\n', (8487, 8578), False, 'import psiz\n'), ((9329, 9347), 'numpy.unique', 'np.unique', (['idx_arr'], {}), '(idx_arr)\n', (9338, 9347), True, 'import numpy as np\n'), ((9364, 9398), 'numpy.zeros', 'np.zeros', (['idx_arr.shape'], {'dtype': 'int'}), '(idx_arr.shape, dtype=int)\n', (9372, 9398), True, 'import numpy as np\n'), ((1616, 1627), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (1625, 1627), False, 'from pathlib import Path\n'), ((1630, 1674), 'pathlib.Path', 'Path', (['"""psiz_examples"""', '"""rank"""', '"""vi_birds_16"""'], {}), "('psiz_examples', 'rank', 'vi_birds_16')\n", (1634, 1674), False, 'from pathlib import Path\n'), ((1703, 1728), 'pathlib.Path', 'Path', (['"""logs"""', '"""fit"""', '"""r0"""'], {}), "('logs', 'fit', 'r0')\n", (1707, 1728), False, 'from pathlib import Path\n'), ((1954, 1977), 'shutil.rmtree', 'shutil.rmtree', (['fp_board'], {}), '(fp_board)\n', (1967, 1977), False, 'import shutil\n'), ((3345, 3386), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (3384, 3386), True, 'import tensorflow as tf\n'), ((3409, 3454), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (3433, 3454), True, 'import tensorflow as tf\n'), ((5082, 5101), 'pathlib.Path', 'Path', (['"""visual.tiff"""'], {}), "('visual.tiff')\n", (5086, 5101), False, 'from pathlib import Path\n'), ((5127, 5143), 'os.fspath', 'os.fspath', (['fname'], 
{}), '(fname)\n', (5136, 5143), False, 'import os\n'), ((8682, 8712), 'numpy.equal', 'np.equal', (['class_arr', 'idx_class'], {}), '(class_arr, idx_class)\n', (8690, 8712), True, 'import numpy as np\n'), ((9467, 9494), 'numpy.equal', 'np.equal', (['idx_arr', 'uniq_idx'], {}), '(idx_arr, uniq_idx)\n', (9475, 9494), True, 'import numpy as np\n'), ((9866, 9901), 'numpy.zeros', 'np.zeros', (['[n_stimuli, n_dim, n_dim]'], {}), '([n_stimuli, n_dim, n_dim])\n', (9874, 9901), True, 'import numpy as np\n'), ((3497, 3549), 'tensorflow.keras.metrics.CategoricalCrossentropy', 'tf.keras.metrics.CategoricalCrossentropy', ([], {'name': '"""cce"""'}), "(name='cce')\n", (3537, 3549), True, 'import tensorflow as tf\n'), ((8310, 8321), 'numpy.abs', 'np.abs', (['loc'], {}), '(loc)\n', (8316, 8321), True, 'import numpy as np\n'), ((9976, 9989), 'numpy.eye', 'np.eye', (['n_dim'], {}), '(n_dim)\n', (9982, 9989), True, 'import numpy as np\n'), ((6488, 6523), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(0.0)'], {}), '(0.0)\n', (6518, 6523), True, 'import tensorflow as tf\n'), ((7000, 7035), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(2.0)'], {}), '(2.0)\n', (7030, 7035), True, 'import tensorflow as tf\n'), ((7062, 7097), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(1.0)'], {}), '(1.0)\n', (7092, 7097), True, 'import tensorflow as tf\n'), ((7255, 7291), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(10.0)'], {}), '(10.0)\n', (7285, 7291), True, 'import tensorflow as tf\n'), ((7320, 7355), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(1.0)'], {}), '(1.0)\n', (7350, 7355), True, 'import tensorflow as tf\n'), ((7386, 7421), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(0.0)'], {}), '(0.0)\n', (7416, 7421), True, 'import tensorflow as tf\n'), ((6221, 6259), 
'tensorflow_probability.math.softplus_inverse', 'tfp.math.softplus_inverse', (['prior_scale'], {}), '(prior_scale)\n', (6246, 6259), True, 'import tensorflow_probability as tfp\n'), ((6602, 6640), 'tensorflow_probability.math.softplus_inverse', 'tfp.math.softplus_inverse', (['prior_scale'], {}), '(prior_scale)\n', (6627, 6640), True, 'import tensorflow_probability as tfp\n')] |
# helper function that are used by the settings for multidomain
#
import numpy as np
import pickle
import sys
import struct
import scipy.stats
def load_mesh(fiber_file, sampling_stride_z, rank_no):
  """Load the 3D mesh node positions and fiber data for the multidomain settings.

  :param fiber_file: filename of either a binary ``*.bin`` fiber file
    (created by parallel_fiber_estimation) or a python pickle file.
  :param sampling_stride_z: stride for subsampling the points of a fiber
    in z direction.
  :param rank_no: MPI rank number; console output is printed on rank 0 only.
  :return: tuple (mesh_node_positions, fiber_data, bottom_node_indices,
    top_node_indices, n_linear_elements_per_coordinate_direction)
  """
  # get the mesh nodes, either from a .bin file or a python pickle file
  if ".bin" in fiber_file:
    # data input from bin files that contain fibers
    try:
      fiber_file_handle = open(fiber_file, "rb")
    except:
      # NOTE(review): bare except turns ANY error into a hard exit here;
      # catching IOError/OSError only would be safer.
      print("Error: Could not open fiber file \"{}\"".format(fiber_file))
      quit()
    # parse fibers from a binary fiber file that was created by parallel_fiber_estimation
    # parse file header to extract number of fibers
    bytes_raw = fiber_file_handle.read(32)  # 32-byte header string (unused afterwards)
    header_str = struct.unpack('32s', bytes_raw)[0]
    header_length_raw = fiber_file_handle.read(4)
    header_length = struct.unpack('i', header_length_raw)[0]
    # read the remaining int32 header parameters
    parameters = []
    for i in range(int(header_length/4.) - 1):
      double_raw = fiber_file_handle.read(4)
      value = struct.unpack('i', double_raw)[0]
      parameters.append(value)
    n_fibers_total = parameters[0]
    # the fibers form a square grid of n_fibers_x * n_fibers_y fibers
    n_fibers_x = (int)(np.round(np.sqrt(n_fibers_total)))
    n_fibers_y = n_fibers_x
    n_points_initial_whole_fiber = parameters[1]
    # parse whole fiber file
    fiber_data = []
    mesh_node_positions = []
    for fiber_no in range(n_fibers_total):
      fiber = []
      for point_no in range(n_points_initial_whole_fiber):
        point = []
        for i in range(3):
          # each coordinate is stored as a float64 value
          double_raw = fiber_file_handle.read(8)
          value = struct.unpack('d', double_raw)[0]
          point.append(value)
        fiber.append(point)
      # sample fiber in z direction
      new_fiber = []
      for point_no in range(0,n_points_initial_whole_fiber,sampling_stride_z):
        point = fiber[point_no]
        new_fiber.append(point)
      fiber_data.append(new_fiber)
    # set node positions
    n_points_whole_fiber = len(fiber_data[0])
    mesh_node_positions = [0 for _ in range(n_fibers_x*n_fibers_y*n_points_whole_fiber)]
    n_linear_elements_per_coordinate_direction = [n_fibers_x-1, n_fibers_y-1, n_points_whole_fiber-1]
    # nodes are numbered with x running fastest, then y, then z
    for k in range(n_points_whole_fiber):
      for j in range(n_fibers_y):
        for i in range(n_fibers_x):
          mesh_node_positions[k*n_fibers_x*n_fibers_y + j*n_fibers_x + i] = fiber_data[j*n_fibers_x + i][k]
    if rank_no == 0:
      print("fiber_file: \"{}\"".format(fiber_file))
      print("n fibers: {} ({} x {})".format(n_fibers_total, n_fibers_x, n_fibers_y))
      print("n points per fiber: {}, sampling by stride {}".format(n_points_initial_whole_fiber, sampling_stride_z))
      print("3D mesh: {} x {} x {} nodes".format(n_linear_elements_per_coordinate_direction[0]+1, n_linear_elements_per_coordinate_direction[1]+1, n_linear_elements_per_coordinate_direction[2]+1))
    # the first and last z layer of nodes are the bottom and top surfaces
    n_points_xy = n_fibers_x*n_fibers_y
    bottom_node_indices = list(range(n_points_xy))
    n_points = n_points_xy*n_points_whole_fiber
    top_node_indices = list(range(n_points-n_points_xy,n_points))
  else:
    # data input from generating 3D meshes without fiber tracing
    # load fibers
    with open(fiber_file, "rb") as f:
      fiber_data = pickle.load(f, encoding='latin1')
    # list of fibers, fiber = list of points, point = list with 3 coordinate entries
    # load mesh
    # NOTE(review): `mesh_file` is not a parameter of this function; this
    # branch relies on a global variable of that name being defined by the
    # calling settings script -- verify before using this code path.
    with open(mesh_file, "rb") as f:
      mesh_data = pickle.load(f, encoding='latin1')
    n_linear_elements_per_coordinate_direction = mesh_data["n_linear_elements_per_coordinate_direction"]
    mesh_node_positions = mesh_data["node_positions"]
    bottom_node_indices = mesh_data["bottom_nodes"]
    top_node_indices = mesh_data["top_nodes"]
    # keys available in mesh_data (for reference):
    #
    # "node_positions": node_positions,
    # "linear_elements": linear_elements,
    # "quadratic_elements": quadratic_elements,
    # "seed_points": seed_points,
    # "bottom_nodes": bottom_node_indices,
    # "top_nodes": top_node_indices,
    # "n_linear_elements_per_coordinate_direction": n_linear_elements_per_coordinate_direction,
    # "n_quadratic_elements_per_coordinate_direction": n_quadratic_elements_per_coordinate_direction,
    #
    # output bounding box for debugging
    if rank_no == 0:
      min_x = min([x for [x,y,z] in mesh_data["node_positions"]])
      max_x = max([x for [x,y,z] in mesh_data["node_positions"]])
      min_y = min([y for [x,y,z] in mesh_data["node_positions"]])
      max_y = max([y for [x,y,z] in mesh_data["node_positions"]])
      min_z = min([z for [x,y,z] in mesh_data["node_positions"]])
      max_z = max([z for [x,y,z] in mesh_data["node_positions"]])
      print("mesh bounding box x: [{},{}], y: [{},{}], z:[{},{}]".format(min_x, max_x, min_y, max_y, min_z, max_z))
      # also print bounding boxes of a few sample fibers
      for fiber_no in [10, 30, 50]:
        data = fiber_data[fiber_no]
        min_x = min([x for [x,y,z] in data])
        max_x = max([x for [x,y,z] in data])
        min_y = min([y for [x,y,z] in data])
        max_y = max([y for [x,y,z] in data])
        min_z = min([z for [x,y,z] in data])
        max_z = max([z for [x,y,z] in data])
        print("fiber {} bounding box x: [{},{}], y: [{},{}], z:[{},{}]".format(fiber_no, min_x, max_x, min_y, max_y, min_z, max_z))
  return (mesh_node_positions,fiber_data,bottom_node_indices,top_node_indices,n_linear_elements_per_coordinate_direction)
####################################
# compute relative factors fr for compartments
def compute_compartment_relative_factors(mesh_node_positions, n_mesh_points_xy, n_mesh_points_z, fiber_data, motor_units):
  """
  Compute the relative factors, f_r, that are needed in the multidomain formulation as a weighting for compartments.
  Result is relative_factors[motor_unit_no][node_no] for the 3D mesh.
  :param mesh_node_positions: list of (x,y,z) values, global node positions of the 3D mesh
  :param n_mesh_points_xy: number of mesh nodes per z-plane, used to map a flat node index to its z plane
  :param n_mesh_points_z: number of mesh node planes in z direction
  :param fiber_data: list of fibers, each fiber is a list of points, i.e. point = fiber_data[xy_index][z_index]
  :param motor_units: a list of dicts, settings for the motor units, [{"fiber_no": 0, "standard_deviation": 0.5, "maximum": 1}]
  """
  # list of fibers, fiber = list of points, point = list with 3 coordinate entries
  n_compartments = len(motor_units)
  n_points_fiber = len(fiber_data[0])
  # fibers are assumed to be arranged in an n_fibers_x * n_fibers_x grid
  # (assumes len(fiber_data) is a perfect square — TODO confirm with caller)
  n_fibers_x = int(np.sqrt(len(fiber_data)))
  # create relative factors for compartments
  #if rank_no == 0:
  #  print("determine relative factors for {} motor units:\n{}".format(n_compartments, motor_units))
  # determine approximate diameter of muscle at every point is z direction
  diameters = []
  # loop over points in z direction
  for z_index_mesh in range(n_mesh_points_z):
    # map the mesh z index to the corresponding point index along a fiber
    z_index_fiber = int(z_index_mesh / (float)(n_mesh_points_z) * n_points_fiber)
    # get point on first and last fiber
    # six sample points on the boundary fibers of the grid (corners and edge midpoints)
    point0 = np.array(fiber_data[0][z_index_fiber])
    point4 = np.array(fiber_data[(n_fibers_x-1)//2][z_index_fiber])
    point1 = np.array(fiber_data[n_fibers_x-1][z_index_fiber])
    point2 = np.array(fiber_data[-n_fibers_x][z_index_fiber])
    point5 = np.array(fiber_data[(-n_fibers_x)//2][z_index_fiber])
    point3 = np.array(fiber_data[-1][z_index_fiber])
    # their distance is an approximation for the diameter
    distance01 = np.linalg.norm(point0 - point1)
    distance02 = np.linalg.norm(point0 - point2)
    distance03 = np.linalg.norm(point0 - point3)
    distance04 = np.linalg.norm(point0 - point4)
    distance05 = np.linalg.norm(point0 - point5)
    distance12 = np.linalg.norm(point1 - point2)
    distance13 = np.linalg.norm(point1 - point3)
    distance14 = np.linalg.norm(point1 - point4)
    distance15 = np.linalg.norm(point1 - point5)
    distance23 = np.linalg.norm(point2 - point3)
    distance24 = np.linalg.norm(point2 - point4)
    distance25 = np.linalg.norm(point2 - point5)
    distance34 = np.linalg.norm(point3 - point4)
    distance35 = np.linalg.norm(point3 - point5)
    distance45 = np.linalg.norm(point4 - point5)
    # the maximum pairwise distance is taken as the diameter at this z level
    distance = max(distance01,distance02,distance03,distance04,distance05,distance12,distance13,distance14,distance15,distance23,distance24,distance25,distance34,distance35,distance45)
    diameters.append(distance)
  #print("diameters: {}".format(diameters))
  # create data structure with 0
  relative_factors = np.zeros((n_compartments, len(mesh_node_positions))) # each row is one compartment
  # loop over nodes of mesh
  for node_no,node_position in enumerate(mesh_node_positions):
    node_position = np.array(node_position)
    # node indices are ordered plane by plane, so integer division yields the z plane
    z_index_mesh = int((float)(node_no) / n_mesh_points_xy)
    z_index_fiber = int(z_index_mesh / (float)(n_mesh_points_z) * n_points_fiber)
    # loop over motor units
    for motor_unit_no,motor_unit in enumerate(motor_units):
      # find point on fiber that is closest to current node
      fiber_no = motor_unit["fiber_no"]
      # wrap around if the configured fiber index exceeds the number of loaded fibers
      if fiber_no >= len(fiber_data):
        new_fiber_no = fiber_no % len(fiber_data)
        if node_no == 0:
          print("\033[0;31mError with motor unit {} around fiber {}, only {} fibers available, now using fiber {} % {} = {} instead.\033[0m".format(motor_unit_no, fiber_no, len(fiber_data), fiber_no, len(fiber_data), new_fiber_no))
        fiber_no = new_fiber_no
      # only search in a window of fiber points around the expected z index (at least 10 points wide)
      min_distance = None
      search_range = int(1 / (float)(n_mesh_points_z) * n_points_fiber)
      search_range = max(10,search_range)
      z_start = max(0,z_index_fiber - search_range)
      z_end = min(n_points_fiber, z_index_fiber + search_range)
      #print("node_position: {}, z_index_fiber: {}, fiber at z index: {}, fiber: {}".format(node_position, z_index_fiber, fiber_data[fiber_no][z_index_fiber], fiber_data[fiber_no][z_start:z_end]))
      #print("search_range: {}".format(search_range))
      # minimize the squared distance, take the square root once afterwards
      for k,fiber_point in enumerate(fiber_data[fiber_no][z_start:z_end]):
        d = np.array(fiber_point) - node_position
        distance = np.inner(d,d)
        if min_distance is None or distance < min_distance:
          min_distance = distance
          #print("node_position {}, fiber_point {}, d={}, |d|={}".format(node_position, fiber_point, d, np.sqrt(distance)))
      distance = np.sqrt(min_distance)
      # compute value as gaussian with given standard_deviation and maximum
      # the deviation is scaled by the local muscle diameter; pdf is rescaled so its
      # peak (at distance 0) equals motor_unit["maximum"]
      standard_deviation = motor_unit["standard_deviation"]*diameters[z_index_mesh]
      gaussian = scipy.stats.norm(loc = 0., scale = standard_deviation)
      value = gaussian.pdf(distance)*standard_deviation*np.sqrt(2*np.pi)*motor_unit["maximum"]
      relative_factors[motor_unit_no][node_no] += value
      #print("motor unit {}, fiber {}, distance {}, value {}".format(motor_unit_no, fiber_no, distance, value))
  return relative_factors
| [
"numpy.sqrt",
"pickle.load",
"numpy.inner",
"numpy.array",
"struct.unpack",
"numpy.linalg.norm"
] | [((6831, 6869), 'numpy.array', 'np.array', (['fiber_data[0][z_index_fiber]'], {}), '(fiber_data[0][z_index_fiber])\n', (6839, 6869), True, 'import numpy as np\n'), ((6883, 6941), 'numpy.array', 'np.array', (['fiber_data[(n_fibers_x - 1) // 2][z_index_fiber]'], {}), '(fiber_data[(n_fibers_x - 1) // 2][z_index_fiber])\n', (6891, 6941), True, 'import numpy as np\n'), ((6951, 7002), 'numpy.array', 'np.array', (['fiber_data[n_fibers_x - 1][z_index_fiber]'], {}), '(fiber_data[n_fibers_x - 1][z_index_fiber])\n', (6959, 7002), True, 'import numpy as np\n'), ((7014, 7062), 'numpy.array', 'np.array', (['fiber_data[-n_fibers_x][z_index_fiber]'], {}), '(fiber_data[-n_fibers_x][z_index_fiber])\n', (7022, 7062), True, 'import numpy as np\n'), ((7076, 7129), 'numpy.array', 'np.array', (['fiber_data[-n_fibers_x // 2][z_index_fiber]'], {}), '(fiber_data[-n_fibers_x // 2][z_index_fiber])\n', (7084, 7129), True, 'import numpy as np\n'), ((7143, 7182), 'numpy.array', 'np.array', (['fiber_data[-1][z_index_fiber]'], {}), '(fiber_data[-1][z_index_fiber])\n', (7151, 7182), True, 'import numpy as np\n'), ((7263, 7294), 'numpy.linalg.norm', 'np.linalg.norm', (['(point0 - point1)'], {}), '(point0 - point1)\n', (7277, 7294), True, 'import numpy as np\n'), ((7312, 7343), 'numpy.linalg.norm', 'np.linalg.norm', (['(point0 - point2)'], {}), '(point0 - point2)\n', (7326, 7343), True, 'import numpy as np\n'), ((7361, 7392), 'numpy.linalg.norm', 'np.linalg.norm', (['(point0 - point3)'], {}), '(point0 - point3)\n', (7375, 7392), True, 'import numpy as np\n'), ((7410, 7441), 'numpy.linalg.norm', 'np.linalg.norm', (['(point0 - point4)'], {}), '(point0 - point4)\n', (7424, 7441), True, 'import numpy as np\n'), ((7459, 7490), 'numpy.linalg.norm', 'np.linalg.norm', (['(point0 - point5)'], {}), '(point0 - point5)\n', (7473, 7490), True, 'import numpy as np\n'), ((7508, 7539), 'numpy.linalg.norm', 'np.linalg.norm', (['(point1 - point2)'], {}), '(point1 - point2)\n', (7522, 7539), True, 'import numpy as 
np\n'), ((7557, 7588), 'numpy.linalg.norm', 'np.linalg.norm', (['(point1 - point3)'], {}), '(point1 - point3)\n', (7571, 7588), True, 'import numpy as np\n'), ((7606, 7637), 'numpy.linalg.norm', 'np.linalg.norm', (['(point1 - point4)'], {}), '(point1 - point4)\n', (7620, 7637), True, 'import numpy as np\n'), ((7655, 7686), 'numpy.linalg.norm', 'np.linalg.norm', (['(point1 - point5)'], {}), '(point1 - point5)\n', (7669, 7686), True, 'import numpy as np\n'), ((7704, 7735), 'numpy.linalg.norm', 'np.linalg.norm', (['(point2 - point3)'], {}), '(point2 - point3)\n', (7718, 7735), True, 'import numpy as np\n'), ((7753, 7784), 'numpy.linalg.norm', 'np.linalg.norm', (['(point2 - point4)'], {}), '(point2 - point4)\n', (7767, 7784), True, 'import numpy as np\n'), ((7802, 7833), 'numpy.linalg.norm', 'np.linalg.norm', (['(point2 - point5)'], {}), '(point2 - point5)\n', (7816, 7833), True, 'import numpy as np\n'), ((7851, 7882), 'numpy.linalg.norm', 'np.linalg.norm', (['(point3 - point4)'], {}), '(point3 - point4)\n', (7865, 7882), True, 'import numpy as np\n'), ((7900, 7931), 'numpy.linalg.norm', 'np.linalg.norm', (['(point3 - point5)'], {}), '(point3 - point5)\n', (7914, 7931), True, 'import numpy as np\n'), ((7949, 7980), 'numpy.linalg.norm', 'np.linalg.norm', (['(point4 - point5)'], {}), '(point4 - point5)\n', (7963, 7980), True, 'import numpy as np\n'), ((8494, 8517), 'numpy.array', 'np.array', (['node_position'], {}), '(node_position)\n', (8502, 8517), True, 'import numpy as np\n'), ((712, 743), 'struct.unpack', 'struct.unpack', (['"""32s"""', 'bytes_raw'], {}), "('32s', bytes_raw)\n", (725, 743), False, 'import struct\n'), ((817, 854), 'struct.unpack', 'struct.unpack', (['"""i"""', 'header_length_raw'], {}), "('i', header_length_raw)\n", (830, 854), False, 'import struct\n'), ((3271, 3304), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (3282, 3304), False, 'import pickle\n'), ((3462, 3495), 'pickle.load', 'pickle.load', 
(['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (3473, 3495), False, 'import pickle\n'), ((10170, 10191), 'numpy.sqrt', 'np.sqrt', (['min_distance'], {}), '(min_distance)\n', (10177, 10191), True, 'import numpy as np\n'), ((985, 1015), 'struct.unpack', 'struct.unpack', (['"""i"""', 'double_raw'], {}), "('i', double_raw)\n", (998, 1015), False, 'import struct\n'), ((1124, 1147), 'numpy.sqrt', 'np.sqrt', (['n_fibers_total'], {}), '(n_fibers_total)\n', (1131, 1147), True, 'import numpy as np\n'), ((9914, 9928), 'numpy.inner', 'np.inner', (['d', 'd'], {}), '(d, d)\n', (9922, 9928), True, 'import numpy as np\n'), ((9857, 9878), 'numpy.array', 'np.array', (['fiber_point'], {}), '(fiber_point)\n', (9865, 9878), True, 'import numpy as np\n'), ((10487, 10505), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (10494, 10505), True, 'import numpy as np\n'), ((1539, 1569), 'struct.unpack', 'struct.unpack', (['"""d"""', 'double_raw'], {}), "('d', double_raw)\n", (1552, 1569), False, 'import struct\n')] |
# -*- coding:utf-8 -*-
import numpy as np
def dropout(x, level):
    """Apply inverted dropout to ``x`` in place.

    Each element is kept with probability ``1 - level`` and the survivors are
    rescaled by ``1 / (1 - level)`` so the expected value is unchanged.
    Prints the mask and the intermediate arrays (debug output), mutates ``x``
    and also returns it.
    """
    if level < 0 or level >= 1:
        raise ValueError("Dropout Level must be in interval[0, 1)")
    keep_prob = 1. - level
    # Bernoulli mask: 1 = keep the element, 0 = drop it
    mask = np.random.binomial(n=1, p=keep_prob, size=x.shape)
    print(mask)
    x *= mask
    print(x)
    # inverted-dropout scaling keeps the expectation equal to the input
    x /= keep_prob
    print(x)
    return x
# demo: apply 40% dropout to a small float vector (prints the mask and
# intermediate arrays as a side effect of dropout())
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype = np.float32)
dropout(x, 0.4)
| [
"numpy.array",
"numpy.random.binomial"
] | [((391, 446), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9]'], {'dtype': 'np.float32'}), '([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float32)\n', (399, 446), True, 'import numpy as np\n'), ((217, 269), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': 'retain_prob', 'size': 'x.shape'}), '(n=1, p=retain_prob, size=x.shape)\n', (235, 269), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
A simple general csv dataset wrapper for pylearn2.
Can do automatic one-hot encoding based on labels present in a file.
"""
__authors__ = "<NAME>"
__copyright__ = "Copyright 2013, <NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "3-clause BSD"
__maintainer__ = "?"
__email__ = "<EMAIL>"
import csv
import numpy as np
import pandas as pd
import os
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.utils import serial
from pylearn2.utils.string_utils import preprocess
class CSVDataset(DenseDesignMatrix):
    """A generic class for accessing CSV files.

    Labels, if present, should be in the first column.
    If there's no labels, set expect_labels to False.
    If there's no header line in your file, set expect_headers to False.

    Parameters
    ----------
    path : str
        The path to the CSV file.
    task : str
        The type of task in which the dataset will be used -- either
        "classification" or "regression". The task determines the shape of the
        target variable. For classification, it is a vector; for regression, a
        matrix.
    expect_labels : bool
        Whether the CSV file contains a target variable in the first column.
    expect_headers : bool
        Whether the CSV file contains column headers.
    delimiter : str
        The CSV file's delimiter.
    start : int
        The first row of the CSV file to load.
    stop : int
        The last row of the CSV file to load.
    start_fraction : float
        The fraction of rows, starting at the beginning of the file, to load.
    end_fraction : float
        The fraction of rows, starting at the end of the file, to load.
    drop_labels, keep_labels, y_label :
        Accepted for API compatibility. NOTE(review): the column-filtering
        feature that used these was never functional and is currently unused.
    """

    def __init__(self,
                 path='train.csv',
                 task='classification',
                 expect_labels=True,
                 expect_headers=True,
                 delimiter=',',
                 start=None,
                 stop=None,
                 start_fraction=None,
                 end_fraction=None,
                 drop_labels=None,
                 keep_labels=None,
                 y_label=None
                 ):
        """Validate the subsetting arguments, load the data and initialize
        the underlying DenseDesignMatrix.
        """
        self.path = path
        self.task = task
        self.expect_labels = expect_labels
        self.expect_headers = expect_headers
        self.delimiter = delimiter
        self.start = start
        self.stop = stop
        self.start_fraction = start_fraction
        self.end_fraction = end_fraction
        self.drop_labels = drop_labels if drop_labels is not None else set()
        self.keep_labels = keep_labels if keep_labels is not None else set()
        self.y_label = y_label
        # BUGFIX: labels_col was referenced in _load_data but never defined;
        # per the class docstring the target variable lives in the first column.
        self.labels_col = 0
        self.view_converter = None

        if task not in ['classification', 'regression']:
            raise ValueError('task must be either "classification" or '
                             '"regression"; got ' + str(task))

        # start/stop, start_fraction and end_fraction are mutually exclusive
        # ways of selecting a subset of rows.
        if start_fraction is not None:
            if end_fraction is not None:
                raise ValueError("Use start_fraction or end_fraction, "
                                 " not both.")
            if start_fraction <= 0:
                raise ValueError("start_fraction should be > 0")
            if start_fraction >= 1:
                raise ValueError("start_fraction should be < 1")
        if end_fraction is not None:
            if end_fraction <= 0:
                raise ValueError("end_fraction should be > 0")
            if end_fraction >= 1:
                raise ValueError("end_fraction should be < 1")
        if start is not None:
            if start_fraction is not None or end_fraction is not None:
                raise ValueError("Use start, start_fraction, or end_fraction,"
                                 " just not together.")
        if stop is not None:
            if start_fraction is not None or end_fraction is not None:
                raise ValueError("Use stop, start_fraction, or end_fraction,"
                                 " just not together.")

        # and go
        self.path = preprocess(self.path)
        X, y = self._load_data()

        if self.task == 'regression':
            super(CSVDataset, self).__init__(X=X, y=y)
        else:
            # for classification the number of distinct label values is needed
            super(CSVDataset, self).__init__(X=X, y=y,
                                             y_labels=len(set(list(y[:, 0]))))

    def _load_data(self):
        """Read the CSV file and return ``(X, y)``.

        ``X`` is the feature matrix as float32; ``y`` is an (n, 1) array of
        labels, or None when ``expect_labels`` is False.

        Fixes relative to the previous revision:
        * the file was read three times (twice with pandas, once with
          numpy) and the final ``pd.read_csv`` left ``data`` as a DataFrame
          whose iteration yields column *names*, breaking label extraction;
        * ``set(cols) + set(...)`` raised TypeError unconditionally (sets do
          not support ``+``) and its result was never used — dead code removed;
        * ``take_subset`` indexed ``y`` even when it is None.
        """
        assert self.path.endswith('.csv')

        # a single numpy read is sufficient; skip the header row if present
        skiprows = 1 if self.expect_headers else 0
        data = np.loadtxt(self.path,
                          delimiter=self.delimiter,
                          skiprows=skiprows)

        def take_subset(X, y):
            # select the configured subset of rows (fractions or start/stop)
            if self.start_fraction is not None:
                n = X.shape[0]
                subset_end = int(self.start_fraction * n)
                X = X[0:subset_end, :]
                if y is not None:
                    y = y[0:subset_end]
            elif self.end_fraction is not None:
                n = X.shape[0]
                subset_start = int((1 - self.end_fraction) * n)
                X = X[subset_start:, ]
                if y is not None:
                    y = y[subset_start:]
            elif self.start is not None:
                X = X[self.start:self.stop, ]
                if y is not None:
                    y = y[self.start:self.stop]
            return X, y

        if self.expect_labels:
            # label column as an (n, 1) matrix, remaining columns as features
            y = data[:, self.labels_col:self.labels_col + 1]
            X = np.delete(data, self.labels_col, axis=1)
        else:
            X = data
            y = None

        X, y = take_subset(X, y)
        return X.astype(np.float32), y
| [
"pylearn2.utils.string_utils.preprocess",
"numpy.array",
"numpy.loadtxt",
"pandas.read_csv"
] | [((4090, 4111), 'pylearn2.utils.string_utils.preprocess', 'preprocess', (['self.path'], {}), '(self.path)\n', (4100, 4111), False, 'from pylearn2.utils.string_utils import preprocess\n'), ((4533, 4555), 'pandas.read_csv', 'pd.read_csv', (['self.path'], {}), '(self.path)\n', (4544, 4555), True, 'import pandas as pd\n'), ((4857, 4879), 'pandas.read_csv', 'pd.read_csv', (['self.path'], {}), '(self.path)\n', (4868, 4879), True, 'import pandas as pd\n'), ((4641, 4700), 'numpy.loadtxt', 'np.loadtxt', (['self.path'], {'delimiter': 'self.delimiter', 'skiprows': '(1)'}), '(self.path, delimiter=self.delimiter, skiprows=1)\n', (4651, 4700), True, 'import numpy as np\n'), ((4794, 4841), 'numpy.loadtxt', 'np.loadtxt', (['self.path'], {'delimiter': 'self.delimiter'}), '(self.path, delimiter=self.delimiter)\n', (4804, 4841), True, 'import numpy as np\n'), ((5819, 5830), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5827, 5830), True, 'import numpy as np\n'), ((5847, 5858), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5855, 5858), True, 'import numpy as np\n')] |
#coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
import PIL.Image as Image
import tensorflow as tf
import retrain as retrain
from count_ops import load_graph
from sklearn.decomposition import PCA
from glob import glob
import time
from tensorflow.python.framework.graph_util import convert_variables_to_constants
sys.path.append("/home/deepl/PHICOMM/FoodAI/FoodAi/tensorflow/tensorflow_models/models/research/PHICOMM/slim")
from nets import nets_factory
def get_graph(path):
    """Load one frozen classifier graph per file in *path*, sorted by name.

    Prints the sorted file names (debug output) and returns the list of
    loaded graphs in the same order.
    """
    class_files = sorted(os.listdir(path))
    print(class_files)
    return [load_graph(os.path.join(path, name)) for name in class_files]
def get_weights_biases(graphs, allw, selective):
    """Extract the final-layer parameters of every graph and append two means.

    For each graph, evaluate the MobilenetV2 logits-layer weight and bias
    tensors. Then append the element-wise mean over the graphs indexed by
    `allw`, followed by the mean over the graphs indexed by `selective`.

    :param graphs: list of tf.Graph objects containing a MobilenetV2 head
    :param allw: indices (into `graphs`) of the full ensemble
    :param selective: indices (into `graphs`) of the selective ensemble
    :return: (weights, biases) — lists of length len(graphs) + 2

    Fix: the two group means were previously computed by duplicated,
    convoluted copy/add/subtract loops; both are now one np.mean over a stack.
    """
    weights = []
    biases = []
    for graph in graphs:
        with graph.as_default() as g:
            weight = graph.get_tensor_by_name('MobilenetV2/Logits/Conv2d_1c_1x1/weights:0')
            biase = graph.get_tensor_by_name('MobilenetV2/Logits/Conv2d_1c_1x1/biases:0')
            with tf.Session(graph=g) as sess:
                weights.append(weight.eval({}, sess))
                biases.append(biase.eval({}, sess))

    # Append the mean of each index group. Note the group indices refer to the
    # original per-graph entries, which are unaffected by the appends.
    for index_group in (allw, selective):
        weights.append(np.mean(np.stack([weights[i] for i in index_group], axis=0), axis=0))
        biases.append(np.mean(np.stack([biases[i] for i in index_group], axis=0), axis=0))
    return weights, biases
def get_parameters_w(weights):
    """Return the mean of each column of *weights* as a list.

    The matrix is transposed so each row corresponds to one output unit;
    the list holds one mean per such row.
    """
    return [np.mean(row) for row in weights.transpose()]
import xlsxwriter
def write_excel(new_params):
    """Dump the 2-D sequence *new_params* into 'parameters.xlsx'.

    One cell per value; every row writes len(new_params[0]) columns.
    """
    workbook = xlsxwriter.Workbook("parameters.xlsx")
    sheet = workbook.add_worksheet()
    n_cols = len(new_params[0])
    for row_idx, row in enumerate(new_params):
        for col_idx in range(n_cols):
            sheet.write(row_idx, col_idx, row[col_idx])
    workbook.close()
def get_weight_and_bias(checkpoint_prefix, sess, graph, saver):
    """Restore *checkpoint_prefix* into *sess* and return the evaluated
    MobilenetV2 logits-layer (weights, biases) arrays."""
    saver.restore(sess, checkpoint_prefix)
    weight_tensor = graph.get_tensor_by_name('MobilenetV2/Logits/Conv2d_1c_1x1/weights:0')
    bias_tensor = graph.get_tensor_by_name('MobilenetV2/Logits/Conv2d_1c_1x1/biases:0')
    return weight_tensor.eval({}, sess), bias_tensor.eval({}, sess)
def average(ensemble_checkpoints, select_ensemble_checkpoints, dataset, FLAGS):
    """Collect final-layer parameters from every checkpoint, then append the
    mean over two checkpoint groups.

    Builds the evaluation network once, restores every checkpoint found under
    FLAGS.checkpoint_path and records its logits-layer weights/biases, then
    appends the element-wise mean of `ensemble_checkpoints` followed by the
    mean of `select_ensemble_checkpoints`.

    :return: (weights, biases) — lists with one entry per checkpoint plus two means

    Fix: the two group-averaging passes were duplicated verbatim; they are now
    a single helper. NOTE(review): the tf.Session is never closed, matching
    the original behavior — consider a context manager.
    """
    network_fn = nets_factory.get_network_fn(
        FLAGS.model_name,
        num_classes=(dataset.num_classes - FLAGS.labels_offset),
        is_training=False)
    placeholder = tf.placeholder(name='input', dtype=tf.float32,
                                 shape=[None, FLAGS.eval_image_size,
                                        FLAGS.eval_image_size, 3])
    logits, _ = network_fn(placeholder)
    graph = tf.get_default_graph()
    saver = tf.train.Saver()
    sess = tf.Session()

    def _restore(prefix):
        # one checkpoint -> (weights, biases) as numpy arrays
        ws, bs = get_weight_and_bias(prefix, sess, graph, saver)
        return np.array(ws), np.array(bs)

    weights = []
    biases = []

    # every checkpoint in the directory (new .data* style and old .ckpt style)
    all_checkpoints = glob(os.path.join(FLAGS.checkpoint_path, "*.data*"))
    all_checkpoints += glob(os.path.join(FLAGS.checkpoint_path, "*.ckpt"))
    all_checkpoints.sort()
    for checkpoint in all_checkpoints:
        ws, bs = _restore(checkpoint.replace('.data-00000-of-00001', ''))
        weights.append(ws)
        biases.append(bs)

    def _append_group_mean(checkpoint_names):
        # restore each named checkpoint and append the element-wise mean
        ws_list = []
        bs_list = []
        for name in checkpoint_names:
            step = name.replace("model.ckpt-", "")
            prefix = FLAGS.checkpoint_path + "/" + "model.ckpt-" + step
            ws, bs = _restore(prefix)
            ws_list.append(ws)
            bs_list.append(bs)
        weights.append(np.array(np.mean(np.stack(ws_list, 0), axis=0)))
        biases.append(np.array(np.mean(np.stack(bs_list, 0), axis=0)))

    _append_group_mean(ensemble_checkpoints)
    _append_group_mean(select_ensemble_checkpoints)
    return weights, biases
if __name__ == "__main__":
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    graphs = get_graph('./classifier')
    # indices of the classifier graphs used for the two ensembles
    allw_idx = [3*50+4*5+4,3*50+6*5+4,3*50+7*5+4,3*50+8*5+4,3*50+3*5+4,3*50+0*5+4,3*50+1*5+4,3*50+9*5+4,3*50+5*5+4,3*50+2*5+4]
    selective_idx = [3*50+4*5+4,3*50+6*5+4,3*50+3*5+4,3*50+0*5+4,3*50+1*5+4,3*50+9*5+4,3*50+5*5+4,3*50+2*5+4]
    # BUGFIX: the second return value was previously bound to the name
    # `get_weights_biases`, shadowing the function and leaving `biases`
    # undefined (NameError at the assert below).
    weights, biases = get_weights_biases(graphs, allw_idx, selective_idx)
    assert len(weights) == len(biases)
    # flatten each (1,1,in,out) weight tensor into per-unit means + biases
    parameters = []
    for i in range(len(biases)):
        param = get_parameters_w(weights[i][0][0])
        param += biases[i].tolist()
        parameters.append(param)
    # project the parameter vectors to 2-D and dump them to an Excel sheet
    X = np.array(parameters)
    estimator = PCA(n_components=2)
    new_params = estimator.fit_transform(X)
    write_excel(new_params)
| [
"numpy.mean",
"os.listdir",
"sklearn.decomposition.PCA",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"tensorflow.Session",
"os.path.join",
"numpy.stack",
"numpy.array",
"nets.nets_factory.get_network_fn",
"sys.path.append",
"xlsxwriter.Workbook",
"tensorflow.get_default_graph"
] | [((450, 570), 'sys.path.append', 'sys.path.append', (['"""/home/deepl/PHICOMM/FoodAI/FoodAi/tensorflow/tensorflow_models/models/research/PHICOMM/slim"""'], {}), "(\n '/home/deepl/PHICOMM/FoodAI/FoodAi/tensorflow/tensorflow_models/models/research/PHICOMM/slim'\n )\n", (465, 570), False, 'import sys\n'), ((628, 644), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (638, 644), False, 'import os\n'), ((2326, 2364), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['"""parameters.xlsx"""'], {}), "('parameters.xlsx')\n", (2345, 2364), False, 'import xlsxwriter\n'), ((3006, 3130), 'nets.nets_factory.get_network_fn', 'nets_factory.get_network_fn', (['FLAGS.model_name'], {'num_classes': '(dataset.num_classes - FLAGS.labels_offset)', 'is_training': '(False)'}), '(FLAGS.model_name, num_classes=dataset.\n num_classes - FLAGS.labels_offset, is_training=False)\n', (3033, 3130), False, 'from nets import nets_factory\n'), ((3183, 3297), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""input"""', 'dtype': 'tf.float32', 'shape': '[None, FLAGS.eval_image_size, FLAGS.eval_image_size, 3]'}), "(name='input', dtype=tf.float32, shape=[None, FLAGS.\n eval_image_size, FLAGS.eval_image_size, 3])\n", (3197, 3297), True, 'import tensorflow as tf\n'), ((3403, 3425), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3423, 3425), True, 'import tensorflow as tf\n'), ((3438, 3454), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3452, 3454), True, 'import tensorflow as tf\n'), ((3466, 3478), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3476, 3478), True, 'import tensorflow as tf\n'), ((4624, 4648), 'numpy.stack', 'np.stack', (['weights_tmp', '(0)'], {}), '(weights_tmp, 0)\n', (4632, 4648), True, 'import numpy as np\n'), ((4671, 4694), 'numpy.stack', 'np.stack', (['biases_tmp', '(0)'], {}), '(biases_tmp, 0)\n', (4679, 4694), True, 'import numpy as np\n'), ((5289, 5313), 'numpy.stack', 'np.stack', (['weights_tmp', '(0)'], {}), 
'(weights_tmp, 0)\n', (5297, 5313), True, 'import numpy as np\n'), ((5336, 5359), 'numpy.stack', 'np.stack', (['biases_tmp', '(0)'], {}), '(biases_tmp, 0)\n', (5344, 5359), True, 'import numpy as np\n'), ((6178, 6198), 'numpy.array', 'np.array', (['parameters'], {}), '(parameters)\n', (6186, 6198), True, 'import numpy as np\n'), ((6215, 6234), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (6218, 6234), False, 'from sklearn.decomposition import PCA\n'), ((3581, 3627), 'os.path.join', 'os.path.join', (['FLAGS.checkpoint_path', '"""*.data*"""'], {}), "(FLAGS.checkpoint_path, '*.data*')\n", (3593, 3627), False, 'import os\n'), ((3656, 3701), 'os.path.join', 'os.path.join', (['FLAGS.checkpoint_path', '"""*.ckpt"""'], {}), "(FLAGS.checkpoint_path, '*.ckpt')\n", (3668, 3701), False, 'import os\n'), ((752, 775), 'os.path.join', 'os.path.join', (['path', 'cls'], {}), '(path, cls)\n', (764, 775), False, 'import os\n'), ((1179, 1198), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (1189, 1198), True, 'import tensorflow as tf\n'), ((2231, 2246), 'numpy.mean', 'np.mean', (['weight'], {}), '(weight)\n', (2238, 2246), True, 'import numpy as np\n'), ((4125, 4137), 'numpy.array', 'np.array', (['ws'], {}), '(ws)\n', (4133, 4137), True, 'import numpy as np\n'), ((4161, 4173), 'numpy.array', 'np.array', (['bs'], {}), '(bs)\n', (4169, 4173), True, 'import numpy as np\n'), ((4545, 4557), 'numpy.array', 'np.array', (['ws'], {}), '(ws)\n', (4553, 4557), True, 'import numpy as np\n'), ((4585, 4597), 'numpy.array', 'np.array', (['bs'], {}), '(bs)\n', (4593, 4597), True, 'import numpy as np\n'), ((4722, 4756), 'numpy.mean', 'np.mean', (['weights_tmp_stack'], {'axis': '(0)'}), '(weights_tmp_stack, axis=0)\n', (4729, 4756), True, 'import numpy as np\n'), ((4787, 4820), 'numpy.mean', 'np.mean', (['biases_tmp_stack'], {'axis': '(0)'}), '(biases_tmp_stack, axis=0)\n', (4794, 4820), True, 'import numpy as np\n'), ((5210, 5222), 
'numpy.array', 'np.array', (['ws'], {}), '(ws)\n', (5218, 5222), True, 'import numpy as np\n'), ((5250, 5262), 'numpy.array', 'np.array', (['bs'], {}), '(bs)\n', (5258, 5262), True, 'import numpy as np\n'), ((5387, 5421), 'numpy.mean', 'np.mean', (['weights_tmp_stack'], {'axis': '(0)'}), '(weights_tmp_stack, axis=0)\n', (5394, 5421), True, 'import numpy as np\n'), ((5452, 5485), 'numpy.mean', 'np.mean', (['biases_tmp_stack'], {'axis': '(0)'}), '(biases_tmp_stack, axis=0)\n', (5459, 5485), True, 'import numpy as np\n')] |
import logging
import urllib.parse
from collections import defaultdict
import dask.array as da
import dask.dataframe as dd
import lightgbm
import numpy as np
import pandas as pd
from dask import delayed
from dask.distributed import wait, default_client, get_worker
from lightgbm.basic import _safe_call, _LIB
from toolz import first, assoc
try:
import sparse
except ImportError:
sparse = False
try:
import scipy.sparse as ss
except ImportError:
ss = False
logger = logging.getLogger(__name__)
def parse_host_port(address):
    """Split a scheme-qualified address such as 'tcp://1.2.3.4:8786' into
    a (hostname, port) tuple."""
    result = urllib.parse.urlparse(address)
    return result.hostname, result.port
def build_network_params(worker_addresses, local_worker_ip, local_listen_port, time_out):
    """Build LightGBM network parameters for distributed training.

    Each worker address is assigned a distinct port starting at
    *local_listen_port*; the 'machines' string lists every host:port pair.
    """
    port_of = {addr: local_listen_port + offset
               for offset, addr in enumerate(worker_addresses)}
    machines = ','.join(f'{parse_host_port(addr)[0]}:{port}'
                        for addr, port in port_of.items())
    return {
        'machines': machines,
        'local_listen_port': port_of[local_worker_ip],
        'time_out': time_out,
        'num_machines': len(port_of),
    }
def concat(seq):
    """Concatenate a sequence of same-typed parts along axis 0.

    Supports numpy arrays, pandas DataFrames/Series and (when the optional
    modules are available) scipy / pydata sparse matrices.
    Raises TypeError for any other element type.
    """
    head = seq[0]
    if isinstance(head, np.ndarray):
        return np.concatenate(seq, axis=0)
    if isinstance(head, (pd.DataFrame, pd.Series)):
        return pd.concat(seq, axis=0)
    if ss and isinstance(head, ss.spmatrix):
        return ss.vstack(seq, format='csr')
    if sparse and isinstance(head, sparse.SparseArray):
        return sparse.concatenate(seq, axis=0)
    raise TypeError('Data must be one of: numpy arrays, pandas dataframes, sparse matrices '
                    f'(from scipy or from sparse). Got {type(seq[0])}.')
def _train_part(params, model_factory, list_of_parts, worker_addresses, return_model, local_listen_port=12400,
                time_out=120, **kwargs):
    """Train one LightGBM model on the data parts local to this Dask worker.

    Runs on a worker (via client.submit). Injects the LightGBM network
    parameters for this worker, concatenates its local parts into a single
    dataset and fits the model; training synchronizes with the other workers
    over the LightGBM network. Returns the fitted model only when
    *return_model* is True (only one worker returns it), otherwise None.
    """
    network_params = build_network_params(worker_addresses, get_worker().address, local_listen_port, time_out)
    # NOTE: mutates the caller's params dict in place
    params.update(network_params)
    # Concatenate many parts into one
    # list_of_parts is a list of (data, label[, weight]) tuples; zip regroups
    # them into per-field sequences
    parts = tuple(zip(*list_of_parts))
    data = concat(parts[0])
    label = concat(parts[1])
    weight = concat(parts[2]) if len(parts) == 3 else None
    try:
        model = model_factory(**params)
        model.fit(data, label, sample_weight=weight, **kwargs)
    finally:
        # always release the LightGBM network resources, even if fit() raised
        _safe_call(_LIB.LGBM_NetworkFree())
    return model if return_model else None
def _split_to_parts(data, is_matrix):
parts = data.to_delayed()
if isinstance(parts, np.ndarray):
assert (parts.shape[1] == 1) if is_matrix else (parts.ndim == 1 or parts.shape[1] == 1)
parts = parts.flatten().tolist()
return parts
def train(client, data, label, params, model_factory, weight=None, **kwargs):
    """Train a LightGBM model on Dask collections.

    Splits data/label (and optional weight) into co-located part tuples,
    persists them on the cluster, then submits one _train_part task per
    worker over that worker's local parts. Exactly one worker (the "master")
    returns its fitted model; that model is returned to the caller.
    """
    # Split arrays/dataframes into parts. Arrange parts into tuples to enforce co-locality
    data_parts = _split_to_parts(data, is_matrix=True)
    label_parts = _split_to_parts(label, is_matrix=False)
    if weight is None:
        parts = list(map(delayed, zip(data_parts, label_parts)))
    else:
        weight_parts = _split_to_parts(weight, is_matrix=False)
        parts = list(map(delayed, zip(data_parts, label_parts, weight_parts)))
    # Start computation in the background
    parts = client.compute(parts)
    wait(parts)
    for part in parts:
        if part.status == 'error':
            return part  # trigger error locally
    # Find locations of all parts and map them to particular Dask workers
    key_to_part_dict = dict([(part.key, part) for part in parts])
    who_has = client.who_has(parts)
    worker_map = defaultdict(list)
    for key, workers in who_has.items():
        # a part may be replicated; arbitrarily credit it to the first worker
        worker_map[first(workers)].append(key_to_part_dict[key])
    # the first worker in the map is designated to return the fitted model
    master_worker = first(worker_map)
    worker_ncores = client.ncores()
    if 'tree_learner' not in params or params['tree_learner'].lower() not in {'data', 'feature', 'voting'}:
        logger.warning('Parameter tree_learner not set or set to incorrect value '
                       f'({params.get("tree_learner", None)}), using "data" as default')
        params['tree_learner'] = 'data'
    # Tell each worker to train on the parts that it has locally
    futures_classifiers = [client.submit(_train_part,
                                         model_factory=model_factory,
                                         # give each worker as many LightGBM threads as it has cores
                                         params=assoc(params, 'num_threads', worker_ncores[worker]),
                                         list_of_parts=list_of_parts,
                                         worker_addresses=list(worker_map.keys()),
                                         local_listen_port=params.get('local_listen_port', 12400),
                                         time_out=params.get('time_out', 120),
                                         return_model=(worker == master_worker),
                                         **kwargs)
                           for worker, list_of_parts in worker_map.items()]
    results = client.gather(futures_classifiers)
    # all workers except the master returned None; keep the single model
    results = [v for v in results if v]
    return results[0]
def _predict_part(part, model, proba, **kwargs):
data = part.values if isinstance(part, pd.DataFrame) else part
if data.shape[0] == 0:
result = np.array([])
elif proba:
result = model.predict_proba(data, **kwargs)
else:
result = model.predict(data, **kwargs)
if isinstance(part, pd.DataFrame):
if proba:
result = pd.DataFrame(result, index=part.index)
else:
result = pd.Series(result, index=part.index, name='predictions')
return result
def predict(client, model, data, proba=False, dtype=np.float32, **kwargs):
    """Run distributed prediction over a Dask dataframe or array.

    Maps _predict_part over the partitions/blocks of *data*; *proba* selects
    predict_proba (adding one output column per class for arrays). Raises
    TypeError for any non-Dask input.
    """
    if isinstance(data, dd._Frame):
        predictions = data.map_partitions(_predict_part, model=model, proba=proba, **kwargs)
        return predictions.values
    if isinstance(data, da.Array):
        if proba:
            # probabilities: one column per class in the output blocks
            kwargs['chunks'] = (data.chunks[0], (model.n_classes_,))
        else:
            # plain predictions collapse each block to a 1-D result
            kwargs['drop_axis'] = 1
        return data.map_blocks(_predict_part, model=model, proba=proba, dtype=dtype, **kwargs)
    raise TypeError(f'Data must be either Dask array or dataframe. Got {type(data)}.')
class _LGBMModel:
@staticmethod
def _copy_extra_params(source, dest):
params = source.get_params()
attributes = source.__dict__
extra_param_names = set(attributes.keys()).difference(params.keys())
for name in extra_param_names:
setattr(dest, name, attributes[name])
class LGBMClassifier(_LGBMModel, lightgbm.LGBMClassifier):
    """Distributed drop-in replacement for lightgbm.LGBMClassifier."""

    def fit(self, X, y=None, sample_weight=None, client=None, **kwargs):
        if client is None:
            client = default_client()
        fitted = train(client, X, y, self.get_params(True),
                       lightgbm.LGBMClassifier, sample_weight, **kwargs)
        # Copy both sklearn params and fitted attributes back onto self.
        self.set_params(**fitted.get_params())
        self._copy_extra_params(fitted, self)
        return self
    fit.__doc__ = lightgbm.LGBMClassifier.fit.__doc__

    def predict(self, X, client=None, **kwargs):
        client = default_client() if client is None else client
        return predict(client, self.to_local(), X, dtype=self.classes_.dtype, **kwargs)
    predict.__doc__ = lightgbm.LGBMClassifier.predict.__doc__

    def predict_proba(self, X, client=None, **kwargs):
        client = default_client() if client is None else client
        return predict(client, self.to_local(), X, proba=True, **kwargs)
    predict_proba.__doc__ = lightgbm.LGBMClassifier.predict_proba.__doc__

    def to_local(self):
        """Materialize a plain (non-dask) LGBMClassifier carrying this model's state."""
        local = lightgbm.LGBMClassifier(**self.get_params())
        self._copy_extra_params(self, local)
        return local
class LGBMRegressor(_LGBMModel, lightgbm.LGBMRegressor):
    """Distributed drop-in replacement for lightgbm.LGBMRegressor."""

    def fit(self, X, y=None, sample_weight=None, client=None, **kwargs):
        if client is None:
            client = default_client()
        fitted = train(client, X, y, self.get_params(True),
                       lightgbm.LGBMRegressor, sample_weight, **kwargs)
        # Copy both sklearn params and fitted attributes back onto self.
        self.set_params(**fitted.get_params())
        self._copy_extra_params(fitted, self)
        return self
    fit.__doc__ = lightgbm.LGBMRegressor.fit.__doc__

    def predict(self, X, client=None, **kwargs):
        client = default_client() if client is None else client
        return predict(client, self.to_local(), X, **kwargs)
    predict.__doc__ = lightgbm.LGBMRegressor.predict.__doc__

    def to_local(self):
        """Materialize a plain (non-dask) LGBMRegressor carrying this model's state."""
        local = lightgbm.LGBMRegressor(**self.get_params())
        self._copy_extra_params(self, local)
        return local
| [
"logging.getLogger",
"dask.distributed.get_worker",
"pandas.Series",
"dask.distributed.default_client",
"lightgbm.basic._LIB.LGBM_NetworkFree",
"dask.distributed.wait",
"numpy.array",
"sparse.concatenate",
"toolz.assoc",
"collections.defaultdict",
"toolz.first",
"numpy.concatenate",
"pandas.... | [((484, 511), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (501, 511), False, 'import logging\n'), ((3251, 3262), 'dask.distributed.wait', 'wait', (['parts'], {}), '(parts)\n', (3255, 3262), False, 'from dask.distributed import wait, default_client, get_worker\n'), ((3565, 3582), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3576, 3582), False, 'from collections import defaultdict\n'), ((3710, 3727), 'toolz.first', 'first', (['worker_map'], {}), '(worker_map)\n', (3715, 3727), False, 'from toolz import first, assoc\n'), ((1169, 1196), 'numpy.concatenate', 'np.concatenate', (['seq'], {'axis': '(0)'}), '(seq, axis=0)\n', (1183, 1196), True, 'import numpy as np\n'), ((5190, 5202), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5198, 5202), True, 'import numpy as np\n'), ((1268, 1290), 'pandas.concat', 'pd.concat', (['seq'], {'axis': '(0)'}), '(seq, axis=0)\n', (1277, 1290), True, 'import pandas as pd\n'), ((1890, 1902), 'dask.distributed.get_worker', 'get_worker', ([], {}), '()\n', (1900, 1902), False, 'from dask.distributed import wait, default_client, get_worker\n'), ((2314, 2337), 'lightgbm.basic._LIB.LGBM_NetworkFree', '_LIB.LGBM_NetworkFree', ([], {}), '()\n', (2335, 2337), False, 'from lightgbm.basic import _safe_call, _LIB\n'), ((5408, 5446), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'index': 'part.index'}), '(result, index=part.index)\n', (5420, 5446), True, 'import pandas as pd\n'), ((5482, 5537), 'pandas.Series', 'pd.Series', (['result'], {'index': 'part.index', 'name': '"""predictions"""'}), "(result, index=part.index, name='predictions')\n", (5491, 5537), True, 'import pandas as pd\n'), ((6637, 6653), 'dask.distributed.default_client', 'default_client', ([], {}), '()\n', (6651, 6653), False, 'from dask.distributed import wait, default_client, get_worker\n'), ((7091, 7107), 'dask.distributed.default_client', 'default_client', ([], {}), '()\n', (7105, 7107), False, 'from 
dask.distributed import wait, default_client, get_worker\n'), ((7362, 7378), 'dask.distributed.default_client', 'default_client', ([], {}), '()\n', (7376, 7378), False, 'from dask.distributed import wait, default_client, get_worker\n'), ((7859, 7875), 'dask.distributed.default_client', 'default_client', ([], {}), '()\n', (7873, 7875), False, 'from dask.distributed import wait, default_client, get_worker\n'), ((8311, 8327), 'dask.distributed.default_client', 'default_client', ([], {}), '()\n', (8325, 8327), False, 'from dask.distributed import wait, default_client, get_worker\n'), ((1355, 1383), 'scipy.sparse.vstack', 'ss.vstack', (['seq'], {'format': '"""csr"""'}), "(seq, format='csr')\n", (1364, 1383), True, 'import scipy.sparse as ss\n'), ((4323, 4374), 'toolz.assoc', 'assoc', (['params', '"""num_threads"""', 'worker_ncores[worker]'], {}), "(params, 'num_threads', worker_ncores[worker])\n", (4328, 4374), False, 'from toolz import first, assoc\n'), ((1459, 1490), 'sparse.concatenate', 'sparse.concatenate', (['seq'], {'axis': '(0)'}), '(seq, axis=0)\n', (1477, 1490), False, 'import sparse\n'), ((3643, 3657), 'toolz.first', 'first', (['workers'], {}), '(workers)\n', (3648, 3657), False, 'from toolz import first, assoc\n')] |
__author__ = 'igor'
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle
import os
F_TRAIN = 'data/training.csv'
F_TEST = 'data/test.csv'
def load(test=False, cols=None):
    """Load the facial-keypoints dataset from CSV.

    Args:
        test: when True read the test CSV (no targets), otherwise training.
        cols: optional iterable of target column names; when given, only
            those columns (plus 'Image') are kept.

    Returns:
        (X, y): X is a float32 array of flattened 96x96 images scaled to
        [0, 1]; y is a float32 array of keypoint targets scaled to [-1, 1],
        shuffled together with X, or None for the test set.
    """
    fname = F_TEST if test else F_TRAIN
    df = read_csv(os.path.expanduser(fname))  # load pandas dataframe
    # The 'Image' column holds space-separated pixel values; parse to arrays.
    df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=" "))
    if cols:  # restrict to the requested target columns
        # BUG FIX: was `list[cols]` — a subscription of the builtin `list`
        # type, not a call — which raises TypeError whenever cols is given.
        df = df[list(cols) + ["Image"]]
    print(df.count())  # report per-column counts
    df = df.dropna()  # drop rows with missing keypoint values
    X = np.vstack(df['Image'].values) / 255.  # scale pixels to [0, 1]
    # normalize
    X = X.astype(np.float32)
    if not test:  # only the training set carries target columns
        y = df[df.columns[:-1]].values
        y = (y - 48) / 48  # scale targets to [-1, 1]; images are 96x96 pixels
        y = y.astype(np.float32)
        # y = MinMaxScaler(feature_range=(-1, 1)).fit_transform(y)  # scale target in [-1,1]
        X, y = shuffle(X, y, random_state=42)  # shuffle the training set
    else:
        y = None
    return X, y
def load2d(test=False, cols=None):
    """Like load(), but reshape X to (n, 1, 96, 96) channel-first images.

    BUG FIX: `cols` was accepted but silently ignored; it is now forwarded
    to load() so column selection also works for the 2-D variant.
    """
    X, y = load(test, cols)
    X = X.reshape(-1, 1, 96, 96)
    return X, y
| [
"sklearn.utils.shuffle",
"numpy.fromstring",
"numpy.vstack",
"os.path.expanduser"
] | [((318, 343), 'os.path.expanduser', 'os.path.expanduser', (['fname'], {}), '(fname)\n', (336, 343), False, 'import os\n'), ((605, 634), 'numpy.vstack', 'np.vstack', (["df['Image'].values"], {}), "(df['Image'].values)\n", (614, 634), True, 'import numpy as np\n'), ((948, 978), 'sklearn.utils.shuffle', 'shuffle', (['X', 'y'], {'random_state': '(42)'}), '(X, y, random_state=42)\n', (955, 978), False, 'from sklearn.utils import shuffle\n'), ((438, 464), 'numpy.fromstring', 'np.fromstring', (['im'], {'sep': '""" """'}), "(im, sep=' ')\n", (451, 464), True, 'import numpy as np\n')] |
"""
Module: tfrecords
Tfrecords creation and reader for improved performance across multi-gpu
There were a tradeoffs made in this repo. It would be natural to save the generated prepreprocessed image to tfrecord from the generator. This results in enormous (100x) files.
"""
import tensorflow as tf
import os
import csv
import numpy as np
from PIL import Image
import cv2
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
from deepforest import preprocess
from deepforest.keras_retinanet.utils import image as keras_retinanet_image
def create_tf_example(fname, original_image):
    """Build a tf.train.Example carrying a crop's filename and dimensions.

    Only metadata is stored — the pixels stay on disk — so tensors can be
    re-read and reshaped at runtime without bloating the tfrecord.
    """
    filename_feature = tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[fname.encode('utf-8')]))
    height_feature = tf.train.Feature(
        int64_list=tf.train.Int64List(value=[original_image.shape[0]]))
    width_feature = tf.train.Feature(
        int64_list=tf.train.Int64List(value=[original_image.shape[1]]))
    feature_map = {
        'image/filename': filename_feature,
        'image/height': height_feature,
        'image/width': width_feature,
    }
    return tf.train.Example(features=tf.train.Features(feature=feature_map))
def create_tfrecords(tile_path,
                     patch_size=400,
                     patch_overlap=0.05,
                     savedir=".",
                     overwrite=True):
    """
    Crop a tile into overlapping patches, write each crop to disk as .png,
    and write a tfrecord (for the tf dataset API) holding per-crop metadata
    plus a csv of window offsets.
    Args:
        tile_path: Path on disk to .tif
        patch_size: side length in pixels of each square crop window
        patch_overlap: fractional overlap between neighbouring windows
        savedir: dir path to save tfrecords files
        overwrite: if False and the tile folder already exists, return the
            existing tfrecord path without re-writing crops
    Returns:
        tfrecord_filename: path of the written tfrecord, or None if the
            source image could not be opened
    """
    #Load image; a corrupt file is reported and skipped rather than raising
    try:
        raster = Image.open(tile_path)
    except Exception as e:
        print("Image {} is corrupt: {}".format(tile_path, e))
        return None
    numpy_image = np.array(raster)
    image_name = os.path.splitext(os.path.basename(tile_path))[0]
    #Create folder to hold crops (one folder per tile)
    tile_dir = "{}/{}".format(savedir, image_name)
    tfrecord_filename = os.path.join(tile_dir, "{}.tfrecord".format(image_name))
    try:
        os.mkdir(tile_dir)
    except Exception as e:
        # Folder already exists: keep going when overwriting, otherwise
        # reuse the previously written tfrecord.
        if overwrite:
            pass
        else:
            return tfrecord_filename
    #Create window crop index
    windows = preprocess.compute_windows(numpy_image, patch_size, patch_overlap)
    written_files = []
    tfwriter = tf.io.TFRecordWriter(tfrecord_filename)
    print("There are {} windows".format(len(windows)))
    metadata = []
    for index, window in enumerate(windows):
        #crop image
        crop = numpy_image[windows[index].indices()]
        #PIL reads RGB images, but retinanet follows cv2 image (BGR), so flip channels
        crop = crop[:, :, ::-1]
        #Crop and preprocess, resize (disabled — crops are stored raw on disk)
        #crop = keras_retinanet_image.preprocess_image(crop)
        #crop, scale = keras_retinanet_image.resize_image(crop)
        filename = os.path.join(tile_dir, "{}_{}.png".format(image_name, index))
        #Write crop to file
        cv2.imwrite(img=crop, filename=filename)
        #Write tfrecord entry with the crop's filename and dimensions
        tf_example = create_tf_example(filename, original_image=crop)
        tfwriter.write(tf_example.SerializeToString())
        #Record the window's offset for the metadata csv
        xmin, ymin, w, h = windows[index].getRect()
        d = {"window": [index], "xmin": [xmin], "ymin": [ymin]}
        df = pd.DataFrame(d)
        metadata.append(df)
    #Write metadata
    df = pd.concat(metadata)
    csv_filename = os.path.join(tile_dir, "{}.csv".format(image_name))
    df.to_csv(csv_filename, index=False)
    return tfrecord_filename
#Reading
def _parse_fn(example, image_size=800):
    """Parse one serialized tf.train.Example into a preprocessed image tensor.

    Reads the crop from the filename stored in the record, restores its
    original (height, width, 3) shape, resizes to (image_size, image_size),
    and applies retinanet-style caffe preprocessing: RGB -> BGR plus
    subtraction of the imagenet channel means.
    NOTE(review): tf.read_file and resize(..., align_corners=...) are
    TF1.x-era APIs — presumably this module targets TF1.x; confirm before
    upgrading TensorFlow.
    """
    #Define features stored per record: the crop's filename and dimensions
    features = {
        'image/filename': tf.io.FixedLenFeature([], tf.string),
        "image/height": tf.FixedLenFeature([], tf.int64),
        "image/width": tf.FixedLenFeature([], tf.int64)
    }
    # Load one example and parse
    example = tf.io.parse_single_example(example, features)
    #Load image from file
    filename = tf.cast(example["image/filename"], tf.string)
    loaded_image = tf.read_file(filename)
    loaded_image = tf.image.decode_image(loaded_image, 3)
    #Reshape tensor to the dimensions recorded at write time
    image_rows = tf.cast(example['image/height'], tf.int32)
    image_cols = tf.cast(example['image/width'], tf.int32)
    loaded_image = tf.reshape(loaded_image,
                              tf.stack([image_rows, image_cols, 3]),
                              name="cast_loaded_image")
    loaded_image = tf.image.resize(loaded_image, (image_size, image_size),
                                    align_corners=True)
    #needs to be float to subtract weights below
    loaded_image = tf.cast(loaded_image, tf.float32)
    #Turn loaded image from rgb into bgr and subtract imagenet means, see keras_retinanet.utils.image.preprocess_image
    red, green, blue = tf.unstack(loaded_image, axis=-1)
    #Subtract imagenet means
    blue = tf.subtract(blue, 103.939)
    green = tf.subtract(green, 116.779)
    red = tf.subtract(red, 123.68)
    #Recombine as BGR image
    loaded_image = tf.stack([blue, green, red], axis=-1)
    return loaded_image
def create_dataset(filepath, batch_size=1):
    """
    Build a parsed, batched, prefetched tf.data pipeline over tfrecords.
    Args:
        filepath: a tfrecord path or list of tfrecord paths
        batch_size: number of images per batch
    Returns:
        dataset: a tensorflow dataset object for model training or prediction
    """
    autotune = tf.data.experimental.AUTOTUNE
    # Parse records in parallel, batch (dropping ragged remainders so every
    # batch has a fixed shape), and keep a prefetch queue of ready tensors.
    pipeline = (tf.data.TFRecordDataset(filepath)
                .map(_parse_fn, num_parallel_calls=autotune)
                .batch(batch_size=batch_size, drop_remainder=True)
                .prefetch(buffer_size=autotune))
    return pipeline
def create_tensors(list_of_tfrecords, batch_size):
    """Create a wired tensor target from a list of tfrecords.
    Args:
        list_of_tfrecords: tfrecord paths to turn into a tf dataset
        batch_size: number of images per batch
    Returns:
        the one-shot iterator's next-element tensor (a batch of images)
    """
    pipeline = create_dataset(list_of_tfrecords, batch_size=batch_size)
    return pipeline.make_one_shot_iterator().get_next()
| [
"tensorflow.unstack",
"tensorflow.train.Int64List",
"numpy.array",
"tensorflow.io.FixedLenFeature",
"tensorflow.cast",
"pandas.concat",
"os.mkdir",
"pandas.DataFrame",
"tensorflow.stack",
"tensorflow.io.TFRecordWriter",
"tensorflow.subtract",
"deepforest.preprocess.compute_windows",
"tensorf... | [((1942, 1958), 'numpy.array', 'np.array', (['raster'], {}), '(raster)\n', (1950, 1958), True, 'import numpy as np\n'), ((2390, 2456), 'deepforest.preprocess.compute_windows', 'preprocess.compute_windows', (['numpy_image', 'patch_size', 'patch_overlap'], {}), '(numpy_image, patch_size, patch_overlap)\n', (2416, 2456), False, 'from deepforest import preprocess\n'), ((2496, 2535), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['tfrecord_filename'], {}), '(tfrecord_filename)\n', (2516, 2535), True, 'import tensorflow as tf\n'), ((3540, 3559), 'pandas.concat', 'pd.concat', (['metadata'], {}), '(metadata)\n', (3549, 3559), True, 'import pandas as pd\n'), ((4024, 4069), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example', 'features'], {}), '(example, features)\n', (4050, 4069), True, 'import tensorflow as tf\n'), ((4112, 4157), 'tensorflow.cast', 'tf.cast', (["example['image/filename']", 'tf.string'], {}), "(example['image/filename'], tf.string)\n", (4119, 4157), True, 'import tensorflow as tf\n'), ((4177, 4199), 'tensorflow.read_file', 'tf.read_file', (['filename'], {}), '(filename)\n', (4189, 4199), True, 'import tensorflow as tf\n'), ((4219, 4257), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['loaded_image', '(3)'], {}), '(loaded_image, 3)\n', (4240, 4257), True, 'import tensorflow as tf\n'), ((4295, 4337), 'tensorflow.cast', 'tf.cast', (["example['image/height']", 'tf.int32'], {}), "(example['image/height'], tf.int32)\n", (4302, 4337), True, 'import tensorflow as tf\n'), ((4355, 4396), 'tensorflow.cast', 'tf.cast', (["example['image/width']", 'tf.int32'], {}), "(example['image/width'], tf.int32)\n", (4362, 4396), True, 'import tensorflow as tf\n'), ((4585, 4660), 'tensorflow.image.resize', 'tf.image.resize', (['loaded_image', '(image_size, image_size)'], {'align_corners': '(True)'}), '(loaded_image, (image_size, image_size), align_corners=True)\n', (4600, 4660), True, 'import tensorflow as tf\n'), 
((4765, 4798), 'tensorflow.cast', 'tf.cast', (['loaded_image', 'tf.float32'], {}), '(loaded_image, tf.float32)\n', (4772, 4798), True, 'import tensorflow as tf\n'), ((4942, 4975), 'tensorflow.unstack', 'tf.unstack', (['loaded_image'], {'axis': '(-1)'}), '(loaded_image, axis=-1)\n', (4952, 4975), True, 'import tensorflow as tf\n'), ((5017, 5043), 'tensorflow.subtract', 'tf.subtract', (['blue', '(103.939)'], {}), '(blue, 103.939)\n', (5028, 5043), True, 'import tensorflow as tf\n'), ((5056, 5083), 'tensorflow.subtract', 'tf.subtract', (['green', '(116.779)'], {}), '(green, 116.779)\n', (5067, 5083), True, 'import tensorflow as tf\n'), ((5094, 5118), 'tensorflow.subtract', 'tf.subtract', (['red', '(123.68)'], {}), '(red, 123.68)\n', (5105, 5118), True, 'import tensorflow as tf\n'), ((5167, 5204), 'tensorflow.stack', 'tf.stack', (['[blue, green, red]'], {'axis': '(-1)'}), '([blue, green, red], axis=-1)\n', (5175, 5204), True, 'import tensorflow as tf\n'), ((5541, 5574), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['filepath'], {}), '(filepath)\n', (5564, 5574), True, 'import tensorflow as tf\n'), ((1792, 1813), 'PIL.Image.open', 'Image.open', (['tile_path'], {}), '(tile_path)\n', (1802, 1813), False, 'from PIL import Image\n'), ((2209, 2227), 'os.mkdir', 'os.mkdir', (['tile_dir'], {}), '(tile_dir)\n', (2217, 2227), False, 'import os\n'), ((3113, 3153), 'cv2.imwrite', 'cv2.imwrite', ([], {'img': 'crop', 'filename': 'filename'}), '(img=crop, filename=filename)\n', (3124, 3153), False, 'import cv2\n'), ((3465, 3480), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (3477, 3480), True, 'import pandas as pd\n'), ((3818, 3854), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (3839, 3854), True, 'import tensorflow as tf\n'), ((3880, 3912), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (3898, 3912), True, 'import tensorflow as tf\n'), 
((3937, 3969), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (3955, 3969), True, 'import tensorflow as tf\n'), ((4471, 4508), 'tensorflow.stack', 'tf.stack', (['[image_rows, image_cols, 3]'], {}), '([image_rows, image_cols, 3])\n', (4479, 4508), True, 'import tensorflow as tf\n'), ((1993, 2020), 'os.path.basename', 'os.path.basename', (['tile_path'], {}), '(tile_path)\n', (2009, 2020), False, 'import os\n'), ((994, 1045), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[original_image.shape[0]]'}), '(value=[original_image.shape[0]])\n', (1012, 1045), True, 'import tensorflow as tf\n'), ((1140, 1191), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[original_image.shape[1]]'}), '(value=[original_image.shape[1]])\n', (1158, 1191), True, 'import tensorflow as tf\n')] |
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import auc,precision_recall_curve,roc_curve,confusion_matrix
import os,sys
import pickle
def draw_ROC(y_true, y_pred):
    """Plot the ROC curve of scores y_pred against binary labels y_true."""
    false_pos, true_pos, _ = roc_curve(y_true, y_pred, pos_label=1)
    roc_auc = auc(false_pos, true_pos)
    plt.figure()
    width = 2
    plt.plot(false_pos, true_pos, color='darkorange',
             lw=width, label='ROC curve (area = %0.2f)' % roc_auc)
    # Chance diagonal for reference.
    plt.plot([0, 1], [0, 1], color='navy', lw=width, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()
def draw_PR(y_true, y_pred):
    """Plot the precision-recall curve of scores y_pred against binary labels y_true."""
    prec, rec, _ = precision_recall_curve(y_true, y_pred, pos_label=1)
    pr_auc = auc(rec, prec)
    # A no-skill classifier's precision equals the positive-class rate.
    baseline = np.sum(np.array(y_true) == 1) / len(y_true)
    plt.figure()
    width = 2
    plt.plot(rec, prec, color='darkorange',
             lw=width, label='PR curve (area = %0.2f)' % pr_auc)
    plt.plot([0, 1], [baseline, baseline], color='navy', lw=width, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('PR curve example')
    plt.legend(loc="lower right")
    plt.show()
def seperateCNN():
    """Build the two-branch peptide/HLA CNN.

    Branch x consumes a (10, 12, 1) peptide encoding and branch y a
    (46, 12, 1) HLA pseudo-sequence encoding; their flattened features are
    concatenated and passed through dense layers to a single sigmoid
    immunogenicity score.  Trailing comments give the feature-map length
    after each layer.

    Returns:
        an uncompiled keras.Model taking [peptide, hla] inputs.
    """
    input1 = keras.Input(shape=(10, 12, 1))
    input2 = keras.Input(shape=(46, 12, 1))
    x = layers.Conv2D(filters=16, kernel_size=(2, 12))(input1)  # 9
    x = layers.BatchNormalization()(x)
    x = keras.activations.relu(x)
    x = layers.Conv2D(filters=32, kernel_size=(2, 1))(x)  # 8
    x = layers.BatchNormalization()(x)
    x = keras.activations.relu(x)
    x = layers.MaxPool2D(pool_size=(2, 1), strides=(2, 1))(x)  # 4
    x = layers.Flatten()(x)
    x = keras.Model(inputs=input1, outputs=x)
    y = layers.Conv2D(filters=16, kernel_size=(15, 12))(input2)  # 32
    y = layers.BatchNormalization()(y)
    y = keras.activations.relu(y)
    y = layers.MaxPool2D(pool_size=(2, 1), strides=(2, 1))(y)  # 16
    y = layers.Conv2D(filters=32,kernel_size=(9,1))(y)  # 8
    y = layers.BatchNormalization()(y)
    y = keras.activations.relu(y)
    y = layers.MaxPool2D(pool_size=(2, 1),strides=(2,1))(y)  # 4
    y = layers.Flatten()(y)
    y = keras.Model(inputs=input2,outputs=y)
    # Merge the two branches and regress a single score in [0, 1].
    combined = layers.concatenate([x.output,y.output])
    z = layers.Dense(128,activation='relu')(combined)
    z = layers.Dropout(0.2)(z)
    z = layers.Dense(1,activation='sigmoid')(z)
    model = keras.Model(inputs=[input1,input2],outputs=z)
    return model
def pull_peptide_aaindex(dataset):
    """Stack the peptide encodings (item[0]) of a dataset into one [N, 10, 12, 1] array."""
    stacked = np.empty([len(dataset), 10, 12, 1])
    for row, item in enumerate(dataset):
        stacked[row] = item[0]
    return stacked
def pull_hla_aaindex(dataset):
    """Stack the HLA encodings (item[1]) of a dataset into one [N, 46, 12, 1] array."""
    stacked = np.empty([len(dataset), 46, 12, 1])
    for row, item in enumerate(dataset):
        stacked[row] = item[1]
    return stacked
def pull_label_aaindex(dataset):
    """Binarize the immunogenicity field (item[2]): 'Negative' -> 0, anything else -> 1.

    Returns a numpy column vector of shape [N, 1].
    """
    labels = []
    for entry in dataset:
        labels.append(0 if entry[2] == 'Negative' else 1)
    return np.array(labels).reshape(-1, 1)
def aaindex(peptide, after_pca):
    """Encode a peptide as a (len, 12) array of per-residue AAindex-PCA vectors.

    'X' is treated as the gap symbol '-'; residue lookup is done after
    upper-casing, against the 20 standard amino acids plus '-'.
    """
    alphabet = 'ARNDCQEGHILKMFPSTWYV-'
    profile = np.transpose(after_pca)  # columns indexed by residue, rows are the 12 PCA components
    encoded = np.empty([len(peptide), 12])
    for pos, residue in enumerate(peptide):
        symbol = '-' if residue == 'X' else residue
        symbol = symbol.upper()
        encoded[pos, :] = profile[:, alphabet.index(symbol)]
    return encoded
def peptide_data_aaindex(peptide, after_pca):
    """Encode a 9- or 10-mer peptide as a [10, 12, 1] numpy array.

    9-mers are padded to length 10 by inserting the gap symbol '-' after
    position 5 before encoding with aaindex().

    Args:
        peptide: a peptide string of length 9 or 10.
        after_pca: the (21, 12) AAindex-PCA matrix consumed by aaindex().

    Returns:
        numpy array of shape [10, 12, 1].

    Raises:
        ValueError: if the peptide length is not 9 or 10 (previously other
            lengths surfaced as a confusing UnboundLocalError on `encode`).
    """
    length = len(peptide)
    if length == 10:
        encode = aaindex(peptide, after_pca)
    elif length == 9:
        # Pad to 10 residues with a centered gap.
        peptide = peptide[:5] + '-' + peptide[5:]
        encode = aaindex(peptide, after_pca)
    else:
        raise ValueError('peptide must be of length 9 or 10, got {}'.format(length))
    return encode.reshape(encode.shape[0], encode.shape[1], -1)
def dict_inventory(inventory):
    """Index HLA alleles as {gene: {allele group: [protein numbers]}}.

    Allele strings look like 'HLA-A*0101': the gene letter sits at
    position 4, the two-digit allele group at positions 6-7, and the
    protein number at positions 8 onward.
    """
    catalog = {'A': {}, 'B': {}, 'C': {}}
    for allele in inventory:
        gene = allele[4]
        group = allele[6:8]
        protein = allele[8:]
        catalog[gene].setdefault(group, []).append(protein)
    return catalog
def rescue_unknown_hla(hla, dic_inventory):
    """Map an allele missing from the inventory to the numerically nearest known allele.

    If the query's allele group is known, pick the known protein number
    closest to the query's; otherwise pick the closest known group and
    take its first protein number.
    """
    gene = hla[4]
    group = hla[6:8]
    protein = hla[8:]
    groups = dic_inventory[gene]

    def closest(candidates, target):
        # Candidate (2-digit string) with the smallest numeric distance to target;
        # ties resolve to the earliest candidate, matching the original min().
        return min(candidates, key=lambda c: abs(int(target) - int(c)))

    known_proteins = groups.get(group)
    if known_proteins is not None:
        best = closest(known_proteins, protein)
        return 'HLA-' + str(gene) + '*' + str(group) + str(best)
    best_group = closest(list(groups.keys()), group)
    return 'HLA-' + str(gene) + '*' + str(best_group) + str(groups[best_group][0])
def hla_data_aaindex(hla_dic, hla_type, after_pca, inventory_dic=None):
    """Encode an HLA pseudo-sequence as a [len(seq), 12, 1] numpy array.

    Args:
        hla_dic: {allele name: pseudo-sequence} lookup table.
        hla_type: allele name; unknown alleles are rescued to the nearest
            known allele before the sequence lookup.
        after_pca: the (21, 12) AAindex-PCA matrix consumed by aaindex().
        inventory_dic: inventory produced by dict_inventory(). Defaults to
            the module-level ``dic_inventory`` for backward compatibility
            (previously that global — defined only in the __main__ block —
            was the only option, making the function unusable elsewhere).

    Returns:
        numpy array of shape [len(seq), 12, 1].
    """
    try:
        seq = hla_dic[hla_type]
    except KeyError:
        if inventory_dic is None:
            # Fall back to the historical module-level global.
            inventory_dic = dic_inventory
        hla_type = rescue_unknown_hla(hla_type, inventory_dic)
        seq = hla_dic[hla_type]
    encode = aaindex(seq, after_pca)
    return encode.reshape(encode.shape[0], encode.shape[1], -1)
def construct_aaindex(ori, hla_dic, after_pca):
    """Build one (peptide_encoding, hla_encoding, immunogenicity) tuple per row of ori."""
    triples = []
    rows = zip(ori['peptide'], ori['HLA'], ori['immunogenicity'])
    for peptide, hla_type, immuno_raw in rows:
        immuno = np.array(immuno_raw).reshape(1, -1)  # [1, 1]
        encode_pep = peptide_data_aaindex(peptide, after_pca)  # [10, 12, 1]
        encode_hla = hla_data_aaindex(hla_dic, hla_type, after_pca)  # [46, 12, 1]
        triples.append((encode_pep, encode_hla, immuno))
    return triples
def hla_df_to_dic(hla):
    """Turn an HLA dataframe (columns 'HLA', 'pseudo') into an allele -> pseudo-sequence dict.

    Duplicate alleles keep the last row's sequence, matching the original
    row-by-row assignment.
    """
    return dict(zip(hla['HLA'], hla['pseudo']))
def retain_910(ori):
    """Keep only rows whose peptide is a 9-mer or 10-mer, reindexed from 0."""
    mask = [len(pep) in (9, 10) for pep in ori['peptide']]
    kept = ori.loc[mask]
    return kept.set_index(pd.Index(np.arange(kept.shape[0])))
def draw_history(history):
    """Plot train vs validation loss (top panel) and accuracy (bottom panel) from a Keras History."""
    panels = [(211, 'Loss', 'loss', 'val_loss'),
              (212, 'Accuracy', 'accuracy', 'val_accuracy')]
    for position, title, train_key, val_key in panels:
        plt.subplot(position)
        plt.title(title)
        plt.plot(history.history[train_key], label='train')
        plt.plot(history.history[val_key], label='validation')
        plt.legend()
    plt.show()
if __name__ == '__main__':
    os.chdir('/Users/ligk2e/Desktop/deepimmuno/reproduce')
    # Load the PCA-reduced AAindex matrix and the (shuffled) training table.
    after_pca = np.loadtxt('./data/after_pca.txt')
    ori = pd.read_csv('./data/remove0123_sample100.csv')
    ori = ori.sample(frac=1, replace=False).set_index(pd.Index(np.arange(ori.shape[0])))
    hla = pd.read_csv('./data/hla2paratopeTable_aligned.txt', sep='\t')
    hla_dic = hla_df_to_dic(hla)
    inventory = list(hla_dic.keys())
    dic_inventory = dict_inventory(inventory)
    # Encode every (peptide, HLA) pair and split into model inputs / labels.
    dataset = construct_aaindex(ori, hla_dic, after_pca)
    input1 = pull_peptide_aaindex(dataset)
    input2 = pull_hla_aaindex(dataset)
    label = pull_label_aaindex(dataset)
    # let's do a train/validation split
    bucket_roc = []
    bucket_pr = []
    for i in range(10):
        # Fresh random 90/10 split for each of the 10 cross-validation rounds.
        array = np.arange(len(dataset))
        train_index = np.random.choice(array,int(len(dataset)*0.9),replace=False)
        valid_index = [item for item in array if item not in train_index]
        input1_train = input1[train_index]
        input1_valid = input1[valid_index]
        input2_train = input2[train_index]
        input2_valid = input2[valid_index]
        label_train = label[train_index]
        label_valid = label[valid_index]
        cnn_model = seperateCNN()
        cnn_model.compile(
            loss=keras.losses.MeanSquaredError(),
            optimizer=keras.optimizers.Adam(lr=0.0001),
            metrics=['accuracy'])
        # Stop on plateauing validation loss, or quickly once training loss stalls.
        callback_val = keras.callbacks.EarlyStopping(monitor='val_loss', patience=15,restore_best_weights=False)
        callback_train = keras.callbacks.EarlyStopping(monitor='loss',patience=2,restore_best_weights=False)
        history = cnn_model.fit(
            x=[input1_train,input2_train],   # feed a list into
            y=label_train,
            validation_data = ([input1_valid,input2_valid],label_valid),
            batch_size=128,
            epochs=200,
            class_weight = {0:0.5,1:0.5},   # I have 20% positive and 80% negative in my training data
            callbacks = [callback_val,callback_train])
        # Score the held-out fold and collect its ROC / PR curves.
        valid = ori.loc[valid_index]
        valid['cnn_regress'] = cnn_model.predict([input1_valid,input2_valid])
        valid = valid.sort_values(by='cnn_regress',ascending=False).set_index(pd.Index(np.arange(valid.shape[0])))
        y_true = [1 if not item == 'Negative' else 0 for item in valid['immunogenicity']]
        y_pred = valid['cnn_regress']
        fpr,tpr,_ = roc_curve(y_true,y_pred)
        area = auc(fpr,tpr)
        bucket_roc.append((fpr,tpr,_,area))
        precision, recall, _ = precision_recall_curve(y_true, y_pred)
        area = auc(recall, precision)
        bucket_pr.append((precision, recall, _, area))
    # ROC
    bucket = bucket_roc
    fig,ax = plt.subplots()
    for i in range(10):
        ax.plot(bucket[i][0],bucket[i][1],lw=0.5,label='CV(Fold={0}), AUC={1:.2f}'.format(i+1,bucket[i][3]))
    ax.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.set_title('Receiver operating characteristic')
    ax.legend(loc="lower right",fontsize=9)
    plt.show()
    # PR
    bucket = bucket_pr
    fig,ax = plt.subplots()
    for i in range(10):
        ax.plot(bucket[i][1],bucket[i][0],lw=0.5,label='CV(Fold={0}),AUC={1:.2f}'.format(i+1,bucket[i][3]))
    #baseline = np.sum(np.array(y_true) == 1) / len(y_true) # 0.4735
    baseline = 0.4735
    ax.plot([0, 1], [baseline, baseline], color='navy', lw=2, linestyle='--')
    ax.set_xlim([0.0, 1.0])
    #ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_title('PR curve example')
    ax.legend(loc="lower left",fontsize=8)
    plt.show()
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.losses.MeanSquaredError",
"sklearn.metrics.auc",
"tensorflow.keras.layers.BatchNormalization",
"numpy.array",
"tensorflow.keras.callbacks.EarlyStopping",
"sklearn.metrics.roc_curve",
"tensorflow.keras.layers.Dense",
"numpy.arange",
... | [((9766, 9780), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9778, 9780), True, 'import matplotlib.pyplot as plt\n'), ((10178, 10188), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10186, 10188), True, 'import matplotlib.pyplot as plt\n'), ((10223, 10237), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10235, 10237), True, 'import matplotlib.pyplot as plt\n'), ((10693, 10703), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10701, 10703), True, 'import matplotlib.pyplot as plt\n'), ((323, 361), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_true', 'y_pred'], {'pos_label': '(1)'}), '(y_true, y_pred, pos_label=1)\n', (332, 361), False, 'from sklearn.metrics import auc, precision_recall_curve, roc_curve, confusion_matrix\n'), ((376, 389), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (379, 389), False, 'from sklearn.metrics import auc, precision_recall_curve, roc_curve, confusion_matrix\n'), ((399, 411), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (409, 411), True, 'import matplotlib.pyplot as plt\n'), ((427, 523), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': '"""darkorange"""', 'lw': 'lw', 'label': "('ROC curve (area = %0.2f)' % area_mine)"}), "(fpr, tpr, color='darkorange', lw=lw, label=\n 'ROC curve (area = %0.2f)' % area_mine)\n", (435, 523), True, 'import matplotlib.pyplot as plt\n'), ((535, 596), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (543, 596), True, 'import matplotlib.pyplot as plt\n'), ((601, 621), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (609, 621), True, 'import matplotlib.pyplot as plt\n'), ((626, 647), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (634, 647), True, 'import matplotlib.pyplot as plt\n'), ((652, 685), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (662, 685), True, 'import matplotlib.pyplot as plt\n'), ((690, 722), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (700, 722), True, 'import matplotlib.pyplot as plt\n'), ((727, 781), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver operating characteristic example"""'], {}), "('Receiver operating characteristic example')\n", (736, 781), True, 'import matplotlib.pyplot as plt\n'), ((786, 815), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (796, 815), True, 'import matplotlib.pyplot as plt\n'), ((820, 830), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (828, 830), True, 'import matplotlib.pyplot as plt\n'), ((885, 936), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_true', 'y_pred'], {'pos_label': '(1)'}), '(y_true, y_pred, pos_label=1)\n', (907, 936), False, 'from sklearn.metrics import auc, precision_recall_curve, roc_curve, confusion_matrix\n'), ((949, 971), 'sklearn.metrics.auc', 'auc', (['recall', 'precision'], {}), '(recall, precision)\n', (952, 971), False, 'from sklearn.metrics import auc, precision_recall_curve, roc_curve, confusion_matrix\n'), ((1035, 1047), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1045, 1047), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1165), 'matplotlib.pyplot.plot', 'plt.plot', (['recall', 'precision'], {'color': '"""darkorange"""', 'lw': 'lw', 'label': "('PR curve (area = %0.2f)' % area_PR)"}), "(recall, precision, color='darkorange', lw=lw, label=\n 'PR curve (area = %0.2f)' % area_PR)\n", (1071, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1176, 1251), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[baseline, baseline]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [baseline, baseline], color='navy', 
lw=lw, linestyle='--')\n", (1184, 1251), True, 'import matplotlib.pyplot as plt\n'), ((1256, 1276), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1264, 1276), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1302), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (1289, 1302), True, 'import matplotlib.pyplot as plt\n'), ((1307, 1327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (1317, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1332, 1355), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (1342, 1355), True, 'import matplotlib.pyplot as plt\n'), ((1360, 1389), 'matplotlib.pyplot.title', 'plt.title', (['"""PR curve example"""'], {}), "('PR curve example')\n", (1369, 1389), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1423), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (1404, 1423), True, 'import matplotlib.pyplot as plt\n'), ((1428, 1438), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1436, 1438), True, 'import matplotlib.pyplot as plt\n'), ((1472, 1502), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(10, 12, 1)'}), '(shape=(10, 12, 1))\n', (1483, 1502), True, 'import tensorflow.keras as keras\n'), ((1516, 1546), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(46, 12, 1)'}), '(shape=(46, 12, 1))\n', (1527, 1546), True, 'import tensorflow.keras as keras\n'), ((1663, 1688), 'tensorflow.keras.activations.relu', 'keras.activations.relu', (['x'], {}), '(x)\n', (1685, 1688), True, 'import tensorflow.keras as keras\n'), ((1800, 1825), 'tensorflow.keras.activations.relu', 'keras.activations.relu', (['x'], {}), '(x)\n', (1822, 1825), True, 'import tensorflow.keras as keras\n'), ((1929, 1966), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'input1', 'outputs': 'x'}), '(inputs=input1, outputs=x)\n', (1940, 1966), True, 
'import tensorflow.keras as keras\n'), ((2088, 2113), 'tensorflow.keras.activations.relu', 'keras.activations.relu', (['y'], {}), '(y)\n', (2110, 2113), True, 'import tensorflow.keras as keras\n'), ((2291, 2316), 'tensorflow.keras.activations.relu', 'keras.activations.relu', (['y'], {}), '(y)\n', (2313, 2316), True, 'import tensorflow.keras as keras\n'), ((2418, 2455), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'input2', 'outputs': 'y'}), '(inputs=input2, outputs=y)\n', (2429, 2455), True, 'import tensorflow.keras as keras\n'), ((2471, 2511), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[x.output, y.output]'], {}), '([x.output, y.output])\n', (2489, 2511), False, 'from tensorflow.keras import layers\n'), ((2657, 2704), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[input1, input2]', 'outputs': 'z'}), '(inputs=[input1, input2], outputs=z)\n', (2668, 2704), True, 'import tensorflow.keras as keras\n'), ((3356, 3379), 'numpy.transpose', 'np.transpose', (['after_pca'], {}), '(after_pca)\n', (3368, 3379), True, 'import numpy as np\n'), ((6641, 6657), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (6652, 6657), True, 'import matplotlib.pyplot as plt\n'), ((6662, 6679), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss"""'], {}), "('Loss')\n", (6671, 6679), True, 'import matplotlib.pyplot as plt\n'), ((6684, 6732), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {'label': '"""train"""'}), "(history.history['loss'], label='train')\n", (6692, 6732), True, 'import matplotlib.pyplot as plt\n'), ((6737, 6794), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {'label': '"""validation"""'}), "(history.history['val_loss'], label='validation')\n", (6745, 6794), True, 'import matplotlib.pyplot as plt\n'), ((6799, 6811), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6809, 6811), True, 'import matplotlib.pyplot as plt\n'), ((6852, 6868), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (6863, 6868), True, 'import matplotlib.pyplot as plt\n'), ((6873, 6894), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy"""'], {}), "('Accuracy')\n", (6882, 6894), True, 'import matplotlib.pyplot as plt\n'), ((6899, 6951), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {'label': '"""train"""'}), "(history.history['accuracy'], label='train')\n", (6907, 6951), True, 'import matplotlib.pyplot as plt\n'), ((6956, 7017), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {'label': '"""validation"""'}), "(history.history['val_accuracy'], label='validation')\n", (6964, 7017), True, 'import matplotlib.pyplot as plt\n'), ((7022, 7034), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7032, 7034), True, 'import matplotlib.pyplot as plt\n'), ((7039, 7049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7047, 7049), True, 'import matplotlib.pyplot as plt\n'), ((7083, 7137), 'os.chdir', 'os.chdir', (['"""/Users/ligk2e/Desktop/deepimmuno/reproduce"""'], {}), "('/Users/ligk2e/Desktop/deepimmuno/reproduce')\n", (7091, 7137), False, 'import os, sys\n'), ((7154, 7188), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/after_pca.txt"""'], {}), "('./data/after_pca.txt')\n", (7164, 7188), True, 'import numpy as np\n'), ((7199, 7245), 'pandas.read_csv', 'pd.read_csv', (['"""./data/remove0123_sample100.csv"""'], {}), "('./data/remove0123_sample100.csv')\n", (7210, 7245), True, 'import pandas as pd\n'), ((7345, 7406), 'pandas.read_csv', 'pd.read_csv', (['"""./data/hla2paratopeTable_aligned.txt"""'], {'sep': '"""\t"""'}), "('./data/hla2paratopeTable_aligned.txt', sep='\\t')\n", (7356, 7406), True, 'import pandas as pd\n'), ((1556, 1602), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(16)', 'kernel_size': '(2, 12)'}), '(filters=16, kernel_size=(2, 12))\n', (1569, 1602), False, 'from tensorflow.keras import layers\n'), ((1624, 
1651), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1649, 1651), False, 'from tensorflow.keras import layers\n'), ((1697, 1742), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(2, 1)'}), '(filters=32, kernel_size=(2, 1))\n', (1710, 1742), False, 'from tensorflow.keras import layers\n'), ((1761, 1788), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1786, 1788), False, 'from tensorflow.keras import layers\n'), ((1834, 1884), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {'pool_size': '(2, 1)', 'strides': '(2, 1)'}), '(pool_size=(2, 1), strides=(2, 1))\n', (1850, 1884), False, 'from tensorflow.keras import layers\n'), ((1901, 1917), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (1915, 1917), False, 'from tensorflow.keras import layers\n'), ((1976, 2023), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(16)', 'kernel_size': '(15, 12)'}), '(filters=16, kernel_size=(15, 12))\n', (1989, 2023), False, 'from tensorflow.keras import layers\n'), ((2049, 2076), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2074, 2076), False, 'from tensorflow.keras import layers\n'), ((2122, 2172), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {'pool_size': '(2, 1)', 'strides': '(2, 1)'}), '(pool_size=(2, 1), strides=(2, 1))\n', (2138, 2172), False, 'from tensorflow.keras import layers\n'), ((2190, 2235), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(9, 1)'}), '(filters=32, kernel_size=(9, 1))\n', (2203, 2235), False, 'from tensorflow.keras import layers\n'), ((2252, 2279), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2277, 2279), False, 'from tensorflow.keras import layers\n'), ((2325, 2375), 'tensorflow.keras.layers.MaxPool2D', 
'layers.MaxPool2D', ([], {'pool_size': '(2, 1)', 'strides': '(2, 1)'}), '(pool_size=(2, 1), strides=(2, 1))\n', (2341, 2375), False, 'from tensorflow.keras import layers\n'), ((2390, 2406), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2404, 2406), False, 'from tensorflow.keras import layers\n'), ((2519, 2555), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2531, 2555), False, 'from tensorflow.keras import layers\n'), ((2573, 2592), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (2587, 2592), False, 'from tensorflow.keras import layers\n'), ((2604, 2641), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2616, 2641), False, 'from tensorflow.keras import layers\n'), ((3230, 3246), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (3238, 3246), True, 'import numpy as np\n'), ((8483, 8577), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(15)', 'restore_best_weights': '(False)'}), "(monitor='val_loss', patience=15,\n restore_best_weights=False)\n", (8512, 8577), True, 'import tensorflow.keras as keras\n'), ((8598, 8687), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""loss"""', 'patience': '(2)', 'restore_best_weights': '(False)'}), "(monitor='loss', patience=2,\n restore_best_weights=False)\n", (8627, 8687), True, 'import tensorflow.keras as keras\n'), ((9469, 9494), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (9478, 9494), False, 'from sklearn.metrics import auc, precision_recall_curve, roc_curve, confusion_matrix\n'), ((9509, 9522), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (9512, 9522), False, 'from sklearn.metrics import auc, precision_recall_curve, 
roc_curve, confusion_matrix\n'), ((9598, 9636), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (9620, 9636), False, 'from sklearn.metrics import auc, precision_recall_curve, roc_curve, confusion_matrix\n'), ((9652, 9674), 'sklearn.metrics.auc', 'auc', (['recall', 'precision'], {}), '(recall, precision)\n', (9655, 9674), False, 'from sklearn.metrics import auc, precision_recall_curve, roc_curve, confusion_matrix\n'), ((6566, 6590), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (6575, 6590), True, 'import numpy as np\n'), ((7309, 7332), 'numpy.arange', 'np.arange', (['ori.shape[0]'], {}), '(ori.shape[0])\n', (7318, 7332), True, 'import numpy as np\n'), ((993, 1009), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (1001, 1009), True, 'import numpy as np\n'), ((5760, 5799), 'numpy.array', 'np.array', (["ori['immunogenicity'].iloc[i]"], {}), "(ori['immunogenicity'].iloc[i])\n", (5768, 5799), True, 'import numpy as np\n'), ((8336, 8367), 'tensorflow.keras.losses.MeanSquaredError', 'keras.losses.MeanSquaredError', ([], {}), '()\n', (8365, 8367), True, 'import tensorflow.keras as keras\n'), ((8391, 8423), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (8412, 8423), True, 'import tensorflow.keras as keras\n'), ((9292, 9317), 'numpy.arange', 'np.arange', (['valid.shape[0]'], {}), '(valid.shape[0])\n', (9301, 9317), True, 'import numpy as np\n')] |
import numpy
from PIL import Image
import sys

# Merge a rectangular grid of 4096x4096 satellite tiles into one large image.
# Usage: <script> path region x_start y_start x_end y_end out_fname
# Tiles are expected at: {path}/{region}_{x}_{y}_sat.png
path = sys.argv[1]
region = sys.argv[2]
x_start = int(sys.argv[3])
y_start = int(sys.argv[4])
x_end = int(sys.argv[5])
y_end = int(sys.argv[6])
out_fname = sys.argv[7]
x_len = x_end - x_start
y_len = y_end - y_start
# Output canvas: (x tiles * 4096) by (y tiles * 4096), RGB only
merged_im = numpy.zeros((x_len * 4096, y_len * 4096, 3), dtype='uint8')
for i in range(x_len):  # BUG FIX: xrange is Python 2 only; use range
    for j in range(y_len):
        fname = '{}/{}_{}_{}_sat.png'.format(path, region, x_start + i, y_start + j)
        # BUG FIX: scipy.ndimage.imread was removed from SciPy (>=1.2);
        # read the tile via Pillow instead. [:, :, 0:3] drops any alpha channel.
        tile = numpy.asarray(Image.open(fname))[:, :, 0:3]
        # swapaxes(0, 1): presumably converts (row, col) tile order to the
        # (x, y) canvas indexing used here — matches the original behavior.
        merged_im[i * 4096:(i + 1) * 4096, j * 4096:(j + 1) * 4096, :] = tile.swapaxes(0, 1)
# Swap back to (row, col) order for image output
Image.fromarray(merged_im.swapaxes(0, 1)).save(out_fname)
| [
"numpy.zeros"
] | [((298, 357), 'numpy.zeros', 'numpy.zeros', (['(x_len * 4096, y_len * 4096, 3)'], {'dtype': '"""uint8"""'}), "((x_len * 4096, y_len * 4096, 3), dtype='uint8')\n", (309, 357), False, 'import numpy\n')] |
"""
Contains useful graphic generators. Currently, effect measure plots and functional form assessment plots
are implemented. Uses matplotlib to generate graphics. Future inclusions include forest plots
Contents:
Functional form assessment- func_form_plot()
Forest plot/ effect measure plot- EffectMeasurePlot()
P-value distribution plot- pvalue_plot()
Spaghetti plot- spaghetti_plot()
Receiver-Operator Curve- roc()
Dynamic risk plot- dynamic_risk_plot()
"""
import numpy as np
import pandas as pd
from scipy.stats import norm
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.genmod.families import links
from statsmodels.nonparametric.smoothers_lowess import lowess
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as mticker
class EffectMeasurePlot:
"""Used to generate effect measure plots. effectmeasure plot accepts four list type objects.
effectmeasure_plot is initialized with the associated names for each line, the point estimate,
the lower confidence limit, and the upper confidence limit.
Plots will resemble the following form:
_____________________________________________ Measure % CI
| |
1 | --------o------- | x n, 2n
| |
2 | ----o---- | w m, 2m
| |
|___________________________________________|
# # # #
The following functions (and their purposes) live within effectmeasure_plot
labels(**kwargs)
Used to change the labels in the plot, as well as the center and scale. Inputs are
keyword arguments
KEYWORDS:
-effectmeasure + changes the effect measure label
-conf_int + changes the confidence interval label
-scale + changes the scale to either log or linear
-center + changes the reference line for the center
colors(**kwargs)
Used to change the color of points and lines. Also can change the shape of points.
Valid colors and shapes for matplotlib are required. Inputs are keyword arguments
KEYWORDS:
-errorbarcolor + changes the error bar colors
-linecolor + changes the color of the reference line
-pointcolor + changes the color of the points
-pointshape + changes the shape of points
plot(t_adjuster=0.01,decimal=3,size=3)
Generates the effect measure plot of the input lists according to the pre-specified
colors, shapes, and labels of the class object
Arguments:
-t_adjuster + used to refine alignment of the table with the line graphs.
When generate plots, trial and error for this value are usually
necessary
-decimal + number of decimal places to display in the table
-size + size of the plot to generate
Example)
>>>lab = ['One','Two'] #generating lists of data to plot
>>>emm = [1.01,1.31]
>>>lcl = ['0.90',1.01]
>>>ucl = [1.11,1.53]
>>>
>>>x = zepid.graphics.effectmeasure_plot(lab,emm,lcl,ucl) #initializing effectmeasure_plot with the above lists
>>>x.labels(effectmeasure='RR') #changing the table label to 'RR'
>>>x.colors(pointcolor='r') #changing the point colors to red
>>>x.plot(t_adjuster=0.13) #generating the effect measure plot
"""
def __init__(self, label, effect_measure, lcl, ucl):
"""Initializes effectmeasure_plot with desired data to plot. All lists should be the same
length. If a blank space is desired in the plot, add an empty character object (' ') to
each list at the desired point.
Inputs:
label
-list of labels to use for y-axis
effect_measure
-list of numbers for point estimates to plot. If point estimate has trailing zeroes,
input as a character object rather than a float
lcl
-list of numbers for upper confidence limits to plot. If point estimate has trailing
zeroes, input as a character object rather than a float
ucl
-list of numbers for upper confidence limits to plot. If point estimate has
trailing zeroes, input as a character object rather than a float
"""
self.df = pd.DataFrame()
self.df['study'] = label
self.df['OR'] = effect_measure
self.df['LCL'] = lcl
self.df['UCL'] = ucl
self.df['OR2'] = self.df['OR'].astype(str).astype(float)
if (all(isinstance(item, float) for item in lcl)) & (all(isinstance(item, float) for item in effect_measure)):
self.df['LCL_dif'] = self.df['OR'] - self.df['LCL']
else:
self.df['LCL_dif'] = (pd.to_numeric(self.df['OR'])) - (pd.to_numeric(self.df['LCL']))
if (all(isinstance(item, float) for item in ucl)) & (all(isinstance(item, float) for item in effect_measure)):
self.df['UCL_dif'] = self.df['UCL'] - self.df['OR']
else:
self.df['UCL_dif'] = (pd.to_numeric(self.df['UCL'])) - (pd.to_numeric(self.df['OR']))
self.em = 'OR'
self.ci = '95% CI'
self.scale = 'linear'
self.center = 1
self.errc = 'dimgrey'
self.shape = 'd'
self.pc = 'k'
self.linec = 'gray'
def labels(self, **kwargs):
"""Function to change the labels of the outputted table. Additionally, the scale and reference
value can be changed.
Accepts the following keyword arguments:
effectmeasure
-changes the effect measure label
conf_int
-changes the confidence interval label
scale
-changes the scale to either log or linear
center
-changes the reference line for the center
"""
if 'effectmeasure' in kwargs:
self.em = kwargs['effectmeasure']
if 'ci' in kwargs:
self.ci = kwargs['conf_int']
if 'scale' in kwargs:
self.scale = kwargs['scale']
if 'center' in kwargs:
self.center = kwargs['center']
def colors(self, **kwargs):
"""Function to change colors and shapes.
Accepts the following keyword arguments:
errorbarcolor
-changes the error bar colors
linecolor
-changes the color of the reference line
pointcolor
-changes the color of the points
pointshape
-changes the shape of points
"""
if 'errorbarcolor' in kwargs:
self.errc = kwargs['errorbarcolor']
if 'pointshape' in kwargs:
self.shape = kwargs['pointshape']
if 'linecolor' in kwargs:
self.linec = kwargs['linecolor']
if 'pointcolor' in kwargs:
self.pc = kwargs['pointcolor']
def plot(self, figsize=(3, 3), t_adjuster=0.01, decimal=3, size=3, max_value=None, min_value=None):
"""Generates the matplotlib effect measure plot with the default or specified attributes.
The following variables can be used to further fine-tune the effect measure plot
t_adjuster
-used to refine alignment of the table with the line graphs. When generate plots, trial
and error for this value are usually necessary. I haven't come up with an algorithm to
determine this yet...
decimal
-number of decimal places to display in the table
size
-size of the plot to generate
max_value
-maximum value of x-axis scale. Default is None, which automatically determines max value
min_value
-minimum value of x-axis scale. Default is None, which automatically determines min value
"""
tval = []
ytick = []
for i in range(len(self.df)):
if (np.isnan(self.df['OR2'][i]) == False):
if ((isinstance(self.df['OR'][i], float)) & (isinstance(self.df['LCL'][i], float)) & (
isinstance(self.df['UCL'][i], float))):
tval.append([round(self.df['OR2'][i], decimal), (
'(' + str(round(self.df['LCL'][i], decimal)) + ', ' + str(
round(self.df['UCL'][i], decimal)) + ')')])
else:
tval.append(
[self.df['OR'][i], ('(' + str(self.df['LCL'][i]) + ', ' + str(self.df['UCL'][i]) + ')')])
ytick.append(i)
else:
tval.append([' ', ' '])
ytick.append(i)
if max_value is None:
if pd.to_numeric(self.df['UCL']).max() < 1:
maxi = round(((pd.to_numeric(self.df['UCL'])).max() + 0.05),
2) # setting x-axis maximum for UCL less than 1
if (pd.to_numeric(self.df['UCL']).max() < 9) and (pd.to_numeric(self.df['UCL']).max() >= 1):
maxi = round(((pd.to_numeric(self.df['UCL'])).max() + 1),
0) # setting x-axis maximum for UCL less than 10
if pd.to_numeric(self.df['UCL']).max() > 9:
maxi = round(((pd.to_numeric(self.df['UCL'])).max() + 10),
0) # setting x-axis maximum for UCL less than 100
else:
maxi = max_value
if min_value is None:
if pd.to_numeric(self.df['LCL']).min() > 0:
mini = round(((pd.to_numeric(self.df['LCL'])).min() - 0.1), 1) # setting x-axis minimum
if pd.to_numeric(self.df['LCL']).min() < 0:
mini = round(((pd.to_numeric(self.df['LCL'])).min() - 0.05), 2) # setting x-axis minimum
else:
mini = min_value
plt.figure(figsize=figsize) # blank figure
gspec = gridspec.GridSpec(1, 6) # sets up grid
plot = plt.subplot(gspec[0, 0:4]) # plot of data
tabl = plt.subplot(gspec[0, 4:]) # table of OR & CI
plot.set_ylim(-1, (len(self.df))) # spacing out y-axis properly
if self.scale == 'log':
try:
plot.set_xscale('log')
except:
raise ValueError('For the log scale, all values must be positive')
plot.axvline(self.center, color=self.linec, zorder=1)
plot.errorbar(self.df.OR2, self.df.index, xerr=[self.df.LCL_dif, self.df.UCL_dif], marker='None', zorder=2,
ecolor=self.errc, elinewidth=(size / size), linewidth=0)
plot.scatter(self.df.OR2, self.df.index, c=self.pc, s=(size * 25), marker=self.shape, zorder=3,
edgecolors='None')
plot.xaxis.set_ticks_position('bottom')
plot.yaxis.set_ticks_position('left')
plot.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plot.get_xaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
plot.set_yticks(ytick)
plot.set_xlim([mini, maxi])
plot.set_xticks([mini, self.center, maxi])
plot.set_xticklabels([mini, self.center, maxi])
plot.set_yticklabels(self.df.study)
plot.yaxis.set_ticks_position('none')
plot.invert_yaxis() # invert y-axis to align values properly with table
tb = tabl.table(cellText=tval, cellLoc='center', loc='right', colLabels=[self.em, self.ci],
bbox=[0, t_adjuster, 1, 1])
tabl.axis('off')
tb.auto_set_font_size(False)
tb.set_fontsize(12)
for key, cell in tb.get_celld().items():
cell.set_linewidth(0)
return plot
def functional_form_plot(df, outcome, var, f_form=None, outcome_type='binary', link_dist=None, ylims=None,
                         loess_value=0.4, legend=True, model_results=True, loess=True, points=False, discrete=False):
    """Creates a plot to aid functional form assessment for a continuous variable.

    Fits a GLM with the requested functional form and draws the regression line
    (solid blue) with its 95% CI (shaded blue); optionally overlays a LOESS
    curve (dashed red) and/or the data points (gray, sized by count for binary
    outcomes). Returns the matplotlib axes the plot was drawn on.

    Rows with missing values of `var` or `outcome` are dropped (a warning with
    the dropped count is printed).

    df:
        -dataframe that contains the variables of interest
    outcome:
        -column name of the outcome variable
    var:
        -column name of the continuous variable to assess
    f_form:
        -regression equation of the functional form to assess. Default None
         fits a linear form. Input variables separated by '+',
         e.g. 'var + var_sq'
    outcome_type:
        -'binary' (default) or 'continuous'. Any other value raises ValueError
    link_dist:
        -statsmodels family/link for the GLM. Default None uses logistic
         regression for binary outcomes and linear regression (Gaussian,
         identity link) for continuous outcomes
    ylims:
        -list of length 2 with lower/upper y-axis limits. Default None lets
         matplotlib choose. Useful for comparing plots across models
    loess_value:
        -fraction of observations used to fit the LOESS curve. Needs to be
         tuned per dataset. Default is 0.4
    legend:
        -whether to display the legend. Default True
    model_results:
        -whether to print the fitted model summary, AIC, and BIC. Default True
    loess:
        -whether to plot the LOESS curve. Default True
    points:
        -whether to plot the data points. Default False
    discrete:
        -False (default) bins the continuous variable into categories for the
         LOESS fit on binary outcomes; set True to use the observed values
         directly when the variable is already discrete. NOTE(review): a
         PerfectSeparationError from statsmodels may mean the binning needs
         to be reshifted

    Example)
    >>>data['var1_sq'] = data['var1']**2
    >>>zepid.graphics.func_form_plot(df=data,outcome='D',var='var1',f_form='var1 + var1_sq')
    """
    # Copying out the dataframe to a new object we will manipulate a bit
    rf = df.copy()
    rf = rf.dropna(subset=[var, outcome]).sort_values(by=[var, outcome]).reset_index()
    print('Warning: missing observations of model variables are dropped')
    print(int(df.shape[0] - rf.shape[0]), ' observations were dropped from the functional form assessment')

    # Functional form for the model
    if f_form is None:
        f_form = var
    else:
        pass

    # Generating model families: logistic for binary, linear for continuous
    if outcome_type == 'binary':
        if link_dist is None:
            link_dist = sm.families.family.Binomial(sm.families.links.logit)
        else:
            pass
    elif outcome_type == 'continuous':
        if link_dist is None:
            link_dist = sm.families.family.Gaussian(sm.families.links.identity)
        else:
            pass
    else:
        raise ValueError('Only binary or continuous outcomes are currently supported')

    # Generating LOESS or points if requested
    ax = plt.gca()
    if loess or points:
        if outcome_type == 'binary':
            if discrete is False:
                # Binning continuous variable into categories to get "General" functional form
                categories = int((np.max(rf[var]) - np.min(rf[var])) / 5)
                print('''A total of ''' + str(categories) + ''' categories were created. If you would like to influence
                the number of categories the spline is fit to, do the following\n\tIncrease: multiply by
                constant >1\n\tDecrease: multiply by contast <1 and >0''')
                rf['vbin'] = pd.qcut(rf[var], q=categories, duplicates='drop').cat.codes
                djm = smf.glm(outcome + '~ C(vbin)', rf, family=link_dist).fit()
            else:
                # variable is already discrete; model its observed categories directly
                djm = smf.glm(outcome + '~ C(' + var + ')', rf, family=link_dist).fit()
            djf = djm.get_prediction(rf).summary_frame()
            dj = pd.concat([rf, djf], axis=1)
            dj.sort_values(var, inplace=True)
            if points:
                # point size is scaled relative to the maximum value of var
                pf = dj.groupby(by=[var, 'mean']).count().reset_index()
                ax.scatter(pf[var], pf['mean'], s=[100 * (n / np.max(pf[var])) for n in pf[var]],
                           color='gray', label='Data point')
            if loess:
                yl = lowess(list(dj['mean']), list(dj[var]), frac=loess_value)
                lowess_x = list(zip(*yl))[0]
                lowess_y = list(zip(*yl))[1]
                ax.plot(lowess_x, lowess_y, '--', color='red', linewidth=1, label='LOESS')
        elif outcome_type == 'continuous':
            if points:
                pf = rf.groupby(by=[var, outcome]).count().reset_index()
                ax.scatter(pf[var], pf[outcome], color='gray', label='Data point')
            if loess:
                yl = lowess(list(rf[outcome]), list(rf[var]), frac=loess_value)
                lowess_x = list(zip(*yl))[0]
                lowess_y = list(zip(*yl))[1]
                ax.plot(lowess_x, lowess_y, '--', color='red', linewidth=1, label='LOESS')
        else:
            raise ValueError('Functional form assessment only supports binary or continuous outcomes currently')

    # Functional form model fitting
    ffm = smf.glm(outcome + ' ~ ' + f_form, rf, family=link_dist).fit()
    if model_results is True:
        print(ffm.summary())
        print('AIC: ', ffm.aic)
        print('BIC: ', ffm.bic)
    fff = ffm.get_prediction(rf).summary_frame()
    ff = pd.concat([rf, fff], axis=1)
    ff.sort_values(var, inplace=True)

    # Generating plot for functional form
    ax.fill_between(ff[var], ff['mean_ci_upper'], ff['mean_ci_lower'], alpha=0.1, color='blue', label='95% CI')
    ax.plot(ff[var], ff['mean'], '-', color='blue', label='Regression')
    ax.set_xlabel(var)
    ax.set_ylabel('Outcome')
    if legend is True:
        ax.legend()
    ax.set_ylim(ylims)
    return ax
def pvalue_plot(point, sd, color='b', fill=True, null=0, alpha=None):
    """Creates a plot of the p-value function based on a point estimate and
    its standard error.

    Useful for explaining p-values and the evidence weight carried by specific
    values, as well as what confidence intervals tell you. Only works for
    measures on a linear scale (i.e. it will plot exp(log(RR)) incorrectly).
    Based on Rothman Epidemiology 2nd Edition pg 152-153.

    Returns matplotlib axes object

    point:
        -point estimate. Must be on a linear scale (RD / log(RR))
    sd:
        -standard error of the estimate. Must be for the linear scale
         (SE(RD) / SE(log(RR)))
    color:
        -color of the p-value curve
    fill:
        -whether to shade the area under the p-value curve. Default True
    null:
        -reference value to compare against (vertical line). Default is 0
    alpha:
        -if not None, draws a horizontal line at this p-value level

    Example)
    >>>zepid.graphics.pvalue_plot(point=-0.1,sd=0.061,alpha=0.025)
    """
    # The x-axis window spans point +/- 3*SD, widened to include the null.
    # BUG FIX: the inner comparisons previously used the literal 0 instead of
    # `null`, producing a wrong window whenever null != 0
    if point <= null:
        lower = (point - 3 * sd)
        if (point + 3 * sd) < null:
            upper = point + 3 * sd
        else:
            upper = null + 3 * sd
    if point > null:
        upper = (point + 3 * sd)
        if (point - 3 * sd) > null:
            lower = null - 3 * sd
        else:
            lower = point - 3 * sd
    ax = plt.gca()
    # Two-sided p-value curve: rising to 1 at the point estimate, falling on
    # either side, drawn in two halves around the estimate
    x1 = np.linspace(lower, point, 100)
    x2 = np.linspace(point, upper, 100)
    ax.plot(x2, 2 * (1 - norm.cdf(x2, loc=point, scale=sd)), c=color)
    ax.plot(x1, 2 * norm.cdf(x1, loc=point, scale=sd), c=color)
    if fill:
        ax.fill_between(x2, 2 * (1 - norm.cdf(x2, loc=point, scale=sd)), color=color, alpha=0.2)
        ax.fill_between(x1, 2 * norm.cdf(x1, loc=point, scale=sd), color=color, alpha=0.2)
    ax.vlines(null, 0, 1, colors='k')  # reference line at the null value
    ax.set_xlim([lower, upper])
    ax.set_ylim([0, 1])
    ax.set_ylabel('P-value')
    if alpha is not None:
        ax.hlines(alpha, lower, upper)
    return ax
def spaghetti_plot(df, idvar, variable, time):
    """Create a spaghetti plot by an ID variable. A spaghetti plot can be
    useful for visualizing trends or looking at longitudinal data patterns
    for individuals all at once.

    Returns matplotlib axes

    df:
        -pandas dataframe containing variables of interest
    idvar:
        -ID variable for observations. This should indicate the group or
         individual followed over the time variable
    variable:
        -variable of interest to see how it varies over time
    time:
        -time or other variable in which the variable variation occurs

    Example)
    >>>zepid.graphics.spaghetti_plot(df,idvar='pid',variable='v',time='t')
    """
    ax = plt.gca()
    for i in df[idvar].unique():
        s = df.loc[df[idvar] == i].copy()
        # BUG FIX: the sort result was previously discarded
        # (s.sort_values(...) is not in-place), so unsorted data produced
        # zigzag lines. Sort each individual's observations by time first.
        s = s.sort_values(time)
        ax.plot(s[time], s[variable])
    ax.set_xlabel(time)
    ax.set_ylabel(variable)
    return ax
def roc(df, true, threshold, youden_index=True):
    """Generate a Receiver Operator Curve from true values and predicted
    probabilities.

    Sensitivity and 1-specificity are computed at every cutpoint observed in
    the data (plus one below the minimum and one above the maximum), and the
    resulting curve is plotted against the diagonal reference line.

    Returns matplotlib axes

    df:
        -pandas dataframe containing variables of interest
    true:
        -the true designation of the outcome (1, 0)
    threshold:
        -predicted probabilities for the outcome
    youden_index:
        -whether to calculate and print Youden's index, the cutpoint that
         maximizes (sensitivity + specificity - 1). Default True

    Raises ValueError when the true or predicted columns contain missing data.
    """
    sens = []    # sensitivity at each cutpoint
    fpr = []     # 1 - specificity (false positive rate) at each cutpoint
    thresh = []  # the cutpoints themselves
    tf = df[[threshold, true]].copy()
    if tf.isnull().values.sum() != 0:
        raise ValueError('ROC curve cannot handle missing data for probability or true values')

    # All observed cutpoints, padded so the curve spans (0,0) to (1,1)
    values = list(np.unique(tf[threshold]))
    values = [float(np.min(tf[threshold]) - 0.001)] + values + [float(np.max(tf[threshold]) + 0.001)]

    # Walk cutpoints from high to low, classifying as positive when the
    # predicted probability is at or above the cutpoint
    for v in reversed(values):
        thresh.append(v)
        prediction = np.where(tf[threshold] >= v, 1, 0)
        sens.append(prediction[tf[true] == 1].mean())
        fpr.append(prediction[tf[true] == 0].mean())

    # If requested, calculate Youden's Index
    if youden_index is True:
        spec = [1 - i for i in fpr]
        youdens = [se + sp - 1 for se, sp in zip(sens, spec)]
        ind = np.argmax(youdens)
        print('----------------------------------------------------------------------')
        print("Youden's Index: ", thresh[ind])
        print("Predictive values at Youden's Index")
        print("\tSensitivity: ", sens[ind])
        print("\tSpecificity: ", spec[ind])
        print('----------------------------------------------------------------------')

    # Creating ROC plot
    ax = plt.gca()
    ax.plot(fpr, sens, color='blue')
    ax.plot([0, 1], [0, 1], color='gray', linestyle='--')  # chance diagonal
    if youden_index is True:
        ax.text(0.65, 0.35, "Youden's Index:\n " + str(round(thresh[ind], 5)))
    ax.set_xlim([-0.01, 1.01])
    ax.set_ylim([-0.01, 1.01])
    ax.set_ylabel('Sensitivity')
    # BUG FIX: label previously read '1 -Specificity' (missing space)
    ax.set_xlabel('1 - Specificity')
    return ax
def dynamic_risk_plot(risk_exposed, risk_unexposed, measure='RD', loess=True, loess_value=0.25, point_color='darkblue',
                      line_color='b', scale='linear'):
    """Creates a plot of risk measures over time. See Cole et al. "Estimation of standardized risk difference and ratio
    in a competing risks framework: application to injection drug use and progression to AIDS after initiation of
    antiretroviral therapy." Am J Epidemiol. 2015 for an example of this plot

    risk_exposed:
        -pandas Series with the probability of the outcome among the exposed group. Index by 'timeline'
         where 'timeline' is the time. If you directly output the 1 - survival_function_ from
         lifelines.KaplanMeierFitter(), this should create a valid input
    risk_unexposed:
        -pandas Series with the probability of the outcome among the exposed group. Index by 'timeline'
         where 'timeline' is the time
    measure:
        -whether to generate the risk difference (RD) or risk ratio (RR). Default is 'RD'
    loess:
        -whether to generate LOESS curve fit to the calculated points. Default is True
    loess_value:
        -fraction of values to fit LOESS curve to. Default is 0.25
    point_color:
        -color of the points
    line_color:
        -color of the LOESS line generated and plotted
    scale:
        -change the y-axis scale. Options are 'linear' (default), 'log', 'log-transform'. 'log' and 'log-transform'
         is only a valid option for Risk Ratio plots

    Returns matplotlib axes
    """
    # Collapse duplicate risk values and move 'timeline' out of the index so
    # the two step functions can be merged on time
    re = risk_exposed.drop_duplicates(keep='first').iloc[:, 0].rename('exposed').reset_index()
    ru = risk_unexposed.drop_duplicates(keep='first').iloc[:, 0].rename('unexposed').reset_index()
    re.timeline = np.round(re.timeline * 100000).astype(int)  # This avoids a merge issue on floats
    ru.timeline = np.round(ru.timeline * 100000).astype(int)
    # Outer merge keeps every event time from either group; restore the float
    # time scale and forward-fill the step-function risks between events
    r = pd.merge(re, ru, how='outer', left_on='timeline', right_on='timeline').sort_values(by='timeline')
    r.timeline /= 100000
    r.ffill(inplace=True)
    # Contrast measure at each time point
    if measure == 'RD':
        r['m'] = r['exposed'] - r['unexposed']
    elif measure == 'RR':
        r['m'] = r['exposed'] / r['unexposed']
        if scale == 'log-transform':
            r['m'] = np.log(r['m'])
    else:
        raise ValueError('Only "RD" and "RR" are currently supported')
    # Generating the plot
    ax = plt.gca()
    ax.plot(r['timeline'], r['m'], 'o', c=point_color)
    if loess is True:
        # NOTE(review): `lowess` is presumably statsmodels' lowess smoother
        # imported at module level (not visible in this chunk) -- confirm.
        # It returns (x, y) pairs, unzipped below for plotting.
        l = lowess(list(r['m']), list(r['timeline']), frac=loess_value)
        lowess_x = list(zip(*l))[0]
        lowess_y = list(zip(*l))[1]
        ax.plot(lowess_x, lowess_y, '-', c=line_color, linewidth=4)
    # Horizontal reference line at the measure's null value:
    # 0 for RD and ln(RR), 1 for RR
    if measure == 'RD':
        ax.hlines(0, 0, np.max(r['timeline'] + 0.5), linewidth=1.5)
        ax.set_ylabel('Risk Difference')
    if measure == 'RR':
        if scale == 'log-transform':
            ax.hlines(0, 0, np.max(r['timeline'] + 0.5), linewidth=1.5)
            ax.set_ylabel('ln(Risk Ratio)')
        elif scale == 'log':
            ax.set_ylabel('Risk Ratio')
            ax.set_yscale('log')
            # Plain (non-scientific, non-offset) tick labels on the log axis;
            # `mticker` looks like matplotlib.ticker from a module-level import
            ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
            ax.yaxis.get_major_formatter().set_scientific(False)
            ax.yaxis.get_major_formatter().set_useOffset(False)
            ax.hlines(1, 0, np.max(r['timeline'] + 0.5), linewidth=1.5)
        else:
            ax.hlines(1, 0, np.max(r['timeline'] + 0.5), linewidth=1.5)
    ax.set_xlabel('Time')
    ax.set_xlim([0, np.max(r['timeline']) + 0.5])
    return ax
| [
"matplotlib.ticker.NullFormatter",
"numpy.log",
"statsmodels.api.families.family.Binomial",
"matplotlib.ticker.ScalarFormatter",
"statsmodels.formula.api.glm",
"scipy.stats.norm.cdf",
"statsmodels.api.families.family.Gaussian",
"pandas.qcut",
"numpy.where",
"numpy.max",
"numpy.linspace",
"matp... | [((16314, 16323), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (16321, 16323), True, 'import matplotlib.pyplot as plt\n'), ((18833, 18861), 'pandas.concat', 'pd.concat', (['[rf, fff]'], {'axis': '(1)'}), '([rf, fff], axis=1)\n', (18842, 18861), True, 'import pandas as pd\n'), ((20839, 20848), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20846, 20848), True, 'import matplotlib.pyplot as plt\n'), ((20859, 20889), 'numpy.linspace', 'np.linspace', (['lower', 'point', '(100)'], {}), '(lower, point, 100)\n', (20870, 20889), True, 'import numpy as np\n'), ((20900, 20930), 'numpy.linspace', 'np.linspace', (['point', 'upper', '(100)'], {}), '(point, upper, 100)\n', (20911, 20930), True, 'import numpy as np\n'), ((22232, 22241), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22239, 22241), True, 'import matplotlib.pyplot as plt\n'), ((24472, 24481), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24479, 24481), True, 'import matplotlib.pyplot as plt\n'), ((27267, 27276), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (27274, 27276), True, 'import matplotlib.pyplot as plt\n'), ((4739, 4753), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4751, 4753), True, 'import pandas as pd\n'), ((10301, 10328), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (10311, 10328), True, 'import matplotlib.pyplot as plt\n'), ((10362, 10385), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(6)'], {}), '(1, 6)\n', (10379, 10385), True, 'import matplotlib.gridspec as gridspec\n'), ((10418, 10444), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gspec[0, 0:4]'], {}), '(gspec[0, 0:4])\n', (10429, 10444), True, 'import matplotlib.pyplot as plt\n'), ((10477, 10502), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gspec[0, 4:]'], {}), '(gspec[0, 4:])\n', (10488, 10502), True, 'import matplotlib.pyplot as plt\n'), ((23338, 23362), 'numpy.unique', 'np.unique', (['tf[threshold]'], {}), 
'(tf[threshold])\n', (23347, 23362), True, 'import numpy as np\n'), ((23639, 23673), 'numpy.where', 'np.where', (['(tf[threshold] >= v)', '(1)', '(0)'], {}), '(tf[threshold] >= v, 1, 0)\n', (23647, 23673), True, 'import numpy as np\n'), ((24046, 24064), 'numpy.argmax', 'np.argmax', (['youdens'], {}), '(youdens)\n', (24055, 24064), True, 'import numpy as np\n'), ((11341, 11376), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (11374, 11376), False, 'import matplotlib\n'), ((11424, 11457), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (11455, 11457), False, 'import matplotlib\n'), ((15885, 15937), 'statsmodels.api.families.family.Binomial', 'sm.families.family.Binomial', (['sm.families.links.logit'], {}), '(sm.families.links.logit)\n', (15912, 15937), True, 'import statsmodels.api as sm\n'), ((17270, 17298), 'pandas.concat', 'pd.concat', (['[rf, djf]'], {'axis': '(1)'}), '([rf, djf], axis=1)\n', (17279, 17298), True, 'import pandas as pd\n'), ((18584, 18639), 'statsmodels.formula.api.glm', 'smf.glm', (["(outcome + ' ~ ' + f_form)", 'rf'], {'family': 'link_dist'}), "(outcome + ' ~ ' + f_form, rf, family=link_dist)\n", (18591, 18639), True, 'import statsmodels.formula.api as smf\n'), ((21023, 21056), 'scipy.stats.norm.cdf', 'norm.cdf', (['x1'], {'loc': 'point', 'scale': 'sd'}), '(x1, loc=point, scale=sd)\n', (21031, 21056), False, 'from scipy.stats import norm\n'), ((26619, 26649), 'numpy.round', 'np.round', (['(re.timeline * 100000)'], {}), '(re.timeline * 100000)\n', (26627, 26649), True, 'import numpy as np\n'), ((26719, 26749), 'numpy.round', 'np.round', (['(ru.timeline * 100000)'], {}), '(ru.timeline * 100000)\n', (26727, 26749), True, 'import numpy as np\n'), ((26771, 26841), 'pandas.merge', 'pd.merge', (['re', 'ru'], {'how': '"""outer"""', 'left_on': '"""timeline"""', 'right_on': '"""timeline"""'}), "(re, ru, how='outer', left_on='timeline', right_on='timeline')\n", (26779, 
26841), True, 'import pandas as pd\n'), ((27622, 27649), 'numpy.max', 'np.max', (["(r['timeline'] + 0.5)"], {}), "(r['timeline'] + 0.5)\n", (27628, 27649), True, 'import numpy as np\n'), ((5189, 5217), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['OR']"], {}), "(self.df['OR'])\n", (5202, 5217), True, 'import pandas as pd\n'), ((5222, 5251), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['LCL']"], {}), "(self.df['LCL'])\n", (5235, 5251), True, 'import pandas as pd\n'), ((5488, 5517), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['UCL']"], {}), "(self.df['UCL'])\n", (5501, 5517), True, 'import pandas as pd\n'), ((5522, 5550), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['OR']"], {}), "(self.df['OR'])\n", (5535, 5550), True, 'import pandas as pd\n'), ((8390, 8417), 'numpy.isnan', 'np.isnan', (["self.df['OR2'][i]"], {}), "(self.df['OR2'][i])\n", (8398, 8417), True, 'import numpy as np\n'), ((16067, 16122), 'statsmodels.api.families.family.Gaussian', 'sm.families.family.Gaussian', (['sm.families.links.identity'], {}), '(sm.families.links.identity)\n', (16094, 16122), True, 'import statsmodels.api as sm\n'), ((20957, 20990), 'scipy.stats.norm.cdf', 'norm.cdf', (['x2'], {'loc': 'point', 'scale': 'sd'}), '(x2, loc=point, scale=sd)\n', (20965, 20990), False, 'from scipy.stats import norm\n'), ((21220, 21253), 'scipy.stats.norm.cdf', 'norm.cdf', (['x1'], {'loc': 'point', 'scale': 'sd'}), '(x1, loc=point, scale=sd)\n', (21228, 21253), False, 'from scipy.stats import norm\n'), ((27130, 27144), 'numpy.log', 'np.log', (["r['m']"], {}), "(r['m'])\n", (27136, 27144), True, 'import numpy as np\n'), ((27800, 27827), 'numpy.max', 'np.max', (["(r['timeline'] + 0.5)"], {}), "(r['timeline'] + 0.5)\n", (27806, 27827), True, 'import numpy as np\n'), ((28403, 28424), 'numpy.max', 'np.max', (["r['timeline']"], {}), "(r['timeline'])\n", (28409, 28424), True, 'import numpy as np\n'), ((21127, 21160), 'scipy.stats.norm.cdf', 'norm.cdf', (['x2'], {'loc': 'point', 'scale': 'sd'}), 
'(x2, loc=point, scale=sd)\n', (21135, 21160), False, 'from scipy.stats import norm\n'), ((23436, 23457), 'numpy.max', 'np.max', (['tf[threshold]'], {}), '(tf[threshold])\n', (23442, 23457), True, 'import numpy as np\n'), ((28036, 28061), 'matplotlib.ticker.ScalarFormatter', 'mticker.ScalarFormatter', ([], {}), '()\n', (28059, 28061), True, 'import matplotlib.ticker as mticker\n'), ((28223, 28250), 'numpy.max', 'np.max', (["(r['timeline'] + 0.5)"], {}), "(r['timeline'] + 0.5)\n", (28229, 28250), True, 'import numpy as np\n'), ((28311, 28338), 'numpy.max', 'np.max', (["(r['timeline'] + 0.5)"], {}), "(r['timeline'] + 0.5)\n", (28317, 28338), True, 'import numpy as np\n'), ((9171, 9200), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['UCL']"], {}), "(self.df['UCL'])\n", (9184, 9200), True, 'import pandas as pd\n'), ((9646, 9675), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['UCL']"], {}), "(self.df['UCL'])\n", (9659, 9675), True, 'import pandas as pd\n'), ((9936, 9965), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['LCL']"], {}), "(self.df['LCL'])\n", (9949, 9965), True, 'import pandas as pd\n'), ((10099, 10128), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['LCL']"], {}), "(self.df['LCL'])\n", (10112, 10128), True, 'import pandas as pd\n'), ((16944, 16993), 'pandas.qcut', 'pd.qcut', (['rf[var]'], {'q': 'categories', 'duplicates': '"""drop"""'}), "(rf[var], q=categories, duplicates='drop')\n", (16951, 16993), True, 'import pandas as pd\n'), ((17027, 17079), 'statsmodels.formula.api.glm', 'smf.glm', (["(outcome + '~ C(vbin)')", 'rf'], {'family': 'link_dist'}), "(outcome + '~ C(vbin)', rf, family=link_dist)\n", (17034, 17079), True, 'import statsmodels.formula.api as smf\n'), ((17128, 17187), 'statsmodels.formula.api.glm', 'smf.glm', (["(outcome + '~ C(' + var + ')')", 'rf'], {'family': 'link_dist'}), "(outcome + '~ C(' + var + ')', rf, family=link_dist)\n", (17135, 17187), True, 'import statsmodels.formula.api as smf\n'), ((23386, 23407), 'numpy.min', 
'np.min', (['tf[threshold]'], {}), '(tf[threshold])\n', (23392, 23407), True, 'import numpy as np\n'), ((9386, 9415), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['UCL']"], {}), "(self.df['UCL'])\n", (9399, 9415), True, 'import pandas as pd\n'), ((9432, 9461), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['UCL']"], {}), "(self.df['UCL'])\n", (9445, 9461), True, 'import pandas as pd\n'), ((16553, 16568), 'numpy.max', 'np.max', (['rf[var]'], {}), '(rf[var])\n', (16559, 16568), True, 'import numpy as np\n'), ((16571, 16586), 'numpy.min', 'np.min', (['rf[var]'], {}), '(rf[var])\n', (16577, 16586), True, 'import numpy as np\n'), ((9244, 9273), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['UCL']"], {}), "(self.df['UCL'])\n", (9257, 9273), True, 'import pandas as pd\n'), ((9507, 9536), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['UCL']"], {}), "(self.df['UCL'])\n", (9520, 9536), True, 'import pandas as pd\n'), ((9719, 9748), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['UCL']"], {}), "(self.df['UCL'])\n", (9732, 9748), True, 'import pandas as pd\n'), ((10009, 10038), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['LCL']"], {}), "(self.df['LCL'])\n", (10022, 10038), True, 'import pandas as pd\n'), ((10172, 10201), 'pandas.to_numeric', 'pd.to_numeric', (["self.df['LCL']"], {}), "(self.df['LCL'])\n", (10185, 10201), True, 'import pandas as pd\n'), ((17506, 17521), 'numpy.max', 'np.max', (['pf[var]'], {}), '(pf[var])\n', (17512, 17521), True, 'import numpy as np\n')] |
import math
import cloudpickle
import torch
import numpy as np
from collections import OrderedDict
def save_checkpoint(state, filename='checkpoint.pkl'):
    """Serialize `state` with cloudpickle and write it to `filename`."""
    # Serialize first so a pickling failure never touches the file
    payload = cloudpickle.dumps(state)
    with open(filename, 'wb') as handle:
        handle.write(payload)
def load_checkpoint(filename='checkpoint.pkl'):
    """Read and deserialize a checkpoint written by `save_checkpoint`."""
    with open(filename, 'rb') as handle:
        return cloudpickle.load(handle)
def sample_action(policy, state, mode='Categorical'):
    """Sample an action from the distribution parameterized by `policy(state)`.

    `mode` names any distribution class in `torch.distributions`; the values
    returned by the policy are unpacked as that class's constructor arguments,
    so the policy's outputs must match the chosen distribution.

    Returns (policy, sampled action as a Python scalar, log-probability tensor).
    """
    # Add a batch dimension before querying the policy
    batched_state = state.float().unsqueeze(0)
    dist_params = policy(batched_state)
    dist = getattr(torch.distributions, mode)(*dist_params)
    action = dist.sample()
    return policy, action.item(), dist.log_prob(action).unsqueeze(0)
def estimate_regret(states, state, critic):
    """Estimate regret, given a critic.

    Regret is the best value attainable over `states` minus the value at the
    visited state, where `state` is an index into `states`.
    """
    scores = [critic(candidate) for candidate in states]
    return np.max(scores) - scores[state]
class Oracle(object):
    """Gym-environment wrapper that counts every step taken.

    Attribute access not found on the oracle itself is transparently
    proxied to the wrapped environment.
    """

    def __init__(self, env, step_value=1):
        """An oracle that counts ALL the steps taken in a Gym env.

        env:
            -environment to wrap; must expose a `step(action)` method
        step_value:
            -amount added to `total_steps` on each call to `step`
        """
        self._env = env          # wrapped environment
        self.total_steps = 0     # running step count
        self.step_value = step_value

    def __getattr__(self, attr):
        # __getattr__ is only invoked AFTER normal attribute lookup has
        # already failed, so attributes set on this object never reach here.
        # (The original `attr in self.__dict__` branch was dead code and
        # would have recursed infinitely had it ever been taken.)
        # Guard: if '_env' itself is missing (e.g. during unpickling,
        # before __init__ has run), raise instead of recursing forever.
        if attr == '_env':
            raise AttributeError(attr)
        return getattr(self._env, attr)

    def step(self, action):
        """Step the wrapped env, adding `step_value` to the running count."""
        self.total_steps += self.step_value
        ret = self._env.step(action)
        return ret
| [
"cloudpickle.dumps",
"cloudpickle.load",
"numpy.max"
] | [((168, 192), 'cloudpickle.dumps', 'cloudpickle.dumps', (['state'], {}), '(state)\n', (185, 192), False, 'import cloudpickle\n'), ((964, 978), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (970, 978), True, 'import numpy as np\n'), ((355, 375), 'cloudpickle.load', 'cloudpickle.load', (['fi'], {}), '(fi)\n', (371, 375), False, 'import cloudpickle\n')] |
# cython: language_level=3
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2016-2020, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""Example 10.
This example shows simple usage of the DPNP
in combination with dpCtl.
"""
import time
try:
import dpnp
except ImportError:
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
import dpnp
import numpy
def run(executor, size, test_type, repetition):
    """Time `executor.sum` over a size-by-size arange matrix.

    `executor` is a numpy-like module (numpy or dpnp). Returns a tuple of
    (median elapsed seconds across `repetition` runs, the computed sum).
    """
    matrix = executor.arange(size * size, dtype=test_type).reshape((size, size))
    elapsed = []
    for _ in range(repetition):
        tick = time.perf_counter()
        result = executor.sum(matrix)
        elapsed.append(time.perf_counter() - tick)
    return numpy.median(elapsed), result
def example():
    """Benchmark dpnp against NumPy for sum() across dtypes and matrix sizes.

    Prints one line per (dtype, size) pair with both timings, their ratio,
    and whether the two libraries agreed on the result.
    """
    repetitions = 5
    for test_type in [numpy.float64, numpy.float32, numpy.int64, numpy.int32]:
        type_name = numpy.dtype(test_type).name
        print(f"...Test data type is {type_name}, each test repetitions {repetitions}")
        for size in [64, 128, 256, 512, 1024, 2048, 4096]:
            time_numpy, result_numpy = run(numpy, size, test_type, repetitions)
            time_dpnp, result_dpnp = run(dpnp, size, test_type, repetitions)
            # True on agreement, otherwise show the mismatching values
            verification = (True if result_dpnp == result_numpy
                            else f"({result_dpnp} != {result_numpy})")
            msg = (f"type:{type_name}:N:{size:4}:NumPy:{time_numpy:.3e}:SYCL:{time_dpnp:.3e}"
                   f":ratio:{time_numpy/time_dpnp:6.2f}:verification:{verification}")
            print(msg)
if __name__ == "__main__":
    try:
        import dpctl
        # Prefer running on a SYCL GPU queue when dpctl is installed; the
        # device-info dump identifies which device was actually selected.
        with dpctl.device_context("opencl:gpu") as gpu_queue:
            gpu_queue.get_sycl_device().dump_device_info()
            example()
    except ImportError:
        # dpctl unavailable -- fall back to dpnp's default device selection
        example()
| [
"numpy.median",
"time.perf_counter",
"dpctl.device_context",
"os.path.abspath",
"numpy.dtype"
] | [((2004, 2023), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2021, 2023), False, 'import time\n'), ((2076, 2095), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2093, 2095), False, 'import time\n'), ((2152, 2171), 'numpy.median', 'numpy.median', (['times'], {}), '(times)\n', (2164, 2171), False, 'import numpy\n'), ((1757, 1777), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (1772, 1777), False, 'import os\n'), ((2320, 2342), 'numpy.dtype', 'numpy.dtype', (['test_type'], {}), '(test_type)\n', (2331, 2342), False, 'import numpy\n'), ((3107, 3141), 'dpctl.device_context', 'dpctl.device_context', (['"""opencl:gpu"""'], {}), "('opencl:gpu')\n", (3127, 3141), False, 'import dpctl\n')] |
import numpy as np
import pytest
from vispy.color import Colormap as VispyColormap
from napari.utils.colormaps import Colormap
from napari.utils.colormaps.colormap_utils import (
_MATPLOTLIB_COLORMAP_NAMES,
_VISPY_COLORMAPS_ORIGINAL,
_VISPY_COLORMAPS_TRANSLATIONS,
AVAILABLE_COLORMAPS,
_increment_unnamed_colormap,
ensure_colormap,
vispy_or_mpl_colormap,
)
from napari.utils.colormaps.vendored import cm
@pytest.mark.parametrize("name", list(AVAILABLE_COLORMAPS.keys()))
def test_colormap(name):
    """Each registered colormap maps values identically to its vispy twin."""
    np.random.seed(0)
    cmap = AVAILABLE_COLORMAPS[name]

    # Mapping random values in [0, 1) yields one RGBA row per input value
    values = np.random.rand(50)
    mapped = cmap.map(values)
    assert mapped.shape == (len(values), 4)

    # A vispy colormap built from the same data must agree (to 6 decimals)
    vispy_mapped = VispyColormap(*cmap).map(values)
    np.testing.assert_almost_equal(mapped, vispy_mapped, decimal=6)
np.testing.assert_almost_equal(colors, vispy_colors, decimal=6)
def test_increment_unnamed_colormap():
    """Unnamed colormaps get the next free index; real names pass through."""
    existing = [
        '[unnamed colormap 0]',
        'existing_colormap',
        'perceptually_uniform',
        '[unnamed colormap 1]',
    ]
    # Highest unnamed index present is 1, so the next generated name uses 2
    assert _increment_unnamed_colormap(existing)[0] == '[unnamed colormap 2]'

    # An explicitly named colormap is returned unchanged
    proper_name = 'perfect_colormap'
    assert _increment_unnamed_colormap(existing, proper_name)[0] == proper_name
def test_can_accept_vispy_colormaps():
    """ensure_colormap converts a vispy Colormap into a napari one."""
    rgba = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    converted = ensure_colormap(VispyColormap(rgba))
    assert isinstance(converted, Colormap)
    np.testing.assert_almost_equal(converted.colors, rgba)
def test_can_accept_napari_colormaps():
    """ensure_colormap passes a napari Colormap through with colors intact."""
    rgba = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    converted = ensure_colormap(Colormap(rgba))
    assert isinstance(converted, Colormap)
    np.testing.assert_almost_equal(converted.colors, rgba)
def test_can_accept_vispy_colormap_name_tuple():
    """A (name, vispy Colormap) tuple keeps both the name and the colors."""
    rgba = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    converted = ensure_colormap(('special_name', VispyColormap(rgba)))
    assert isinstance(converted, Colormap)
    assert converted.name == 'special_name'
    np.testing.assert_almost_equal(converted.colors, rgba)
def test_can_accept_napari_colormap_name_tuple():
    """A (name, napari Colormap) tuple keeps both the name and the colors."""
    rgba = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    converted = ensure_colormap(('special_name', Colormap(rgba)))
    assert isinstance(converted, Colormap)
    assert converted.name == 'special_name'
    np.testing.assert_almost_equal(converted.colors, rgba)
def test_can_accept_named_vispy_colormaps():
    """A plain vispy colormap name string resolves to a napari Colormap."""
    resolved = ensure_colormap('red')
    assert resolved.name == 'red'
    assert isinstance(resolved, Colormap)
def test_can_accept_named_mpl_colormap():
    """A matplotlib colormap name string resolves to a napari Colormap."""
    resolved = ensure_colormap('RdYlGn')
    assert resolved.name == 'RdYlGn'
    assert isinstance(resolved, Colormap)
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_can_accept_vispy_colormaps_in_dict():
    """A dict of vispy colormaps resolves to its first entry."""
    rgba_a = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    rgba_b = np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 0, 1, 1]])
    converted = ensure_colormap(
        {'a': VispyColormap(rgba_a), 'b': VispyColormap(rgba_b)}
    )
    assert isinstance(converted, Colormap)
    assert converted.name == 'a'
    np.testing.assert_almost_equal(converted.colors, rgba_a)
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_can_accept_napari_colormaps_in_dict():
    """A dict of napari colormaps resolves to its first entry."""
    rgba_a = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    rgba_b = np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 0, 1, 1]])
    converted = ensure_colormap(
        {'a': Colormap(rgba_a), 'b': Colormap(rgba_b)}
    )
    assert isinstance(converted, Colormap)
    assert converted.name == 'a'
    np.testing.assert_almost_equal(converted.colors, rgba_a)
def test_can_accept_colormap_dict():
    """A keyword-style dict with 'colors' and 'name' builds a Colormap."""
    rgba = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    converted = ensure_colormap({'colors': rgba, 'name': 'special_name'})
    assert isinstance(converted, Colormap)
    assert converted.name == 'special_name'
    np.testing.assert_almost_equal(converted.colors, rgba)
def test_can_degrade_gracefully():
    """Unrecognized input warns and falls back to the 'gray' colormap."""
    with pytest.warns(UserWarning):
        fallback = ensure_colormap(object)
    assert fallback.name == 'gray'
    assert isinstance(fallback, Colormap)
def test_vispy_colormap_amount():
    """
    Every original vispy colormap name must have a localized translation.
    """
    for original_name in _VISPY_COLORMAPS_ORIGINAL:
        assert original_name in _VISPY_COLORMAPS_TRANSLATIONS
def test_mpl_colormap_exists():
    """Every localized mpl colormap name resolves on the vendored cm module."""
    for mpl_name in _MATPLOTLIB_COLORMAP_NAMES:
        assert getattr(cm, mpl_name, None) is not None
def test_colormap_error_suggestion():
    """
    vispy/mpl lookup errors on a `display_name` suggest the canonical `name`.
    """
    suggested = '"twilight_shifted"'

    with pytest.raises(KeyError) as excinfo:
        vispy_or_mpl_colormap('twilight shifted')
    assert suggested in str(excinfo.value)

    with pytest.raises(KeyError) as excinfo:
        vispy_or_mpl_colormap('foobar')
    assert suggested in str(excinfo.value)
| [
"napari.utils.colormaps.colormap_utils._increment_unnamed_colormap",
"pytest.mark.filterwarnings",
"numpy.random.rand",
"napari.utils.colormaps.Colormap",
"numpy.testing.assert_almost_equal",
"numpy.array",
"napari.utils.colormaps.colormap_utils.AVAILABLE_COLORMAPS.keys",
"pytest.raises",
"numpy.ran... | [((3373, 3422), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::UserWarning"""'], {}), "('ignore::UserWarning')\n", (3399, 3422), False, 'import pytest\n'), ((3953, 4002), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::UserWarning"""'], {}), "('ignore::UserWarning')\n", (3979, 4002), False, 'import pytest\n'), ((531, 548), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (545, 548), True, 'import numpy as np\n'), ((638, 656), 'numpy.random.rand', 'np.random.rand', (['(50)'], {}), '(50)\n', (652, 656), True, 'import numpy as np\n'), ((832, 852), 'vispy.color.Colormap', 'VispyColormap', (['*cmap'], {}), '(*cmap)\n', (845, 852), True, 'from vispy.color import Colormap as VispyColormap\n'), ((899, 962), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['colors', 'vispy_colors'], {'decimal': '(6)'}), '(colors, vispy_colors, decimal=6)\n', (929, 962), True, 'import numpy as np\n'), ((1571, 1623), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]'], {}), '([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])\n', (1579, 1623), True, 'import numpy as np\n'), ((1641, 1662), 'vispy.color.Colormap', 'VispyColormap', (['colors'], {}), '(colors)\n', (1654, 1662), True, 'from vispy.color import Colormap as VispyColormap\n'), ((1674, 1701), 'napari.utils.colormaps.colormap_utils.ensure_colormap', 'ensure_colormap', (['vispy_cmap'], {}), '(vispy_cmap)\n', (1689, 1701), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((1744, 1795), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['cmap.colors', 'colors'], {}), '(cmap.colors, colors)\n', (1774, 1795), True, 'import numpy as np\n'), ((1903, 1955), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 1, 0, 1], 
[0, 0, 1, 1]]'], {}), '([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])\n', (1911, 1955), True, 'import numpy as np\n'), ((1974, 1990), 'napari.utils.colormaps.Colormap', 'Colormap', (['colors'], {}), '(colors)\n', (1982, 1990), False, 'from napari.utils.colormaps import Colormap\n'), ((2002, 2030), 'napari.utils.colormaps.colormap_utils.ensure_colormap', 'ensure_colormap', (['napari_cmap'], {}), '(napari_cmap)\n', (2017, 2030), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((2073, 2124), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['cmap.colors', 'colors'], {}), '(cmap.colors, colors)\n', (2103, 2124), True, 'import numpy as np\n'), ((2250, 2302), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]'], {}), '([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])\n', (2258, 2302), True, 'import numpy as np\n'), ((2320, 2341), 'vispy.color.Colormap', 'VispyColormap', (['colors'], {}), '(colors)\n', (2333, 2341), True, 'from vispy.color import Colormap as VispyColormap\n'), ((2353, 2398), 'napari.utils.colormaps.colormap_utils.ensure_colormap', 'ensure_colormap', (["('special_name', vispy_cmap)"], {}), "(('special_name', vispy_cmap))\n", (2368, 2398), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((2441, 2492), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['cmap.colors', 'colors'], {}), '(cmap.colors, colors)\n', (2471, 2492), True, 'import numpy as np\n'), ((2659, 2711), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]'], {}), '([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])\n', (2667, 2711), True, 'import 
numpy as np\n'), ((2730, 2746), 'napari.utils.colormaps.Colormap', 'Colormap', (['colors'], {}), '(colors)\n', (2738, 2746), False, 'from napari.utils.colormaps import Colormap\n'), ((2758, 2804), 'napari.utils.colormaps.colormap_utils.ensure_colormap', 'ensure_colormap', (["('special_name', napari_cmap)"], {}), "(('special_name', napari_cmap))\n", (2773, 2804), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((2847, 2898), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['cmap.colors', 'colors'], {}), '(cmap.colors, colors)\n', (2877, 2898), True, 'import numpy as np\n'), ((3052, 3074), 'napari.utils.colormaps.colormap_utils.ensure_colormap', 'ensure_colormap', (['"""red"""'], {}), "('red')\n", (3067, 3074), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((3271, 3297), 'napari.utils.colormaps.colormap_utils.ensure_colormap', 'ensure_colormap', (['cmap_name'], {}), '(cmap_name)\n', (3286, 3297), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((3552, 3604), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]'], {}), '([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])\n', (3560, 3604), True, 'import numpy as np\n'), ((3620, 3672), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [1, 0, 0, 1], [0, 0, 1, 1]]'], {}), '([[0, 0, 0, 1], [1, 0, 0, 1], [0, 0, 1, 1]])\n', (3628, 3672), True, 'import numpy as np\n'), ((3692, 3715), 'vispy.color.Colormap', 'VispyColormap', 
(['colors_a'], {}), '(colors_a)\n', (3705, 3715), True, 'from vispy.color import Colormap as VispyColormap\n'), ((3735, 3758), 'vispy.color.Colormap', 'VispyColormap', (['colors_b'], {}), '(colors_b)\n', (3748, 3758), True, 'from vispy.color import Colormap as VispyColormap\n'), ((3770, 3825), 'napari.utils.colormaps.colormap_utils.ensure_colormap', 'ensure_colormap', (["{'a': vispy_cmap_a, 'b': vispy_cmap_b}"], {}), "({'a': vispy_cmap_a, 'b': vispy_cmap_b})\n", (3785, 3825), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((3868, 3921), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['cmap.colors', 'colors_a'], {}), '(cmap.colors, colors_a)\n', (3898, 3921), True, 'import numpy as np\n'), ((4132, 4184), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]'], {}), '([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])\n', (4140, 4184), True, 'import numpy as np\n'), ((4200, 4252), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [1, 0, 0, 1], [0, 0, 1, 1]]'], {}), '([[0, 0, 0, 1], [1, 0, 0, 1], [0, 0, 1, 1]])\n', (4208, 4252), True, 'import numpy as np\n'), ((4273, 4291), 'napari.utils.colormaps.Colormap', 'Colormap', (['colors_a'], {}), '(colors_a)\n', (4281, 4291), False, 'from napari.utils.colormaps import Colormap\n'), ((4312, 4330), 'napari.utils.colormaps.Colormap', 'Colormap', (['colors_b'], {}), '(colors_b)\n', (4320, 4330), False, 'from napari.utils.colormaps import Colormap\n'), ((4342, 4399), 'napari.utils.colormaps.colormap_utils.ensure_colormap', 'ensure_colormap', (["{'a': napari_cmap_a, 'b': napari_cmap_b}"], {}), "({'a': napari_cmap_a, 'b': napari_cmap_b})\n", (4357, 4399), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, 
AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((4442, 4495), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['cmap.colors', 'colors_a'], {}), '(cmap.colors, colors_a)\n', (4472, 4495), True, 'import numpy as np\n'), ((4642, 4694), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]'], {}), '([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])\n', (4650, 4694), True, 'import numpy as np\n'), ((4706, 4765), 'napari.utils.colormaps.colormap_utils.ensure_colormap', 'ensure_colormap', (["{'colors': colors, 'name': 'special_name'}"], {}), "({'colors': colors, 'name': 'special_name'})\n", (4721, 4765), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((4808, 4859), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['cmap.colors', 'colors'], {}), '(cmap.colors, colors)\n', (4838, 4859), True, 'import numpy as np\n'), ((473, 499), 'napari.utils.colormaps.colormap_utils.AVAILABLE_COLORMAPS.keys', 'AVAILABLE_COLORMAPS.keys', ([], {}), '()\n', (497, 499), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((5026, 5051), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (5038, 5051), False, 'import pytest\n'), ((5068, 5091), 'napari.utils.colormaps.colormap_utils.ensure_colormap', 'ensure_colormap', (['object'], {}), '(object)\n', (5083, 5091), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), 
((5796, 5819), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (5809, 5819), False, 'import pytest\n'), ((5840, 5875), 'napari.utils.colormaps.colormap_utils.vispy_or_mpl_colormap', 'vispy_or_mpl_colormap', (['display_name'], {}), '(display_name)\n', (5861, 5875), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((5951, 5974), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (5964, 5974), False, 'import pytest\n'), ((5995, 6028), 'napari.utils.colormaps.colormap_utils.vispy_or_mpl_colormap', 'vispy_or_mpl_colormap', (['wrong_name'], {}), '(wrong_name)\n', (6016, 6028), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((1210, 1244), 'napari.utils.colormaps.colormap_utils._increment_unnamed_colormap', '_increment_unnamed_colormap', (['names'], {}), '(names)\n', (1237, 1244), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n'), ((1388, 1438), 'napari.utils.colormaps.colormap_utils._increment_unnamed_colormap', '_increment_unnamed_colormap', (['names', 'named_colormap'], {}), '(names, named_colormap)\n', (1415, 1438), False, 'from napari.utils.colormaps.colormap_utils import _MATPLOTLIB_COLORMAP_NAMES, _VISPY_COLORMAPS_ORIGINAL, _VISPY_COLORMAPS_TRANSLATIONS, AVAILABLE_COLORMAPS, _increment_unnamed_colormap, ensure_colormap, vispy_or_mpl_colormap\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 21 10:43:03 2020
@author: danish
"""
import cv2 # for capturing videos
import math # for mathematical operations
import pandas as pd
from keras.preprocessing import image # for preprocessing the images
from glob import glob
from tqdm import tqdm
import os
import numpy as np
import shutil
def VideoNameDF(name, dir):
    """Read a .txt listing of video names into a dataframe.

    Parameters
    ----------
    name : str
        Name of the .txt file; one video name per line.
    dir : str
        Directory containing the file.

    Returns
    -------
    pd.DataFrame
        Single column 'video_name', one row per (non-empty) line.
    """
    # Use a context manager so the file handle is closed even on error
    # (the original left it open).
    with open("{0}/{1}".format(dir, name), "r") as f:
        temp = f.read()
    # Splitting the videos by new line; a trailing newline yields an
    # empty final element, dropped below.
    videos = temp.split('\n')
    # creating a dataframe having video names
    train = pd.DataFrame()
    train['video_name'] = videos
    train = train[:-1]
    return train
def TagVideos(df):
    """Add a 'tag' column derived from each video name.

    The part of the video name before the first '/' is the tag (class)
    of that video, e.g. 'Run/v_Run_g01.avi' -> 'Run'.

    Returns the same dataframe with the 'tag' column added in place.
    """
    # Iterate the column values directly instead of label-indexing with
    # integer positions (df['video_name'][i]), which breaks whenever the
    # dataframe does not carry a default 0..n-1 index.
    df['tag'] = [name.split('/')[0] for name in df['video_name']]
    return df
def Video2Frames(df, frames_dir='train_1', videos_dir='UCF'):
    """Extract training frames from every video listed in ``df``.

    One frame is kept every ``frame_rate`` frames (roughly one per
    second) and written as a JPEG into ``frames_dir``.
    """
    os.makedirs(frames_dir, exist_ok=True)
    for row in tqdm(range(df.shape[0])):
        video_name = df['video_name'][row]
        # The listing lines look like "<path> <label>"; the path is the
        # first whitespace-separated token.
        capture = cv2.VideoCapture(videos_dir + '/' + video_name.split(' ')[0])
        frame_rate = capture.get(5)  # property 5 == cv2.CAP_PROP_FPS
        saved = 0
        while capture.isOpened():
            frame_no = capture.get(1)  # property 1 == current frame position
            ok, frame = capture.read()
            if not ok:
                break
            if frame_no % math.floor(frame_rate) == 0:
                clip_stem = video_name.split('/')[1].split(' ')[0]
                out_path = frames_dir + '/' + clip_stem + "_frame%d.jpg" % saved
                saved += 1
                cv2.imwrite(out_path, frame)
        capture.release()
    return '\nFrames are extracted from the videos succesfully!'
def FramesCSV(frames_dir, csv_dir='UCF', csv_name='train_new.csv'):
    """Build a CSV mapping each extracted frame file to its class tag.

    Scans ``frames_dir`` for .jpg frames, takes each frame's class from
    its file name (the token after the first '_'), writes the mapping to
    ``csv_dir/csv_name`` and returns it as a dataframe.
    """
    frame_paths = glob("{0}/*.jpg".format(frames_dir))
    # Normalise Windows path separators so '/'-splitting works everywhere.
    frame_paths = [p.replace('\\', '/') for p in frame_paths]
    names = []
    classes = []
    for k in tqdm(range(len(frame_paths))):
        # The basename is the last '/'-separated component of the path.
        base = frame_paths[k].split('/')[-1]
        names.append(base)
        # Frame files are named like "v_<class>_..._frameN.jpg".
        classes.append(base.split('_')[1])
    frame_df = pd.DataFrame()
    frame_df['image'] = names
    frame_df['class'] = classes
    os.makedirs(csv_dir, exist_ok=True)
    frame_df.to_csv('{0}/{1}'.format(csv_dir, csv_name), header=True, index=False)
    return frame_df
def FrameExtractor(test_videos, index=None, frames_dir='temp', videos_dir='UCF-101'):
    """Extract the frames of one video clip into ``frames_dir``.

    Parameters
    ----------
    test_videos : str or list of str
        A single video name, or a list of names indexed by ``index``.
    index : int, optional
        Which entry of ``test_videos`` to extract; required when a list
        is passed, ignored (forced to 0) for a single string.
    frames_dir : str
        Directory the frames are written to; wiped before extraction.
    videos_dir : str
        Directory containing the source videos.

    Returns
    -------
    str
        The name of the video that was processed.

    Raises
    ------
    ValueError
        If ``test_videos`` is a list and ``index`` is None.
    """
    # isinstance (not `type(...) ==`) so str subclasses are accepted too.
    if isinstance(test_videos, str):
        test_videos = [test_videos]
        index = 0
    if index is None:
        raise ValueError('Invalid value for `argument` index.')
    count = 0
    # Remove any stale frames from a previous run, then recreate the dir.
    shutil.rmtree(frames_dir, ignore_errors=True)
    os.makedirs(frames_dir, exist_ok=True)
    videoFile = test_videos[index]
    # capturing the video from the given path
    cap = cv2.VideoCapture(videos_dir + '/' + videoFile.split(' ')[0])
    frameRate = cap.get(5)  # property 5 == cv2.CAP_PROP_FPS
    while cap.isOpened():
        frameId = cap.get(1)  # current frame number
        ret, frame = cap.read()
        if not ret:
            break
        # Keep one frame per ~second of video.
        if frameId % math.floor(frameRate) == 0:
            filename = frames_dir + '/' + "_frame%d.jpg" % count
            count += 1
            cv2.imwrite(filename, frame)
    cap.release()
    return videoFile
def LoadImages(frames_dir='temp'):
    """Load every frame in ``frames_dir`` and stack them into one array.

    Each .jpg is loaded at 224x224x3, converted to a float array, scaled
    into [0, 1] by dividing by 255, and collected; the stacked result is
    returned as a single numpy array.
    """
    frame_files = glob(frames_dir + "/*.jpg")
    frames = []
    for path in frame_files:
        img = image.load_img(path, target_size=(224, 224, 3))
        arr = image.img_to_array(img)
        frames.append(arr / 255)
    return np.array(frames)
| [
"keras.preprocessing.image.img_to_array",
"cv2.imwrite",
"os.makedirs",
"math.floor",
"numpy.array",
"shutil.rmtree",
"pandas.DataFrame",
"glob.glob",
"keras.preprocessing.image.load_img"
] | [((740, 754), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (752, 754), True, 'import pandas as pd\n'), ((1528, 1566), 'os.makedirs', 'os.makedirs', (['frames_dir'], {'exist_ok': '(True)'}), '(frames_dir, exist_ok=True)\n', (1539, 1566), False, 'import os\n'), ((3390, 3404), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3402, 3404), True, 'import pandas as pd\n'), ((3531, 3566), 'os.makedirs', 'os.makedirs', (['csv_dir'], {'exist_ok': '(True)'}), '(csv_dir, exist_ok=True)\n', (3542, 3566), False, 'import os\n'), ((4162, 4207), 'shutil.rmtree', 'shutil.rmtree', (['frames_dir'], {'ignore_errors': '(True)'}), '(frames_dir, ignore_errors=True)\n', (4175, 4207), False, 'import shutil\n'), ((4244, 4282), 'os.makedirs', 'os.makedirs', (['frames_dir'], {'exist_ok': '(True)'}), '(frames_dir, exist_ok=True)\n', (4255, 4282), False, 'import os\n'), ((5341, 5368), 'glob.glob', 'glob', (["(frames_dir + '/*.jpg')"], {}), "(frames_dir + '/*.jpg')\n", (5345, 5368), False, 'from glob import glob\n'), ((5680, 5707), 'numpy.array', 'np.array', (['prediction_images'], {}), '(prediction_images)\n', (5688, 5707), True, 'import numpy as np\n'), ((5441, 5493), 'keras.preprocessing.image.load_img', 'image.load_img', (['images[i]'], {'target_size': '(224, 224, 3)'}), '(images[i], target_size=(224, 224, 3))\n', (5455, 5493), False, 'from keras.preprocessing import image\n'), ((5506, 5529), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (5524, 5529), False, 'from keras.preprocessing import image\n'), ((4986, 5014), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'frame'], {}), '(filename, frame)\n', (4997, 5014), False, 'import cv2\n'), ((2362, 2390), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'frame'], {}), '(filename, frame)\n', (2373, 2390), False, 'import cv2\n'), ((4801, 4822), 'math.floor', 'math.floor', (['frameRate'], {}), '(frameRate)\n', (4811, 4822), False, 'import math\n'), ((2133, 2154), 'math.floor', 'math.floor', 
(['frameRate'], {}), '(frameRate)\n', (2143, 2154), False, 'import math\n')] |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from mars.core import OutputType
from mars.core.operand import Operand, TileableOperandMixin, \
execute, estimate_size
class MyOperand(Operand, TileableOperandMixin):
    """Operand whose built-in executor and size estimator both return 1."""
    @classmethod
    def execute(cls, ctx, op):
        # Class-level fallback; the tests below register/unregister
        # external executors and check which implementation wins.
        return 1
    @classmethod
    def estimate_size(cls, ctx, op):
        # Class-level fallback size estimate, mirroring execute().
        return 1
class MyOperand2(MyOperand):
    """Operand whose own execute/estimate_size always raise.

    Used to verify that a registered executor/estimator is picked over
    the class implementation (otherwise NotImplementedError would surface).
    """
    @classmethod
    def execute(cls, ctx, op):
        raise NotImplementedError
    @classmethod
    def estimate_size(cls, ctx, op):
        raise NotImplementedError
class _OperandMixin(TileableOperandMixin):
    """Mixin providing a minimal single-chunk tile implementation."""
    @classmethod
    def tile(cls, op):
        out = op.outputs[0]
        params = out.params.copy()
        # One chunk covering the whole output, positioned at the origin.
        params['index'] = (0,) * out.ndim
        chunk = op.copy().reset_key().new_chunk(
            None, kws=[params])
        new_params = out.params.copy()
        new_params['chunks'] = [chunk]
        # Empty nsplits: the (scalar-shaped) output is a single chunk.
        new_params['nsplits'] = ()
        return op.copy().new_tileables(op.inputs, kws=[new_params])
class MyOperand3(Operand, _OperandMixin):
    """Operand whose execute always fails, so post_execute must never run."""
    @classmethod
    def execute(cls, ctx, op):
        raise ValueError('intend to fail')
    @classmethod
    def post_execute(cls, ctx, op):  # pragma: no cover
        # Unreachable in practice: execute() above always raises first.
        ctx[op.outputs[0].key] += 1
class MyOperand4(Operand, _OperandMixin):
    """Operand with no execute of its own; post_execute bumps the result by 1."""
    @classmethod
    def post_execute(cls, ctx, op):
        ctx[op.outputs[0].key] += 1
class MyOperand5(MyOperand4):
    # Empty subclass: used to check executor lookup falls back to the
    # parent class (MyOperand4) when nothing is registered for MyOperand5.
    pass
def test_execute():
    """Registered executors override class execute() and cover subclasses."""
    # Registering on the base class: both MyOperand and its subclass
    # MyOperand2 should now execute via the registered lambda (-> 2).
    MyOperand.register_executor(lambda *_: 2)
    assert execute(dict(), MyOperand(_key='1')) == 2
    assert execute(dict(), MyOperand2(_key='1')) == 2
    # After unregistering, MyOperand falls back to its classmethod (-> 1).
    MyOperand.unregister_executor()
    assert execute(dict(), MyOperand(_key='1')) == 1
    # With MyOperand2's registration gone as well, dispatch fails.
    MyOperand2.unregister_executor()
    with pytest.raises(KeyError):
        execute(dict(), MyOperand2(_key='1'))
def test_estimate_size():
    """Registered size estimators behave like registered executors."""
    # Registered estimator wins for the class and its subclass (-> 2).
    MyOperand.register_size_estimator(lambda *_: 2)
    assert estimate_size(dict(), MyOperand(_key='1')) == 2
    assert estimate_size(dict(), MyOperand2(_key='1')) == 2
    # Fallback to the classmethod after unregistering (-> 1).
    MyOperand.unregister_size_estimator()
    assert estimate_size(dict(), MyOperand(_key='1')) == 1
    # No estimator left for MyOperand2: dispatch fails.
    MyOperand2.unregister_size_estimator()
    with pytest.raises(KeyError):
        estimate_size(dict(), MyOperand2(_key='1'))
def test_unknown_dtypes():
    """A tileable with unknown dtypes cannot be used as an input."""
    # Build a dataframe tileable whose dtypes are deliberately unknown.
    op = MyOperand(_output_types=[OutputType.dataframe])
    df = op.new_tileable(None, dtypes=None)
    # Feeding it into another operand should demand prior execution.
    op2 = MyOperand(_output_types=[OutputType.scalar])
    with pytest.raises(ValueError) as exc_info:
        op2.new_tileable([df])
    assert 'executed first' in exc_info.value.args[0]
def test_post_execute(setup):
    """post_execute runs only after a successful execute, honoring overrides."""
    # MyOperand3.execute raises, so execution fails before post_execute.
    op = MyOperand3(_output_types=[OutputType.tensor])
    t = op.new_tileable(None, dtype=np.dtype(float), shape=())
    with pytest.raises(ValueError, match='intend to fail'):
        t.execute()
    op = MyOperand5(_output_types=[OutputType.tensor])
    t2 = op.new_tileable(None, dtype=np.dtype(float), shape=())
    def execute_error(*_):
        raise ValueError('intend to fail again')
    # An executor registered for the parent class (MyOperand4) is picked
    # up for MyOperand5 instances; here it fails on purpose.
    with pytest.raises(ValueError, match='intend to fail again'):
        operand_executors = {MyOperand4: execute_error}
        t2.execute(extra_config={'operand_executors': operand_executors}).fetch()
    def execute_normally(ctx, op):
        ctx[op.outputs[0].key] = 1
    # With a working executor (-> 1), post_execute adds 1, so fetch() == 2.
    operand_executors = {MyOperand5: execute_normally}
    assert t2.execute(extra_config={'operand_executors': operand_executors}).fetch() == 2
| [
"numpy.dtype",
"pytest.raises"
] | [((2325, 2348), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2338, 2348), False, 'import pytest\n'), ((2749, 2772), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2762, 2772), False, 'import pytest\n'), ((3020, 3045), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3033, 3045), False, 'import pytest\n'), ((3303, 3352), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""intend to fail"""'}), "(ValueError, match='intend to fail')\n", (3316, 3352), False, 'import pytest\n'), ((3581, 3636), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""intend to fail again"""'}), "(ValueError, match='intend to fail again')\n", (3594, 3636), False, 'import pytest\n'), ((3267, 3282), 'numpy.dtype', 'np.dtype', (['float'], {}), '(float)\n', (3275, 3282), True, 'import numpy as np\n'), ((3467, 3482), 'numpy.dtype', 'np.dtype', (['float'], {}), '(float)\n', (3475, 3482), True, 'import numpy as np\n')] |
"""Implementation of core scheduling algorithms using Gurobi."""
import logging
import os
from collections import defaultdict
from gurobipy import *
import numpy as np
import shelve
import astropy.units as u
import pandas as pd
from collections import defaultdict
from .constants import TIME_BLOCK_SIZE, EXPOSURE_TIME, READOUT_TIME, FILTER_CHANGE_TIME
from .constants import PROGRAM_NAME_TO_ID
# Maximum number of standard-length exposures that fit in one scheduling
# block, including the per-exposure readout overhead (rounded up).
max_exps_per_slot = np.ceil((TIME_BLOCK_SIZE /
                 (EXPOSURE_TIME + READOUT_TIME)).to(
                     u.dimensionless_unscaled).value).astype(int)
logger = logging.getLogger(__name__)
def night_optimize(df_metric, df, requests_allowed, time_limit=30*u.second,
                   block_use=None):
    """Determine which requests to observe and in what slots.

    Decision variable is yes/no per request_id, slot, filter,
    with an additional decision variable on which filter to use in which slot
    and another for which request sets are observed at all.

    Parameters
    ----------
    df_metric : pd.DataFrame
        Per-request metric values, column-indexed by (slot, filter_id).
    df : pd.DataFrame
        Request-set table with per-filter n_reqs columns, program ids,
        exposure times, etc.
    requests_allowed : dict
        (program_id, subprogram_name) -> max requests allowed tonight.
        NOTE: may be mutated for MSIP entries (rounded up to even).
    time_limit : astropy Quantity
        Wall-clock limit handed to the Gurobi solver.
    block_use : dict-like, optional
        Fraction of each slot already in use; defaults to all-zero.

    Returns
    -------
    tuple
        (index of scheduled request sets, schedule dataframe, tidy dataframe)
    """
    # The default used to be a shared defaultdict(float) in the signature.
    # Reading a missing key from a defaultdict *inserts* it, so that mutable
    # default leaked state between calls; build a fresh one per call instead.
    if block_use is None:
        block_use = defaultdict(float)
    # these are fragile when columns get appended
    slots = np.unique(df_metric.columns.get_level_values(0).values)
    filter_ids = np.unique(df_metric.columns.get_level_values(1).values)
    # extra columns floating around cause problems
    filter_ids = [fid for fid in filter_ids if fid != '']
    # flatten the metric dataframe to make it easier to work with
    df_metric_local = df_metric.copy()
    df_metric_local['request_id'] = df_metric_local.index
    # make a "tidy" dataframe with one row per (request, slot, filter)
    dft = pd.melt(df_metric_local, id_vars='request_id',
                  var_name=['slot', 'metric_filter_id'],
                  value_name='metric')
    # get n_reqs by fid
    n_reqs_cols = ['n_reqs_{}'.format(fid) for fid in filter_ids]
    n_reqs_cols.extend(['program_id', 'subprogram_name',
                        'total_requests_tonight', 'exposure_time', 'dec'])
    dft = pd.merge(dft, df[n_reqs_cols], left_on='request_id', right_index=True)
    # don't need the dec column anymore
    dft = dft.drop('dec', axis=1)
    # calculate number of slots required per request set
    # nreqs_{fid} weighted sum over the filters
    for fid in filter_ids:
        wfid = dft['metric_filter_id'] == fid
        n_req_col = 'n_reqs_{}'.format(fid)
        dft.loc[wfid, 'metric'] *= (dft.loc[wfid, n_req_col] /
                                    dft.loc[wfid, 'total_requests_tonight'])
    grprs = dft.groupby(['request_id', 'slot'])
    dfrs = grprs['metric'].agg(np.sum)
    # calculate n_usable slots (metric above a small usability threshold)
    grpr = dfrs.groupby('request_id')
    n_usable = grpr.agg(lambda x: np.sum(x > 0.05)).astype(int)
    n_usable.name = 'n_usable'
    # sum df_metric down to one column, ignoring negative-metric slots
    metric_sum = grpr.agg(lambda x: np.sum(np.where(x > 0, x, 0)))
    metric_sum.name = 'metric_sum'
    # merge additional useful info
    dfr = df[['program_id', 'subprogram_name', 'total_requests_tonight']].join(n_usable).join(metric_sum)
    # determine which request sets have enough usable slots
    dfr['observable_tonight'] = dfr['total_requests_tonight'] <= dfr['n_usable']
    # restrict to only the observable requests
    dfr = dfr.loc[dfr['observable_tonight'], :]
    dft = pd.merge(dft, dfr[['n_usable', 'observable_tonight']],
                  left_on='request_id', right_index=True)
    dft = dft.loc[dft['observable_tonight'], :]
    request_sets = dfr.index.values
    df_metric = df_metric.loc[dfr.index]
    # Create an empty model
    m = Model('slots')
    # set the number of threads Gurobi uses
    if 'GRB_USE_NTHREADS' in os.environ:
        m.Params.Threads = int(os.environ['GRB_USE_NTHREADS'])
    # decision variable: yes or no for each request set
    yr_dict = m.addVars(df_metric_local.index, name='Yr', vtype=GRB.BINARY)
    yr_series = pd.Series(yr_dict, name='Yr')
    dfr = dfr.join(yr_series)
    # decision variable: yes/no per (request, slot, filter) row
    yrtf_dict = m.addVars(dft.index, name='Yrtf', vtype=GRB.BINARY)
    yrtf_series = pd.Series(yrtf_dict, name='Yrtf')
    dft = dft.join(yrtf_series)
    # putting the Yrtf vars in the dft series is not always convenient;
    # provide a lookup table from (request, slot, filter) to row index
    rtf_to_idx = {}
    idx_to_rtf = dft[['request_id', 'slot', 'metric_filter_id']].to_dict(orient='index')
    for k, v in idx_to_rtf.items():
        rtf_to_idx[(v['request_id'], v['slot'], v['metric_filter_id'])] = k
    # create resultant variables: Yr = 1 if request r is observed in at least
    # one slot
    for r in request_sets:
        m.addGenConstrOr(yr_dict[r], dft.loc[dft['request_id'] == r, 'Yrtf'],
                         "orconstr_{}".format(r))
    # nreqs_{fid} slots assigned per request set if it is observed
    # this constructor is pretty slow
    constr_nreqs = m.addConstrs(
        ((np.sum(dft.loc[(dft['request_id'] == r) &
                        (dft['metric_filter_id'] == f), 'Yrtf'])
          == (df.loc[r, 'n_reqs_{}'.format(f)] * dfr.loc[r, 'Yr']))
         for f in filter_ids for r in request_sets),
        "constr_nreqs")
    # minimum slot separation per filter constraint
    MIN_SLOT_SEPARATION = 2
    # TODO: generalize this beyond just ZUDS
    wZUDSt = ((dft['subprogram_name'] == 'high_cadence')
              )
    # only add these parameters if there is a program to space
    if np.sum(wZUDSt):
        space_obs = True
    else:
        space_obs = False
    if space_obs:
        wZUDSg = wZUDSt & (dft['metric_filter_id'] == 1)
        wZUDSr = wZUDSt & (dft['metric_filter_id'] == 2)
        wZUDSi = wZUDSt & (dft['metric_filter_id'] == 3)
        wrZUDS = ((dft['subprogram_name'] == 'high_cadence'))
        ZUDS_request_sets = dfr.loc[wrZUDS].index.tolist()
        filter_ids_to_limit = [1, 2]
        # slot separation
        # create resultant variables: 1 if both slot_a and slot_b are true
        yrttf = m.addVars(ZUDS_request_sets, slots[:-1], slots[1:],
                          filter_ids_to_limit, vtype=GRB.BINARY)
        constraint_dict = {}
        for r in ZUDS_request_sets:
            for t in slots[:-1]:
                for t2 in slots[1:]:
                    if t2 < t:
                        # avoid duplicate entries
                        continue
                    for f in filter_ids_to_limit:
                        m.addGenConstrAnd(yrttf[r, t, t2, f],
                                          [yrtf_dict[rtf_to_idx[(r, t, f)]],
                                           yrtf_dict[rtf_to_idx[(r, t2, f)]]],
                                          "slotdiff_and_{}_{}_{}_{}".format(r, t, t2, f))
        # group slot pairs by their separation dt
        dtdict = defaultdict(list)
        for t in slots[:-1]:
            for t2 in slots[1:]:
                if t2 <= t:
                    # avoid duplicate entries
                    continue
                dt = t2 - t
                dtdict[dt].append((t, t2))
        # create delta-t resultant variables: OR constraint over all slot
        # pairs with that separation
        yrdtf = m.addVars(ZUDS_request_sets, dtdict.keys(),
                          filter_ids_to_limit, vtype=GRB.BINARY)
        for r in ZUDS_request_sets:
            for dt in dtdict.keys():
                for f in filter_ids_to_limit:
                    m.addGenConstrOr(yrdtf[r, dt, f],
                                     [yrttf[r, t, t2, f] for (t, t2) in dtdict[dt]],
                                     "slot_dt_indicator_{}_{}_{}".format(r, dt, f))
        # NOTE: a hard minimum-separation constraint could be imposed by
        # forcing yrdtf[r, dt, f] == 0 for dt < MIN_SLOT_SEPARATION; instead
        # the separation is rewarded in the objective function below.
    # create resultant variables: Ytf = 1 if slot t has filter f used
    ytf = m.addVars(slots, filter_ids, vtype=GRB.BINARY)
    for t in slots:
        for f in filter_ids:
            m.addGenConstrOr(ytf[t, f],
                             dft.loc[(dft['slot'] == t) &
                                     (dft['metric_filter_id'] == f), 'Yrtf'],
                             "orconstr_{}_{}".format(t, f))
    # now constrain ourselves to one and only one filter per slot.
    constr_onefilter = m.addConstrs(
        (ytf.sum(t, '*') == 1 for t in slots), 'constr_onefilter')
    # create filter change resultant variable: Ydfds = 1 if
    # filter changes between slot s and s+1
    ydfds = m.addVars(slots[:-1], vtype=GRB.BINARY)
    # use indicator constraints to set the value
    for i, t in enumerate(slots[:-1]):
        for f in filter_ids:
            m.addGenConstrIndicator(ydfds[t], False,
                                    ytf[slots[i], f] - ytf[slots[i + 1], f], GRB.EQUAL, 0,
                                    "filt_change_indicator_{}_{}".format(t, f))
    # total exposure time constraint per slot, leaving room for any
    # fraction of the slot already committed via block_use
    constr_nperslot = m.addConstrs(
        ((np.sum(dft.loc[dft['slot'] == t, 'Yrtf'] *
                (dft.loc[dft['slot'] == t, 'exposure_time'] +
                 READOUT_TIME.to(u.second).value))
          <= (TIME_BLOCK_SIZE.to(u.second).value * (1. - block_use[t])))
         for t in slots), "constr_nperslot")
    # program balance. To avoid key errors, only set constraints
    # for programs that are present
    msip_requests_needed = []
    msip_requests_possible = {}
    requests_needed = []
    for p in requests_allowed.keys():
        if p[0] == PROGRAM_NAME_TO_ID['MSIP']:
            wmsipp = (dfr['program_id'] == p[0]) & (dfr['subprogram_name'] == p[1])
            n_available = np.sum(dfr.loc[wmsipp, 'total_requests_tonight'])
            if n_available > 0:
                # to demand exact equality we need to know how many
                # requests we have: use the minimum of allowed/available
                if n_available <= requests_allowed[p]:
                    # MSIP requests only come in pairs, so we need an even
                    # number.
                    # TODO: generalize this.
                    if n_available % 2 != 0:
                        n_available -= 1
                    msip_requests_possible[p] = n_available
                else:
                    if requests_allowed[p] % 2 != 0:
                        requests_allowed[p] += 1
                    msip_requests_possible[p] = requests_allowed[p]
                msip_requests_needed.append(p)
        else:
            if np.sum((dft['program_id'] == p[0]) &
                      (dft['subprogram_name'] == p[1])) > 0:
                requests_needed.append(p)
    # demand exact equality for MSIP
    constr_msip_balance = m.addConstrs(
        ((np.sum(dft.loc[(dft['program_id'] == p[0]) &
                        (dft['subprogram_name'] == p[1]), 'Yrtf'])
          == msip_requests_possible[p]) for p in msip_requests_needed),
        "constr_msip_balance")
    constr_balance = m.addConstrs(
        ((np.sum(dft.loc[(dft['program_id'] == p[0]) &
                        (dft['subprogram_name'] == p[1]), 'Yrtf'])
          <= requests_allowed[p]) for p in requests_needed),
        "constr_balance")
    m.update()

    # np.heaviside returns a TypeError so make our own
    def heaviside(x, x0=0):
        # scalars only
        # < and > are not implimented for Gurobi Linexps, so have to do
        # some unusual control flow here with ==, <=, >=
        if x == 0:
            return x0
        elif x <= 0:
            return 0
        else:
            return 1

    # scale by number of standard exposures so long exposures aren't
    # penalized
    if not space_obs:
        m.setObjective(
            np.sum(dft['Yrtf'] * dft['metric'] *
                   dft['exposure_time'] / EXPOSURE_TIME.to(u.second).value)
            - ydfds.sum() * (FILTER_CHANGE_TIME / (EXPOSURE_TIME +
                                                   READOUT_TIME) * 2.5).value
            - np.sum(
                [heaviside((requests_allowed[p] - np.sum(
                    dft.loc[(dft['program_id'] == p[0]) &
                            (dft['subprogram_name'] == p[1]), 'Yrtf'].values
                ))) * 2.5
                 for p in requests_needed]),
            GRB.MAXIMIZE)
    else:
        def slot_scale(dt):
            # reward wider slot separations, normalized by a day's slots
            return dt / 24.
        m.setObjective(
            np.sum(dft['Yrtf'] * dft['metric'] *
                   dft['exposure_time'] / EXPOSURE_TIME.to(u.second).value)
            - ydfds.sum() * (FILTER_CHANGE_TIME / (EXPOSURE_TIME +
                                                   READOUT_TIME) * 2.5).value
            + np.sum(yrdtf[r, dt, f] * slot_scale(dt) for r in ZUDS_request_sets for dt in dtdict.keys() if dt >= MIN_SLOT_SEPARATION for f in filter_ids_to_limit)
            - np.sum(
                [heaviside((requests_allowed[p] - np.sum(
                    dft.loc[(dft['program_id'] == p[0]) &
                            (dft['subprogram_name'] == p[1]), 'Yrtf'].values
                ))) * 2.5
                 for p in requests_needed]),
            GRB.MAXIMIZE)
    # Quick and dirty is okay!
    m.Params.TimeLimit = time_limit.to(u.second).value
    m.update()
    m.optimize()
    # if we have optimization problems, MSIP exact equality is likely the
    # problem.  Relax the constraint and retry.
    if (m.Status != GRB.OPTIMAL) and (m.Status != GRB.TIME_LIMIT):
        logger.warning(f"Gurobi failed to optimize! Code {m.Status}")
        logger.info("Relaxing MSIP exact constraint.")
        m.setAttr("Sense", [c for c in constr_msip_balance.values()],
                  ["<" for c in constr_msip_balance.values()])
        m.optimize()
        if (m.Status != GRB.OPTIMAL) and (m.Status != GRB.TIME_LIMIT):
            logger.error(f"Gurobi optimization with relaxed MSIP constraints failed! Code {m.Status}")
    # now get the decision variables. Use > a constant to avoid
    # numerical precision issues
    try:
        dft['Yrtf_val'] = dft['Yrtf'].apply(lambda x: x.getAttr('x') > 0.1)
    except AttributeError:
        logger.error("Optimization reached time limit but didn't find solutions")
        logger.info("Relaxing MSIP exact constraint.")
        m.setAttr("Sense", [c for c in constr_msip_balance.values()],
                  ["<" for c in constr_msip_balance.values()])
        m.optimize()
        try:
            dft['Yrtf_val'] = dft['Yrtf'].apply(lambda x: x.getAttr('x') > 0.1)
        except AttributeError:
            logger.error(f"Gurobi optimization with relaxed MSIP constraints failed!")
    df_schedule = dft.loc[dft['Yrtf_val'], ['slot', 'metric_filter_id', 'request_id']]
    n_iterations = 1
    # if we don't optimize long enough, we can end up not satisfying
    # our constraints. In that case, continue the optimization
    while df_schedule.groupby(['slot', 'request_id']).agg(len).max()[0] > 1:
        n_iterations += 1
        if n_iterations > 10:
            raise ValueError('Optimization failed to satisfy constraints')
        print("> Slot optimization did not satisfy all constraints. Continuing Optimization (Iteration {})".format(n_iterations))
        m.update()
        m.optimize()
        # now get the decision variables
        dft['Yrtf_val'] = dft['Yrtf'].apply(lambda x: x.getAttr('x') > 0.1)
        df_schedule = dft.loc[dft['Yrtf_val'], ['slot', 'metric_filter_id', 'request_id']]
    # get the request set decision variables
    dfr['Yr_val'] = dfr['Yr'].apply(lambda x: x.getAttr('x') > 0.1)

    # this doesn't work in the objective function but is a useful check
    def num_filter_changes(ytf):
        n_changes = 0
        for i, slot in enumerate(slots[:-1]):
            for fid in filter_ids:
                if ytf[(slot, fid)].getAttr('x') == 1:
                    if not (ytf[(slots[i + 1], fid)].getAttr('x') == 1):
                        n_changes += 1
        return n_changes

    print(f'Number of filter changes: {num_filter_changes(ytf)}')
    return dfr.loc[dfr['Yr_val'], 'program_id'].index, df_schedule, dft
def tsp_optimize(pairwise_distances):
    """Order a set of pointings by solving a TSP over their pairwise distances.

    Parameters
    ----------
    pairwise_distances : square 2D array
        Symmetric cost matrix between pointings.

    Returns
    -------
    (tour, distance)
        ``tour`` is a list of node indices in visiting order;
        ``distance`` is the model objective (for n <= 2, a list instead).
    """
    # core algorithmic code from
    # http://examples.gurobi.com/traveling-salesman-problem/
    # Callback - use lazy constraints to eliminate sub-tours
    def subtourelim(model, where):
        if where == GRB.callback.MIPSOL:
            selected = []
            # make a list of edges selected in the solution
            for i in range(n):
                sol = model.cbGetSolution([model._vars[i,j] for j in range(n)])
                selected += [(i,j) for j in range(n) if sol[j] > 0.5]
            # find the shortest cycle in the selected edge list
            tour = subtour(selected)
            if len(tour) < n:
                # add a subtour elimination constraint
                expr = 0
                for i in range(len(tour)):
                    for j in range(i+1, len(tour)):
                        expr += model._vars[tour[i], tour[j]]
                model.cbLazy(expr <= len(tour)-1)
    # Given a list of edges, finds the shortest subtour
    def subtour(edges):
        visited = [False]*n
        cycles = []
        lengths = []
        selected = [[] for i in range(n)]
        for x,y in edges:
            selected[x].append(y)
        # repeatedly walk unvisited cycles until every node is covered
        while True:
            current = visited.index(False)
            thiscycle = [current]
            while True:
                visited[current] = True
                neighbors = [x for x in selected[current] if not visited[x]]
                if len(neighbors) == 0:
                    break
                current = neighbors[0]
                thiscycle.append(current)
            cycles.append(thiscycle)
            lengths.append(len(thiscycle))
            if sum(lengths) == n:
                break
        return cycles[lengths.index(min(lengths))]
    assert (pairwise_distances.shape[0] == pairwise_distances.shape[1])
    n = pairwise_distances.shape[0]
    # avoid optimization failures if we only feed in a couple of points
    if n == 1:
        return [0], [READOUT_TIME.to(u.second).value]
    if n == 2:
        return [0, 1], [pairwise_distances[0,1]]
    m = Model()
    # set the number of threads Gurobi uses
    if 'GRB_USE_NTHREADS' in os.environ:
        m.Params.Threads = int(os.environ['GRB_USE_NTHREADS'])
    # Create variables: one binary edge variable per unordered node pair,
    # stored symmetrically so vars[i,j] is vars[j,i]
    vars = {}
    for i in range(n):
        for j in range(i+1):
            vars[i,j] = m.addVar(obj=pairwise_distances[i,j], vtype=GRB.BINARY, name='e'+str(i)+'_'+str(j))
            vars[j,i] = vars[i,j]
    m.update()
    # Add degree-2 constraint, and forbid loops
    for i in range(n):
        m.addConstr(quicksum(vars[i,j] for j in range(n)) == 2)
        vars[i,i].ub = 0
    m.update()
    # Optimize model with lazy subtour elimination
    m._vars = vars
    m.params.LazyConstraints = 1
    m.optimize(subtourelim)
    if m.Status != GRB.OPTIMAL:
        raise ValueError("Optimization failure")
    solution = m.getAttr('x', vars)
    selected = [(i,j) for i in range(n) for j in range(n) if solution[i,j] > 0.5]
    distances = np.sum([pairwise_distances[s] for s in selected])
    distance = m.objVal
    # a single full-length cycle means no subtours remain
    assert len(subtour(selected)) == n
    # dictionary of connected nodes
    edges = defaultdict(list)
    for i in range(n):
        for j in range(n):
            if vars[i,j].getAttr('x') > 0.5:
                edges[i].append(j)
    # Walk the degree-2 edge structure into an ordered node list
    def unwrap_tour(edges, start_node=None):
        if start_node is None:
            start_node = 0
        current_node = start_node
        # arbitrary choice of direction
        next_node = edges[start_node][0]
        tour = [start_node]
        while next_node != start_node:
            tour.append(next_node)
            edge_nodes = edges[next_node]
            assert (current_node in edge_nodes)
            assert(len(edge_nodes) == 2)
            # step to whichever neighbor we did not just come from
            if edge_nodes[0] == current_node:
                tmp = edge_nodes[1]
            elif edge_nodes[1] == current_node:
                tmp = edge_nodes[0]
            current_node = next_node
            next_node = tmp
        return tour
    tour = unwrap_tour(edges)
    assert (len(tour) == n)
    return tour, distance
| [
"logging.getLogger",
"pandas.Series",
"numpy.where",
"pandas.merge",
"numpy.sum",
"collections.defaultdict",
"pandas.melt"
] | [((567, 594), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (584, 594), False, 'import logging\n'), ((692, 710), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (703, 710), False, 'from collections import defaultdict\n'), ((1527, 1637), 'pandas.melt', 'pd.melt', (['df_metric_local'], {'id_vars': '"""request_id"""', 'var_name': "['slot', 'metric_filter_id']", 'value_name': '"""metric"""'}), "(df_metric_local, id_vars='request_id', var_name=['slot',\n 'metric_filter_id'], value_name='metric')\n", (1534, 1637), True, 'import pandas as pd\n'), ((1861, 1931), 'pandas.merge', 'pd.merge', (['dft', 'df[n_reqs_cols]'], {'left_on': '"""request_id"""', 'right_index': '(True)'}), "(dft, df[n_reqs_cols], left_on='request_id', right_index=True)\n", (1869, 1931), True, 'import pandas as pd\n'), ((3128, 3226), 'pandas.merge', 'pd.merge', (['dft', "dfr[['n_usable', 'observable_tonight']]"], {'left_on': '"""request_id"""', 'right_index': '(True)'}), "(dft, dfr[['n_usable', 'observable_tonight']], left_on='request_id',\n right_index=True)\n", (3136, 3226), True, 'import pandas as pd\n'), ((3704, 3733), 'pandas.Series', 'pd.Series', (['yr_dict'], {'name': '"""Yr"""'}), "(yr_dict, name='Yr')\n", (3713, 3733), True, 'import pandas as pd\n'), ((3849, 3882), 'pandas.Series', 'pd.Series', (['yrtf_dict'], {'name': '"""Yrtf"""'}), "(yrtf_dict, name='Yrtf')\n", (3858, 3882), True, 'import pandas as pd\n'), ((5198, 5212), 'numpy.sum', 'np.sum', (['wZUDSt'], {}), '(wZUDSt)\n', (5204, 5212), True, 'import numpy as np\n'), ((19074, 19123), 'numpy.sum', 'np.sum', (['[pairwise_distances[s] for s in selected]'], {}), '([pairwise_distances[s] for s in selected])\n', (19080, 19123), True, 'import numpy as np\n'), ((19236, 19253), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (19247, 19253), False, 'from collections import defaultdict\n'), ((6629, 6646), 'collections.defaultdict', 'defaultdict', (['list'], {}), 
'(list)\n', (6640, 6646), False, 'from collections import defaultdict\n'), ((9651, 9700), 'numpy.sum', 'np.sum', (["dfr.loc[wmsipp, 'total_requests_tonight']"], {}), "(dfr.loc[wmsipp, 'total_requests_tonight'])\n", (9657, 9700), True, 'import numpy as np\n'), ((2681, 2702), 'numpy.where', 'np.where', (['(x > 0)', 'x', '(0)'], {}), '(x > 0, x, 0)\n', (2689, 2702), True, 'import numpy as np\n'), ((4627, 4713), 'numpy.sum', 'np.sum', (["dft.loc[(dft['request_id'] == r) & (dft['metric_filter_id'] == f), 'Yrtf']"], {}), "(dft.loc[(dft['request_id'] == r) & (dft['metric_filter_id'] == f),\n 'Yrtf'])\n", (4633, 4713), True, 'import numpy as np\n'), ((10519, 10589), 'numpy.sum', 'np.sum', (["((dft['program_id'] == p[0]) & (dft['subprogram_name'] == p[1]))"], {}), "((dft['program_id'] == p[0]) & (dft['subprogram_name'] == p[1]))\n", (10525, 10589), True, 'import numpy as np\n'), ((10745, 10837), 'numpy.sum', 'np.sum', (["dft.loc[(dft['program_id'] == p[0]) & (dft['subprogram_name'] == p[1]), 'Yrtf']"], {}), "(dft.loc[(dft['program_id'] == p[0]) & (dft['subprogram_name'] == p[1\n ]), 'Yrtf'])\n", (10751, 10837), True, 'import numpy as np\n'), ((10994, 11086), 'numpy.sum', 'np.sum', (["dft.loc[(dft['program_id'] == p[0]) & (dft['subprogram_name'] == p[1]), 'Yrtf']"], {}), "(dft.loc[(dft['program_id'] == p[0]) & (dft['subprogram_name'] == p[1\n ]), 'Yrtf'])\n", (11000, 11086), True, 'import numpy as np\n'), ((2537, 2553), 'numpy.sum', 'np.sum', (['(x > 0.05)'], {}), '(x > 0.05)\n', (2543, 2553), True, 'import numpy as np\n'), ((11984, 12083), 'numpy.sum', 'np.sum', (["dft.loc[(dft['program_id'] == p[0]) & (dft['subprogram_name'] == p[1]), 'Yrtf'\n ].values"], {}), "(dft.loc[(dft['program_id'] == p[0]) & (dft['subprogram_name'] == p[1\n ]), 'Yrtf'].values)\n", (11990, 12083), True, 'import numpy as np\n'), ((12772, 12871), 'numpy.sum', 'np.sum', (["dft.loc[(dft['program_id'] == p[0]) & (dft['subprogram_name'] == p[1]), 'Yrtf'\n ].values"], {}), "(dft.loc[(dft['program_id'] == 
p[0]) & (dft['subprogram_name'] == p[1\n ]), 'Yrtf'].values)\n", (12778, 12871), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 3 16:30:52 2021
@author: jakubicek
"""
import os
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
# import pandas as pd
# from pathlib import Path
import torch.optim as optim
import glob
import torch.nn as nn
import torch.nn.functional as F
# from torch.utils.data import DataLoader
# from torch.utils.data import Dataset
# import utilities
import torch
# from torch import torchaudio
# import torchaudio
# import random
import h5py
# import time
import loaders
class Block(nn.Module):
    """Conv1d sub-block: BatchNorm -> Conv -> ReLU -> Conv -> ReLU.

    Sequence length is preserved (kernel 3, stride 1, padding 1 with
    replicate padding); the channel count goes from ``in_ch`` to ``out_ch``.
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()
        # Fix: the original assigned self.relu = nn.ReLU() twice (dead
        # duplicate). ReLU is stateless, so one shared instance suffices;
        # module registration order (conv1, relu, conv2, BN) is unchanged.
        self.conv1 = nn.Conv1d(in_channels=in_ch, out_channels=out_ch, kernel_size=3,
                               stride=1, padding=1, padding_mode='replicate')
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv1d(in_channels=out_ch, out_channels=out_ch, kernel_size=3,
                               stride=1, padding=1, padding_mode='replicate')
        # normalizes over the *input* channels (applied first in forward)
        self.BN = nn.BatchNorm1d(in_ch)

    def forward(self, x):
        # BN(x) -> conv1 -> relu -> conv2 -> relu
        return self.relu(self.conv2(self.relu(self.conv1(self.BN(x)))))
class Encoder(nn.Module):
    """Stack of `Block`s with an additive skip connection and max-pool
    downsampling (length roughly halved per stage).
    """

    def __init__(self, chs=(1, 64, 128, 256)):
        super().__init__()
        stages = [Block(c_in, c_out) for c_in, c_out in zip(chs[:-1], chs[1:])]
        self.enc_blocks = nn.ModuleList(stages)
        self.pool = nn.MaxPool1d(3, stride=2, padding=1)

    def forward(self, x):
        skip = x
        for stage in self.enc_blocks:
            out = stage(x)
            # Broadcast the skip signal across all feature channels via
            # repeat and add it in place.
            # NOTE(review): repeat(1, out.shape[1], 1) only lines up when the
            # running skip has a single channel, as in this script's input.
            out += skip.repeat(1, out.shape[1], 1)
            x = self.pool(out)
            skip = self.pool(skip)
        return x
class GenNet(nn.Module):
    """Encoder + single-layer LSTM + two-layer classification head.

    ``init_hiden(batch)`` must be called before ``forward`` to (re)create
    the LSTM hidden/cell state; the final hidden state feeds the head.
    """

    def __init__(self, enc_chs=(1, 64, 128, 256), lstm_h_size=256, h_size=1024, num_class=1):
        super().__init__()
        self.lstm_layers = 1
        self.lstm_h_size = lstm_h_size
        self.encoder = Encoder(enc_chs)
        # NOTE(review): nn.LSTM dropout only acts *between* layers, so with a
        # single layer it is effectively inactive (PyTorch warns about this).
        self.lstm = nn.LSTM(enc_chs[-1], lstm_h_size, batch_first=True,
                            num_layers=self.lstm_layers, bidirectional=False,
                            dropout=0.5)
        self.linear1 = nn.Linear(lstm_h_size, h_size, bias=True)
        self.do = nn.Dropout(p=0.5)
        self.linear2 = nn.Linear(h_size, num_class, bias=True)
        self.relu = nn.ReLU()

    def forward(self, x):
        # (batch, time, 1) -> (batch, channels, time) for the Conv1d encoder
        feats = self.encoder(x.permute([0, 2, 1]))
        # back to batch-first (batch, time, channels) for the LSTM
        feats = feats.permute([0, 2, 1])
        _, (self.h, self.c) = self.lstm(feats, (self.h, self.c))
        # classify from the last hidden state (dropout self.do is unused here)
        out = torch.squeeze(self.h)
        out = self.relu(self.linear1(out))
        return self.linear2(out)

    def init_hiden(self, batch):
        # Zeroed hidden/cell state, allocated on the GPU.
        state_shape = (self.lstm_layers, batch, self.lstm_h_size)
        self.h = torch.zeros(state_shape).cuda()
        self.c = torch.zeros(state_shape).cuda()
# class ClassGen(nn.Module):
# def __init__(self, h_size=256, num_class=1):
# super().__init__()
# self.linear1 = nn.Linear(h_size, h_size, bias=True)
# self.do = nn.Dropout(p=0.5)
# self.linear2 = nn.Linear(h_size, num_class, bias=True)
# self.relu = nn.ReLU()
# # self.sigm = nn.Sigmoid()
# def forward(self, x):
# # x = x.permute([0,2,1])
# x = torch.squeeze(x)
# x=self.linear1(x)
# x=self.relu(x)
# # x=self.do(x)
# x=self.linear2(x)
# x=self.relu(x)
# return x
def CreateDataset(path_data):
    """Recursively scan ``path_data`` for ``*.h5`` files and build the index.

    Returns
    -------
    sigs_list : list of dict
        One ``{'file_path': ..., 'tname': ...}`` entry per top-level HDF5
        dataset found in each file.
    lbl_list : list of np.float32
        One numeric class label per entry, derived from the gene name
        encoded at the start of the file name (``<gene>_...``) via the
        module-level ``dictGen`` mapping.
    """
    # Fix: the original pattern concatenated "**/*.h5" without a separator and
    # without recursive=True, so "**" silently degraded to "*".
    h5_list = glob.glob(os.path.join(path_data, "**", "*.h5"), recursive=True)
    sigs_list = []
    lbl_list = []
    for file_path in h5_list:
        # Gene name is the file-name prefix before the first underscore.
        # os.path.basename works on both Windows and POSIX paths (the
        # original split('\\') only handled Windows separators).
        gene = os.path.basename(file_path).split('_')[0]
        label = np.asarray(dictGen[gene]).astype(np.float32)
        # Open read-only and close deterministically (the original leaked
        # the h5py file handle).
        with h5py.File(file_path, 'r') as f:
            for tname in f:
                sigs_list.append({'file_path': file_path, 'tname': tname})
                lbl_list.append(label)
    return sigs_list, lbl_list
# Gene name -> integer class label used throughout the script.
dictGen = dict(gapA=0 , infB=1 , mdh=2 , pgi=3 , phoE=4 , rpoB=5 , tonB=6 )
# NOTE(review): non-raw string with backslashes ('\d', '\j') — they are not
# valid escapes so Python keeps them literally, but a raw string would be safer.
path_data = 'C:\data\jakubicek/all_MLST_genes_new_format1'
dataset, lbl_list = CreateDataset(path_data)
batch = 64
# binary classification head (passed as num_class below)
classes = 2
net = GenNet( enc_chs=(1,64,128,256,512), lstm_h_size=512, h_size=512, num_class=classes).cuda()
optimizer = optim.Adam(net.parameters(), lr=0.0001,weight_decay=1e-6)
# decay the learning rate 10x every 30 scheduler steps (stepped once per epoch)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1, verbose=True)
end_gen = 12530
# subsample every 10th item of the first end_gen samples for training
train_list = dataset[0:end_gen:10]
# train_list = np.random.permutation( train_list )
lbl_list = lbl_list[0:end_gen:10]
# per-class sample counts over the 7 gene classes...
w1,_= np.histogram(np.array(lbl_list), bins=np.arange(0,7+1,1)-0.5)
# ...turned into normalized class weights (overwritten inside the loop below)
weight = torch.tensor((w1/np.sum(w1)).astype(np.float32) ).cuda()
# per-iteration traces (lowercase) and their per-half-epoch averages (uppercase)
train_loss = []
train_LOSS = []
train_acc = []
train_ACC = []
# --- Training loop: 100 epochs over the subsampled gene dataset ---
for epch in range(0,100):
    # reshuffle the training files each epoch
    train_list = np.random.permutation( train_list )
    net.train()
    net.zero_grad()
    ii=0
    # train_acc=[]
    # Pred = torch.zeros((batch, 2, 1), dtype=torch.float32).cuda()
    # LBL = torch.zeros((batch, 1), dtype=torch.float32).cuda()
    for ite in range(0, len(train_list)-batch, batch):
        net.zero_grad()
        # net2.zero_grad()
        #####################
        # Pred = torch.zeros((batch, classes, 1), dtype=torch.float32).cuda()
        # Sig = torch.zeros((batch, 1, 50000), dtype=torch.float32).cuda().type(torch.long)
        # Lbl = torch.zeros((batch, 1), dtype=torch.float32).cuda().type(torch.long)
        # for b in range(0, batch,1):
        # # net.zero_grad()
        # net.init_hiden(1)
        # file = train_list[ite+b]
        # sig, lbl = loaders.Load_whole_signal_h5(file, dictGen)
        # pred = net(sig.cuda())
        # net.zero_grad()
        # pred = F.softmax(pred, dim=2)
        # pred = pred.permute([0,2,1])
        # Pred[b,:,:] = pred
        # Lbl[b,:] = torch.tensor(lbl)
        # # # torch.tensor(np.expand_dims(np.expand_dims(lbl,0),0)).cuda().type(torch.long)
        #####################
        #######################
        # Sig, Lbl = loaders.Load_cut_signal_h5(ite, batch, train_list, dictGen)
        # load one minibatch of signal cuts and labels from the HDF5 files
        Sig, Lbl = loaders.Load_cut_gen_h5(ite, batch, train_list, dictGen)
        # fresh (zeroed) LSTM hidden/cell state for every minibatch
        net.init_hiden(batch)
        Pred = net(Sig.cuda())
        # net.init_hiden(batch)
        # Feat = net(Sig.cuda())
        # Pred = net2(Feat.cuda())
        # NOTE(review): CrossEntropyLoss expects raw logits; applying softmax
        # first still trains but changes the effective gradients — verify this
        # is intended.
        Pred = F.softmax(Pred, dim=1)
        # Pred = F.sigmoid(Pred)
        # # Pred = Pred.permute([1,2,0])
        ####################3
        # w1,_= np.histogram(np.array(lbl_list), bins=np.arange(0,7+1,1)-0.5)
        # weight = torch.tensor((w1/np.sum(w1)).astype(np.float32) ).cuda()
        # hard-coded binary class weights (shadow the histogram weights
        # computed before the loop)
        weight =torch.tensor( (0.6, 0.4) ).cuda()
        # loss = nn.CrossEntropyLoss(weight)( Pred, Lbl )
        # loss = nn.CrossEntropyLoss(weight)( Pred, torch.squeeze(Lbl.cuda().type(torch.long)) )
        # loss = nn.CrossEntropyLoss(weight[0:2])( Pred.squeeze(), torch.squeeze(Lbl.cuda().type(torch.long)) )
        # loss = nn.CrossEntropyLoss()( Pred.squeeze(), torch.squeeze(Lbl.cuda().type(torch.long)) )
        loss = nn.CrossEntropyLoss(weight)( Pred.squeeze(), torch.squeeze(Lbl.cuda().type(torch.long)) )
        # one_hot = torch.nn.functional.one_hot(Lbl.squeeze().type(torch.long), 7)
        # loss = -torch.mean( torch.log(Pred[one_hot==1]))
        # loss = nn.CrossEntropyLoss()( gg, torch.squeeze(Lbl.cuda().type(torch.long)) )
        optimizer.zero_grad()
        loss.backward()
        # nn.utils.clip_grad_norm_(net.parameters(), max_norm=2.0, norm_type=2)
        # nn.utils.clip_grad_value_(net.parameters(), clip_value=1.0)
        optimizer.step()
        # scheduler.step()
        # optimizer2.zero_grad()
        # # loss.backward()
        # # nn.utils.clip_grad_norm_(net.parameters(), max_norm=2.0, norm_type=2)
        # # nn.utils.clip_grad_value_(net2.parameters(), clip_value=2.0)
        # optimizer2.step()
        # # scheduler.step()
        net.zero_grad()
        # net2.zero_grad()
        torch.cuda.empty_cache()
        # per-sample accuracy: ground-truth label vs. argmax of the prediction
        acc = (Lbl.detach().cpu().numpy().squeeze() == (Pred.detach().cpu().numpy().squeeze().argmax(1))).astype(np.dtype(float))
        train_loss.append(loss.detach().cpu().numpy())
        train_acc.append( np.mean(acc) )
        # train_ACC.append(np.mean(train_acc))
        # twice per epoch: record averaged loss/accuracy and plot progress
        if ii%(int((len(train_list))/batch/2)) == 0:
        # if ii%10 == 0:
            train_LOSS.append(np.mean(train_loss))
            plt.figure
            plt.plot( train_LOSS )
            # plt.plot( train_loss )
            # plt.ylim([0, 1.0])
            plt.show()
            train_ACC.append(np.mean(train_acc))
            plt.figure
            plt.plot(train_ACC)
            # plt.ylim([0.0,1])
            plt.show()
            # reset the short-horizon traces after averaging
            train_acc = []
            train_loss = []
            # plt.figure
            # plt.plot( train_loss )
            # # plt.ylim([0, 1.0])
            # plt.show()
            # plt.figure
            # plt.plot(train_acc)
            # # plt.ylim([0.0,1])
            # plt.show()
            # last minibatch: labels vs. predicted class-1 probability
            plt.figure()
            plt.plot(Lbl.detach().cpu().numpy())
            plt.plot(Pred[:,1].detach().cpu().numpy())
            plt.show()
            # t = time.time()
        ii=ii+1
    # train_ACC.append(np.mean(train_acc))
    # plt.figure
    # plt.plot(train_ACC)
    # plt.ylim([0.0,1])
    # plt.show()
    scheduler.step()
# batch=8
# proc=0.95
# path_data = os.path.normpath( 'C:\data\jakubicek\GEN_Data_reload')
# sigs_list = glob.glob(os.path.normpath( path_data + "\*.npy"))
# N = np.array( np.shape(sigs_list))
# sigs_list = np.random.permutation( sigs_list )
# train_list = sigs_list[0:int(np.round(N*proc))]
# test_list = sigs_list[int(np.round(int(N)*proc))+1:int(N)]
# # # LSTM training○
# # net = NetGEN().cuda()
# net = torch.load(r"D:\jakubicek\Bioinformatika\netv5_0.pt")
# # net = torch.load(r"D:\jakubicek\Bioinformatika\netv3_0.pt")
# # net = torch.load(r"D:\jakubicek\Bioinformatika\netv2_0.pt")
# optimizer = optim.Adam(net.parameters(), lr=0.00001,weight_decay=1e-6)
# # optimizer = optim.SGD(net.parameters(), lr=0.0001, weight_decay=1e-6)
# # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1*3593/batch, gamma=0.1, verbose=False)
# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1, verbose=False)
# # net.init_hiden(batch)
# train_loss = []
# train_acc = []
# train_ACC = []
# for epch in range(0,20):
# net.train()
# ii=0
# iii=1
# indx=0
# train_list = np.random.permutation( train_list )
# # net.init_hiden(batch)
# for ite in range(0, len(train_list), 1):
# # for ite in range(0, 10, batch):
# net.train()
# net.zero_grad()
# batch=8
# if iii==1:
# sample,lbl = loaders.loaderWinGen(indx, train_list, batch, mode='interp')
# indx = indx+batch
# elif iii==2:
# sample,lbl = loaders.loaderWinRand(indx, train_list, batch, mode='interp')
# indx = indx+batch
# elif iii==3:
# sample,lbl = loaders.loaderWhole(indx, train_list, 1)
# iii=0
# indx = indx+1
# batch=1
# net.init_hiden(batch)
# pred = net(sample.cuda())
# pred = F.softmax(pred, dim=2)
# lbl = lbl.permute([0,2,1]).cuda()
# lbl = F.interpolate(lbl, ( pred.shape[1]))
# lbl = lbl[:,0,:]
# # lbl = lbl.permute([0,2,1])
# pred = pred.permute([0,2,1])
# # weight = torch.tensor((0.05, 0.95)).cuda()
# w1 = (torch.sum(lbl[0,:])+0.0001) / (lbl.shape[1] +0.0001)
# weight = torch.tensor((w1, 1-w1)).cuda()
# loss = nn.CrossEntropyLoss(weight)( pred, lbl.type(torch.long) )
# GT = lbl.detach().cpu().numpy()
# P = pred[:,1,:].detach().cpu().numpy()>0.5
# train_acc.append( np.mean( np.sum( GT==P , 1) / GT.shape[1] ) )
# train_loss.append(loss.detach().cpu().numpy())
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# # scheduler.step()
# torch.cuda.empty_cache()
# if ii%(int((len(train_list)/batch)/10)) == 0:
# train_ACC.append(np.mean(train_acc))
# # plt.figure
# # plt.plot(train_loss)
# # plt.ylim([0, 1.0])
# # plt.show()
# plt.figure
# plt.plot(-np.log(train_ACC))
# # plt.ylim([0.0,1])
# plt.show()
# plt.figure
# plt.plot(lbl.detach().cpu().numpy()[0,:])
# plt.plot(pred.detach().cpu().numpy()[0,1,:])
# # plt.plot(P[0,:])
# plt.ylim([0.0,1])
# plt.show()
# train_acc = []
# ii=ii+1
# iii=iii+1
# if indx+8 > len(train_list):
# break
# scheduler.step()
# # n=0
# # for m in range(0, len(test_list), 1):
# # net.init_hiden(batch)
# # net.train(mode=False)
# # # net.zero_grad()
# # sample,lbl = loaderWin(m, test_list, batch )
# # pred = net(sample.cuda())
# # net.zero_grad()
# # pred = F.softmax(pred, dim=2)
# # lbl = lbl.permute([0,2,1]).cuda()
# # lbl = F.interpolate(lbl, ( pred.shape[1]))
# # lbl = lbl[:,0,:]
# # pred = pred.permute([0,2,1])
# # # loss = nn.CrossEntropyLoss(weight=torch.tensor((0.1, 0.9)).cuda() )( pred, lbl.type(torch.long) )
# # GT = lbl.detach().cpu().numpy()
# # P = pred[:,1,:].detach().cpu().numpy()>0.5
# # test_acc.append( np.mean( np.sum( GT==P , 1) / GT.shape[1] ) )
# # torch.cuda.empty_cache()
# # if n%100 == 0:
# # plt.figure
# # plt.plot(test_acc)
# # plt.ylim([0.0,1])
# # plt.show()
# # # plt.figure
# # # plt.plot(lbl.detach().cpu().numpy()[0,:])
# # # plt.plot(pred.detach().cpu().numpy()[0,1,:])
# # # # plt.plot(P[0,:])
# # # # plt.ylim([0.7,1])
# # # plt.show()
# # n=n+1
# torch.cuda.empty_cache()
# class Sig_Loader(Dataset):
# super().__init__()
# def __init__(self, path_data):
# path_data = os.path.normpath(path_data)
# sigs_list = glob.glob(os.path.normpath( path_data + "**/*.h5"))
# # self.sigs_list = []
# # for index in ind:
# # self.sigs_list.append(sigs_list[index])
# for h5dataset_fp in sigs_list:
# self._add_data_infos(str(h5dataset_fp.resolve()))
# def _add_data_infos(self, file_path):
# with h5py.File(file_path) as h5_file:
# # Walk through all groups, extracting datasets
# for gname, group in h5_file.items():
# for dname, ds in group.items():
# self.data_info.append()
# def __len__(self):
# return len(self.sigs_list)
# def __getitem__(self, index):
# sig, lbl = load_H5(sig_path, index)
# return sig, lbl
# dataset = Sig_Loader(path_data)
# train_loader = DataLoader(dataset, shuffle=True, batch_size=batch)
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"numpy.array",
"torch.nn.BatchNorm1d",
"torch.squeeze",
"torch.nn.functional.softmax",
"numpy.arange",
"torch.nn.MaxPool1d",
"numpy.mean",
"torch.nn.LSTM",
"matplotlib.pyplot.plot",
"loaders.Load_cut_gen_h5",
"os.path.normpa... | [((5185, 5260), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(30)', 'gamma': '(0.1)', 'verbose': '(True)'}), '(optimizer, step_size=30, gamma=0.1, verbose=True)\n', (5210, 5260), True, 'import torch.optim as optim\n'), ((5419, 5437), 'numpy.array', 'np.array', (['lbl_list'], {}), '(lbl_list)\n', (5427, 5437), True, 'import numpy as np\n'), ((5644, 5677), 'numpy.random.permutation', 'np.random.permutation', (['train_list'], {}), '(train_list)\n', (5665, 5677), True, 'import numpy as np\n'), ((656, 771), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_ch', 'out_channels': 'out_ch', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'padding_mode': '"""replicate"""'}), "(in_channels=in_ch, out_channels=out_ch, kernel_size=3, stride=1,\n padding=1, padding_mode='replicate')\n", (665, 771), True, 'import torch.nn as nn\n'), ((799, 808), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (806, 808), True, 'import torch.nn as nn\n'), ((830, 946), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'out_ch', 'out_channels': 'out_ch', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'padding_mode': '"""replicate"""'}), "(in_channels=out_ch, out_channels=out_ch, kernel_size=3, stride=1,\n padding=1, padding_mode='replicate')\n", (839, 946), True, 'import torch.nn as nn\n'), ((974, 983), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (981, 983), True, 'import torch.nn as nn\n'), ((1002, 1023), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['in_ch'], {}), '(in_ch)\n', (1016, 1023), True, 'import torch.nn as nn\n'), ((1767, 1803), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['(3)'], {'stride': '(2)', 'padding': '(1)'}), '(3, stride=2, padding=1)\n', (1779, 1803), True, 'import torch.nn as nn\n'), ((2412, 2531), 'torch.nn.LSTM', 'nn.LSTM', (['enc_chs[-1]', 'lstm_h_size'], {'batch_first': '(True)', 'num_layers': 'self.lstm_layers', 'bidirectional': '(False)', 'dropout': '(0.5)'}), '(enc_chs[-1], 
lstm_h_size, batch_first=True, num_layers=self.\n lstm_layers, bidirectional=False, dropout=0.5)\n', (2419, 2531), True, 'import torch.nn as nn\n'), ((2566, 2607), 'torch.nn.Linear', 'nn.Linear', (['lstm_h_size', 'h_size'], {'bias': '(True)'}), '(lstm_h_size, h_size, bias=True)\n', (2575, 2607), True, 'import torch.nn as nn\n'), ((2635, 2652), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2645, 2652), True, 'import torch.nn as nn\n'), ((2680, 2719), 'torch.nn.Linear', 'nn.Linear', (['h_size', 'num_class'], {'bias': '(True)'}), '(h_size, num_class, bias=True)\n', (2689, 2719), True, 'import torch.nn as nn\n'), ((2745, 2754), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2752, 2754), True, 'import torch.nn as nn\n'), ((3221, 3237), 'torch.squeeze', 'torch.squeeze', (['y'], {}), '(y)\n', (3234, 3237), False, 'import torch\n'), ((4343, 4382), 'os.path.normpath', 'os.path.normpath', (["(path_data + '**/*.h5')"], {}), "(path_data + '**/*.h5')\n", (4359, 4382), False, 'import os\n'), ((4467, 4487), 'h5py.File', 'h5py.File', (['file_path'], {}), '(file_path)\n', (4476, 4487), False, 'import h5py\n'), ((7008, 7064), 'loaders.Load_cut_gen_h5', 'loaders.Load_cut_gen_h5', (['ite', 'batch', 'train_list', 'dictGen'], {}), '(ite, batch, train_list, dictGen)\n', (7031, 7064), False, 'import loaders\n'), ((7251, 7273), 'torch.nn.functional.softmax', 'F.softmax', (['Pred'], {'dim': '(1)'}), '(Pred, dim=1)\n', (7260, 7273), True, 'import torch.nn.functional as F\n'), ((8927, 8951), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (8949, 8951), False, 'import torch\n'), ((5444, 5466), 'numpy.arange', 'np.arange', (['(0)', '(7 + 1)', '(1)'], {}), '(0, 7 + 1, 1)\n', (5453, 5466), True, 'import numpy as np\n'), ((7985, 8012), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', (['weight'], {}), '(weight)\n', (8004, 8012), True, 'import torch.nn as nn\n'), ((9074, 9089), 'numpy.dtype', 'np.dtype', (['float'], {}), '(float)\n', (9082, 9089), 
True, 'import numpy as np\n'), ((9181, 9193), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (9188, 9193), True, 'import numpy as np\n'), ((9440, 9460), 'matplotlib.pyplot.plot', 'plt.plot', (['train_LOSS'], {}), '(train_LOSS)\n', (9448, 9460), True, 'import matplotlib.pyplot as plt\n'), ((9545, 9555), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9553, 9555), True, 'import matplotlib.pyplot as plt\n'), ((9653, 9672), 'matplotlib.pyplot.plot', 'plt.plot', (['train_ACC'], {}), '(train_ACC)\n', (9661, 9672), True, 'import matplotlib.pyplot as plt\n'), ((9717, 9727), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9725, 9727), True, 'import matplotlib.pyplot as plt\n'), ((10101, 10113), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10111, 10113), True, 'import matplotlib.pyplot as plt\n'), ((10230, 10240), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10238, 10240), True, 'import matplotlib.pyplot as plt\n'), ((3490, 3546), 'torch.zeros', 'torch.zeros', (['(self.lstm_layers, batch, self.lstm_h_size)'], {}), '((self.lstm_layers, batch, self.lstm_h_size))\n', (3501, 3546), False, 'import torch\n'), ((3569, 3625), 'torch.zeros', 'torch.zeros', (['(self.lstm_layers, batch, self.lstm_h_size)'], {}), '((self.lstm_layers, batch, self.lstm_h_size))\n', (3580, 3625), False, 'import torch\n'), ((7547, 7571), 'torch.tensor', 'torch.tensor', (['(0.6, 0.4)'], {}), '((0.6, 0.4))\n', (7559, 7571), False, 'import torch\n'), ((9384, 9403), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (9391, 9403), True, 'import numpy as np\n'), ((9598, 9616), 'numpy.mean', 'np.mean', (['train_acc'], {}), '(train_acc)\n', (9605, 9616), True, 'import numpy as np\n'), ((5494, 5504), 'numpy.sum', 'np.sum', (['w1'], {}), '(w1)\n', (5500, 5504), True, 'import numpy as np\n')] |
#from __future__ import print_function
#from six.moves import range
from math import log
import numpy as np
class MDLP(object):
    '''
    Entropy-based Minimum Description Length Principle (MDLP)
    discretization of a continuous feature against a class target.
    '''

    def discretize_feature(self, x, binning):
        '''
        Discretize a feature x with respect to the given binning.
        Returns a numpy array of 1-based bin indices.
        Example:
            discretize_feature(np.array([1, 5, 10]), [3.0, 7.0])
            >>> array([1, 2, 3])
        '''
        # bin index = 1 + number of cut values the sample exceeds
        x_discrete = [1 + sum(1 for cut_value in binning if value > cut_value)
                      for value in x]
        return np.array(x_discrete)

    def target_table(self, target):
        '''
        Count the occurrences of each distinct value of the input vector,
        ordered by sorted value.
        Example:
            target_table([1,2,2,3,4,5,5,5,5,6])
            >>> array([1, 2, 1, 1, 4, 1])
        '''
        # np.unique returns counts in sorted-unique order, matching levels()
        _, counts = np.unique(np.asarray(target), return_counts=True)
        return counts

    def stable_log(self, input):
        '''
        Numerically stable natural logarithm: elements smaller than 1e-10
        are replaced by 1 (so their log is 0) before applying np.log.
        The input is not modified in place.
        Example (fixed: the original docstring showed an unrelated array):
            stable_log(np.array([0.0, 1.0, np.e]))
            >>> array([0., 0., 1.])
        '''
        variable = np.asarray(input, dtype=float).copy()
        variable[variable <= 1e-10] = 1.0
        return np.log(variable)

    def entropy(self, variable):
        '''
        Shannon entropy (natural log) of the empirical distribution of the
        input variable.
        Example (fixed: the original docstring referenced stable_log):
            entropy([1, 1, 2, 2])
            >>> 0.6931471805599453  # == log(2)
        '''
        prob = self.target_table(variable) / float(len(variable))
        return -np.sum(prob * self.stable_log(prob))

    def levels(self, variable):
        '''
        Sorted distinct values of the input vector.
        Example:
            levels([1,2,2,3,4,5,5,5,5,6])
            >>> array([1, 2, 3, 4, 5, 6])
        '''
        return np.unique(np.asarray(variable))

    def barrier(self, x, value):
        '''
        First index i such that x[i] > value, or None when no element
        exceeds the barrier. Intended for sorted input, so the returned
        index splits the vector at the barrier.
        Example:
            barrier([1,2,2,3,4,5,5,5,5,6], 4)
            >>> 5
        '''
        for i, item in enumerate(x):
            if item > value:
                return i
        return None  # explicit: no element exceeds the barrier

    def stopping_criterion(self, cut_idx, target, ent):
        '''
        MDLP stopping criterion. Given a cut index, the target vector and
        the entropy of the candidate split, return the information gain
        when it exceeds the MDL cost of encoding the split, otherwise None
        (which stops further recursion).
        '''
        n = len(target)
        target_entropy = self.entropy(target)
        # slices (equivalent to the original range-based fancy indexing)
        left = target[:cut_idx]
        right = target[cut_idx:]
        gain = target_entropy - ent
        k = len(self.levels(target))
        k1 = len(self.levels(left))
        k2 = len(self.levels(right))
        delta = (log(3 ** k - 2) - (k * target_entropy
                                    - k1 * self.entropy(left)
                                    - k2 * self.entropy(right)))
        cond = log(n - 1) / float(n) + delta / float(n)
        # NOTE(review): for k == 1 and n == 2 both gain and cond are 0, so a
        # pure two-element segment is still accepted; kept to preserve the
        # original behaviour.
        return gain if gain >= cond else None

    def find_cut_index(self, x, y):
        '''
        Optimal boundary (minimum weighted entropy) between two adjacent,
        distinct values of the sorted feature vector x with target y.
        Returns [index, entropy] or None when x has no distinct neighbours.
        '''
        n = len(y)
        # fix: use +inf as the sentinel instead of the magic constant 9999
        current_entropy = np.inf
        index = None
        for i in range(n - 1):
            if x[i] != x[i + 1]:
                cut = (x[i] + x[i + 1]) / 2.0
                cutx = self.barrier(x, cut)
                weight_cutx = cutx / float(n)
                left_entropy = weight_cutx * self.entropy(y[0:cutx])
                right_entropy = (1 - weight_cutx) * self.entropy(y[cutx:n])
                temp = left_entropy + right_entropy
                if temp < current_entropy:
                    current_entropy = temp
                    index = i + 1
        if index is not None:
            return [index, current_entropy]
        return None

    def cut_points(self, x, y):
        '''
        Main entry point of MDLP. Given a feature vector x and a target
        vector y, compute the array of cut values used for binning x.
        '''
        sorted_index = np.argsort(x)
        xo = x[sorted_index]
        yo = y[sorted_index]
        depth = 1

        def getIndex(low, upp, depth=depth):
            # Best accepted cut inside xo[low:upp], or None (MDL stop).
            x_seg = xo[low:upp]
            y_seg = yo[low:upp]
            cut = self.find_cut_index(x_seg, y_seg)
            if cut is None:
                return None
            cut_index = int(cut[0])
            current_entropy = cut[1]
            ret = self.stopping_criterion(cut_index, np.array(y_seg),
                                          current_entropy)
            if ret is not None:
                return [cut_index, depth + 1]
            return None

        # NOTE(review): upp defaults to len(xo) - 1, so the last sample is
        # excluded from the search; kept as-is to preserve behaviour.
        def part(low=0, upp=len(xo) - 1, cut_points=np.array([]), depth=depth):
            x_seg = xo[low:upp]
            if len(x_seg) < 2:
                return cut_points
            cc = getIndex(low, upp, depth=depth)
            if cc is None:
                return cut_points
            ci = int(cc[0])
            depth = int(cc[1])
            cut_points = np.append(cut_points, low + ci)
            cut_points = cut_points.astype(int)
            cut_points.sort()
            # recurse left and right of the accepted cut; duplicates in the
            # concatenated result are removed by np.unique below
            return (list(part(low, low + ci, cut_points, depth=depth))
                    + list(part(low + ci + 1, upp, cut_points, depth=depth)))

        res = part(depth=depth)
        cut_value = []
        if res is not None:
            for index in res:
                cut_value.append((xo[index - 1] + xo[index]) / 2.0)
        return np.unique(cut_value)
| [
"numpy.unique",
"numpy.log",
"math.log",
"numpy.argsort",
"numpy.array",
"numpy.append"
] | [((564, 584), 'numpy.array', 'np.array', (['x_discrete'], {}), '(x_discrete)\n', (572, 584), True, 'import numpy as np\n'), ((1090, 1106), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (1098, 1106), True, 'import numpy as np\n'), ((1619, 1635), 'numpy.log', 'np.log', (['variable'], {}), '(variable)\n', (1625, 1635), True, 'import numpy as np\n'), ((5186, 5199), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (5196, 5199), True, 'import numpy as np\n'), ((6699, 6719), 'numpy.unique', 'np.unique', (['cut_value'], {}), '(cut_value)\n', (6708, 6719), True, 'import numpy as np\n'), ((3589, 3604), 'math.log', 'log', (['(3 ** k - 2)'], {}), '(3 ** k - 2)\n', (3592, 3604), False, 'from math import log\n'), ((5851, 5863), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5859, 5863), True, 'import numpy as np\n'), ((6164, 6195), 'numpy.append', 'np.append', (['cut_points', '(low + ci)'], {}), '(cut_points, low + ci)\n', (6173, 6195), True, 'import numpy as np\n'), ((3745, 3755), 'math.log', 'log', (['(n - 1)'], {}), '(n - 1)\n', (3748, 3755), False, 'from math import log\n'), ((5604, 5615), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5612, 5615), True, 'import numpy as np\n')] |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Generate a QC plot for (BOLD fMRI) motion estimates of multiple segments.
The generated figure consists of two subplots: one for translation and one for
rotation. The L2-norm for each motion type is plotted. Segment boundaries
are indicated with dashed vertical lines. The following statistics are
visualized
1. Range across subjects (min, max) with a light gray shaded area
2. 50% percentile around the mean with a medium gray shaded area
3. Standard error of the mean (SEM; +/-) with a dark gray shaded area
4. Median across subjects with a black line
5. Outlier subjects are represented as individual red lines
Outliers are defined as subjects that exceed the threshold anywhere within
a given segment. In that case the entire motion time series for that segment is
plotted as an outlier.
Example
pymvpa2 plotmotionqc \
-s sub*/func/*run-1_bold_mc.txt \
-s sub*/func/*run-2_bold_mc.txt \
--savefig motion.png
"""
# magic line for manpage summary
# man: -*- % BOLD fMRI motion QC plot
__docformat__ = 'restructuredtext'
import argparse
import numpy as np
# Keyword arguments forwarded to argparse.ArgumentParser by the command-line
# framework; raw formatting keeps the module docstring's line breaks intact.
parser_args = {
    'formatter_class': argparse.RawDescriptionHelpFormatter,
}
def setup_parser(parser):
    """Register this command's options on ``parser`` and return it.

    Fixes several user-facing help-text defects of the original: "oposite",
    "this this", an unbalanced parenthesis and "multiple run".
    """
    parser.add_argument(
        '-s', '--segment', metavar='FILE', type=np.loadtxt, nargs='+',
        action='append',
        help="""two or more text files with motion estimate time series.
        This option can be given multiple times (with multiple time series
        each) to generate a multi-segment plot (e.g. for multiple runs).""")
    parser.add_argument(
        '--estimate-order', metavar='LABEL', default='transrot',
        choices=('transrot', 'rottrans'),
        help="""column order of estimates in the files. `transrot` indicates
        translation first, followed by rotation. `rottrans` refers to the
        opposite order. [Default: 'transrot']""")
    parser.add_argument(
        '--rad2deg', action='store_true',
        help="""If specified, rotation estimates are assumed to be in radian
        and will be converted to degrees.""")
    parser.add_argument(
        '--outlier-minthresh', type=float, default=None,
        help="""absolute minimum threshold of outlier detection. Only values
        larger than this threshold will ever be considered as an
        outlier. [Default: None]""")
    parser.add_argument(
        '--outlier-stdthresh', type=float, default=None,
        help="""minimum threshold in units of standard deviation
        for outlier detection. [Default: None]""")
    return parser
def motionqc_plot(data, outlier_abs_minthresh=None, outlier_stdthresh=None, ylabel=None):
    """Render one motion-QC time series boxplot into the current figure.

    ``data`` is a sequence of segments, each shaped
    (subjects x timepoints x properties). Returns per-segment lists of
    outlier sample indices when ``outlier_stdthresh`` is given, else None.
    """
    import pylab as pl
    from mvpa2.misc.plot import timeseries_boxplot, concat_ts_boxplot_stats
    from mvpa2.misc.stats import compute_ts_boxplot_stats

    seg_lengths = [seg.shape[1] for seg in data]
    # per-segment summary statistics, concatenated along the time axis
    per_segment = [
        compute_ts_boxplot_stats(
            seg,
            outlier_abs_minthresh=outlier_abs_minthresh,
            outlier_thresh=outlier_stdthresh,
            aggfx=np.linalg.norm,
            greedy_outlier=True)
        for seg in data
    ]
    stats = concat_ts_boxplot_stats(per_segment)

    outlier = None
    if outlier_stdthresh:
        # unmasked entries of each per-segment outlier array mark outliers
        outlier = []
        for o in stats[1]:
            hits = np.sum(np.logical_not(o.mask), axis=0)
            outlier.append(list(np.where(hits)[0]))

    summary = stats[0]
    timeseries_boxplot(
        summary['median'],
        mean=summary['mean'],
        std=summary['std'],
        n=summary['n'],
        min=summary['min'],
        max=summary['max'],
        p25=summary['p25'],
        p75=summary['p75'],
        outlierd=stats[1],
        segment_sizes=seg_lengths)
    # blank out the tick labels but keep the tick positions
    tick_pos, tick_labels = pl.xticks()
    pl.xticks(tick_pos, ['' for _ in tick_labels])
    pl.xlim((0, len(summary['n'])))
    if ylabel:
        pl.ylabel(ylabel)
    pl.xlabel('time')
    return outlier
def run(args):
    """Entry point: build the two-panel (translation/rotation) QC figure."""
    import pylab as pl
    from mvpa2.base import verbose

    # segments x [subjects x timepoints x properties]
    data = [np.array(seg) for seg in args.segment]
    if args.estimate_order == 'rottrans':
        # reorder columns into the canonical translation-first layout
        data = [seg[:, :, (3, 4, 5, 0, 1, 2)] for seg in data]
    if args.rad2deg:
        # rotations are the last three columns after reordering;
        # convert in place
        for seg in data:
            rot = seg[:, :, 3:]
            np.rad2deg(rot, rot)

    fig = pl.figure(figsize=(12, 5))
    # top panel: translation estimates
    ax = pl.subplot(211)
    outlier = motionqc_plot(
        [seg[..., :3] for seg in data],
        args.outlier_minthresh,
        args.outlier_stdthresh,
        "translation\nestimate L2-norm")
    if outlier:
        verbose(
            0,
            "Detected per-segment translation outlier input samples {0} (zero-based)".format(
                outlier))
    # bottom panel: rotation estimates
    ax = pl.subplot(212)
    outlier = motionqc_plot(
        [seg[..., 3:] for seg in data],
        args.outlier_minthresh,
        args.outlier_stdthresh,
        "rotation\nestimate L2-norm")
    if outlier:
        verbose(
            0,
            "Detected per-segment rotation outlier input samples {0} (zero-based)".format(
                outlier))
    if args.savefig is None:
        pl.show()
    else:
        pl.savefig(args.savefig[0])
| [
"mvpa2.misc.stats.compute_ts_boxplot_stats",
"mvpa2.misc.plot.timeseries_boxplot",
"pylab.subplot",
"pylab.xticks",
"pylab.savefig",
"pylab.xlabel",
"numpy.logical_not",
"pylab.figure",
"numpy.array",
"numpy.rad2deg",
"pylab.ylabel",
"pylab.show"
] | [((3992, 4238), 'mvpa2.misc.plot.timeseries_boxplot', 'timeseries_boxplot', (["stats[0]['median']"], {'mean': "stats[0]['mean']", 'std': "stats[0]['std']", 'n': "stats[0]['n']", 'min': "stats[0]['min']", 'max': "stats[0]['max']", 'p25': "stats[0]['p25']", 'p75': "stats[0]['p75']", 'outlierd': 'stats[1]', 'segment_sizes': 'segment_sizes'}), "(stats[0]['median'], mean=stats[0]['mean'], std=stats[0][\n 'std'], n=stats[0]['n'], min=stats[0]['min'], max=stats[0]['max'], p25=\n stats[0]['p25'], p75=stats[0]['p75'], outlierd=stats[1], segment_sizes=\n segment_sizes)\n", (4010, 4238), False, 'from mvpa2.misc.plot import timeseries_boxplot, concat_ts_boxplot_stats\n'), ((4318, 4329), 'pylab.xticks', 'pl.xticks', ([], {}), '()\n', (4327, 4329), True, 'import pylab as pl\n'), ((4334, 4365), 'pylab.xticks', 'pl.xticks', (['xp', "['' for i in xl]"], {}), "(xp, ['' for i in xl])\n", (4343, 4365), True, 'import pylab as pl\n'), ((4449, 4466), 'pylab.xlabel', 'pl.xlabel', (['"""time"""'], {}), "('time')\n", (4458, 4466), True, 'import pylab as pl\n'), ((5030, 5056), 'pylab.figure', 'pl.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (5039, 5056), True, 'import pylab as pl\n'), ((5084, 5099), 'pylab.subplot', 'pl.subplot', (['(211)'], {}), '(211)\n', (5094, 5099), True, 'import pylab as pl\n'), ((5462, 5477), 'pylab.subplot', 'pl.subplot', (['(212)'], {}), '(212)\n', (5472, 5477), True, 'import pylab as pl\n'), ((4426, 4443), 'pylab.ylabel', 'pl.ylabel', (['ylabel'], {}), '(ylabel)\n', (4435, 4443), True, 'import pylab as pl\n'), ((4629, 4640), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (4637, 4640), True, 'import numpy as np\n'), ((5848, 5857), 'pylab.show', 'pl.show', ([], {}), '()\n', (5855, 5857), True, 'import pylab as pl\n'), ((5876, 5903), 'pylab.savefig', 'pl.savefig', (['args.savefig[0]'], {}), '(args.savefig[0])\n', (5886, 5903), True, 'import pylab as pl\n'), ((3576, 3734), 'mvpa2.misc.stats.compute_ts_boxplot_stats', 'compute_ts_boxplot_stats', 
(['d'], {'outlier_abs_minthresh': 'outlier_abs_minthresh', 'outlier_thresh': 'outlier_stdthresh', 'aggfx': 'np.linalg.norm', 'greedy_outlier': '(True)'}), '(d, outlier_abs_minthresh=outlier_abs_minthresh,\n outlier_thresh=outlier_stdthresh, aggfx=np.linalg.norm, greedy_outlier=True\n )\n', (3600, 3734), False, 'from mvpa2.misc.stats import compute_ts_boxplot_stats\n'), ((4968, 4984), 'numpy.rad2deg', 'np.rad2deg', (['v', 'v'], {}), '(v, v)\n', (4978, 4984), True, 'import numpy as np\n'), ((3901, 3923), 'numpy.logical_not', 'np.logical_not', (['o.mask'], {}), '(o.mask)\n', (3915, 3923), True, 'import numpy as np\n')] |
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
import numpy
import sys
import re
###Definitions###
infile = sys.argv[1]  # path of the input FASTA file (first command-line argument)
###Functions###
def get_scaff_names(file):
    """Return the IDs of all sequences found in a FASTA file."""
    return [record.id for record in SeqIO.parse(file, 'fasta')]
def get_scaffold_stats(scaff_index, scaff_name):
    """Return 1 if the named scaffold consists entirely of N bases, else 0.

    scaff_index -- a Bio.SeqIO index (anything exposing get_raw(name) -> bytes)
    scaff_name  -- record ID of the scaffold to inspect

    A zero-length scaffold also yields 1, matching the original behaviour
    (0 Ns == 0 length). The per-base A/T/G/C tallies of the old version were
    never used, so only the N count is computed.
    """
    raw = scaff_index.get_raw(scaff_name).decode()
    seq = re.sub(r">.+\n", "", raw)            # drop the FASTA header line
    seq = re.sub(r"\s+", "", seq).upper()     # strip whitespace, normalize case
    return 1 if seq.count("N") == len(seq) else 0
###Main###
scaff_names = get_scaff_names(infile) # collect all scaffold IDs
scaff_index = SeqIO.index(infile, 'fasta') # random-access index so records are fetched lazily
n_sequences = [get_scaffold_stats(scaff_index, name) for name in scaff_names] # 1 per all-N scaffold, else 0
print(numpy.sum(n_sequences)) # number of scaffolds consisting entirely of Ns
| [
"numpy.sum",
"Bio.SeqIO.parse",
"Bio.SeqIO.index",
"re.sub",
"re.findall"
] | [((960, 988), 'Bio.SeqIO.index', 'SeqIO.index', (['infile', '"""fasta"""'], {}), "(infile, 'fasta')\n", (971, 988), False, 'from Bio import SeqIO\n'), ((234, 260), 'Bio.SeqIO.parse', 'SeqIO.parse', (['file', '"""fasta"""'], {}), "(file, 'fasta')\n", (245, 260), False, 'from Bio import SeqIO\n'), ((1075, 1097), 'numpy.sum', 'numpy.sum', (['n_sequences'], {}), '(n_sequences)\n', (1084, 1097), False, 'import numpy\n'), ((585, 605), 're.findall', 're.findall', (['"""A"""', 'seq'], {}), "('A', seq)\n", (595, 605), False, 'import re\n'), ((623, 643), 're.findall', 're.findall', (['"""T"""', 'seq'], {}), "('T', seq)\n", (633, 643), False, 'import re\n'), ((661, 681), 're.findall', 're.findall', (['"""G"""', 'seq'], {}), "('G', seq)\n", (671, 681), False, 'import re\n'), ((699, 719), 're.findall', 're.findall', (['"""C"""', 'seq'], {}), "('C', seq)\n", (709, 719), False, 'import re\n'), ((737, 757), 're.findall', 're.findall', (['"""N"""', 'seq'], {}), "('N', seq)\n", (747, 757), False, 'import re\n'), ((481, 504), 're.sub', 're.sub', (['"""\\\\s+"""', '""""""', 'seq'], {}), "('\\\\s+', '', seq)\n", (487, 504), False, 'import re\n')] |
import os
import sys
import numpy as np
from copy import deepcopy
from collections import deque
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
sys.path.append(os.path.join(os.environ["HOME"], "TTTArena"))
from environment import Environment
from alphazero.mcts import MCTS
from alphazero.database import DataBase
from alphazero.database import prepare_state
# fix both RNGs so weight initialization and sampling are reproducible
torch.manual_seed(80085)
np.random.seed(80085)
def softXEnt(inp, target):
    """Soft-label cross entropy: batch mean of -sum(target * log(inp)).

    inp    -- predicted probabilities, shape (batch, classes)
    target -- soft target distribution, same shape
    """
    batch_size = inp.shape[0]
    return -(target * torch.log(inp)).sum() / batch_size
# TODO: try out this variant of residual blocks (diff from paper but same as behavioral_cloning) if doesn't work well
# try the regular BasicBlock (same as paper)
class IdentityBlock(nn.Module):
    """Residual block whose skip path is a plain identity (input and output
    channel counts match, so no projection is needed).

    f         -- square kernel size for all three convolutions
    filters   -- (narrow, wide) channel counts for the bottleneck
    input_dim -- number of input channels (must equal filters[0] for the add)
    """

    def __init__(self, f, filters, input_dim, use_bias=True):
        super().__init__()
        same_pad = (f - 1) // 2  # keeps spatial size unchanged ("same" padding)
        narrow, wide = filters
        self.conv1 = nn.Conv2d(input_dim, narrow, padding=(same_pad, same_pad), kernel_size=f, stride=1, bias=use_bias)
        self.conv2 = nn.Conv2d(narrow, wide, padding=(same_pad, same_pad), kernel_size=f, stride=1, bias=use_bias)
        self.conv3 = nn.Conv2d(wide, narrow, padding=(same_pad, same_pad), kernel_size=f, stride=1, bias=use_bias)

    def forward(self, x):
        residual = x
        out = F.leaky_relu(self.conv1(x), 0.2)
        out = F.leaky_relu(self.conv2(out), 0.2)
        out = self.conv3(out)
        out = out + residual  # identity skip connection
        return F.leaky_relu(out, 0.2)
class ConvolutionalBlock(nn.Module):
    """Residual block with a 1x1 projection on the skip path, so the output
    channel count may differ from the input.

    f         -- square kernel size for the three main-path convolutions
    filters   -- (mid1, mid2, out) channel counts along the main path
    input_dim -- number of input channels
    """

    def __init__(self, f, filters, input_dim, use_bias=True):
        super().__init__()
        same_pad = (f - 1) // 2  # keeps spatial size unchanged ("same" padding)
        mid1, mid2, out_ch = filters
        self.conv1 = nn.Conv2d(input_dim, mid1, padding=(same_pad, same_pad), kernel_size=f, stride=1, bias=use_bias)
        self.conv2 = nn.Conv2d(mid1, mid2, padding=(same_pad, same_pad), kernel_size=f, stride=1, bias=use_bias)
        self.conv3 = nn.Conv2d(mid2, out_ch, padding=(same_pad, same_pad), kernel_size=f, stride=1, bias=use_bias)
        # 1x1 convolution that maps the input channels to the output width
        self.conv_change = nn.Conv2d(input_dim, out_ch, padding=(0, 0), kernel_size=1, stride=1, bias=use_bias)

    def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.2)
        out = F.leaky_relu(self.conv2(out), 0.2)
        out = self.conv3(out)
        out = out + self.conv_change(x)  # projected skip connection
        return F.leaky_relu(out, 0.2)
class PolicyHead(nn.Module):
    """Maps the shared 24-channel trunk features to a flattened probability
    distribution over board positions (softmax over all cells)."""

    def __init__(self, board_shape, use_bias):
        super().__init__()
        self.board_shape = board_shape
        self.identity1 = IdentityBlock(3, [24, 48], 24, use_bias)
        self.conv1 = nn.Conv2d(24, 1, padding=(1, 1), kernel_size=3, stride=1, bias=use_bias)
        self.flatten = nn.Flatten()

    def forward(self, x):
        logits = self.conv1(self.identity1(x))  # collapse to one plane
        return F.softmax(self.flatten(logits), dim=1)
class ValueHead(nn.Module):
    """Maps the shared trunk features to a scalar state value in [-1, 1].

    NOTE(review): the Linear(100, 1) assumes the flattened single-channel
    feature map has 100 elements (i.e. a 10x10 board) -- confirm against the
    board_len used to build the network.
    """

    def __init__(self, use_bias):
        super().__init__()
        self.convolutional1 = ConvolutionalBlock(3, [24, 48, 1], 24, use_bias)
        self.val_linear1 = nn.Linear(100, 1)
        self.flatten = nn.Flatten()

    def forward(self, x):
        features = self.flatten(self.convolutional1(x))
        return torch.tanh(self.val_linear1(features))  # squash into [-1, 1]
class Brain(nn.Module):
    """AlphaZero-style network: a shared convolutional trunk feeding separate
    policy and value heads.

    input_shape -- (channels, height, width) of the board encoding.
    forward returns (policy, value) for a batched input.
    """

    def __init__(self, input_shape=(3, 30, 30)):
        super().__init__()
        self.input_shape = input_shape
        use_bias = True
        self.conv1 = nn.Conv2d(input_shape[0], 16, padding=(2, 2), kernel_size=5, stride=1, bias=use_bias)
        self.convolutional1 = ConvolutionalBlock(5, [24, 48, 24], 16, use_bias)
        self.identity1 = IdentityBlock(5, [24, 48], 24, use_bias)
        self.policy_head = PolicyHead(input_shape, use_bias)
        self.value_head = ValueHead(use_bias)

    def forward(self, x):
        trunk = F.leaky_relu(self.conv1(x))
        trunk = self.identity1(self.convolutional1(trunk))
        return self.policy_head(trunk), self.value_head(trunk)
class ZeroTTT():
    """Bundles the Brain network with its optimizer and losses, plus
    checkpoint save/load and single-state inference for AlphaZero training."""
    def __init__(self, brain_path, opt_path, args={"board_len": 10, "lr": 3e-4, "weight_decay": 1e-4}):
        '''
        brain_path - path to model params
        opt_path - path to optimizer state
        args:
            board_len - # of rows and columns on board
            lr - learning rate
            weight_decay - weight decay
        '''
        # NOTE(review): `args` is a mutable default argument; it is only read
        # here, but the default dict is shared by every no-args instance.
        self.args = args
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # two input planes (presumably one per player -- confirm with Environment)
        self.brain = Brain(input_shape=(2, self.args["board_len"], self.args["board_len"])).to(self.device)
        self.policy_loss = softXEnt
        self.value_loss = nn.MSELoss()
        self.optimizer = optim.AdamW(self.brain.parameters(), lr=self.args["lr"], weight_decay=self.args["weight_decay"])
        if brain_path is not None:
            self.load_brain(brain_path, opt_path)
    # TODO: fix for nested Modules
    # NOTE(review): Module.parameters() already recurses into submodules, so
    # this may already be correct for nested Modules -- verify before changing.
    def get_parameter_count(self):
        # total number of trainable parameters in the network
        return sum(p.numel() for p in self.brain.parameters() if p.requires_grad)
    def save_brain(self, model_name, opt_state_name):
        # Persist weights (and optionally optimizer state) under
        # $HOME/TTTArena/alphazero/models/.
        print("Saving brain...")
        torch.save(self.brain.state_dict(), os.path.join(os.environ["HOME"], "TTTArena/alphazero/models", model_name))
        if opt_state_name is not None:
            torch.save(self.optimizer.state_dict(), os.path.join(os.environ["HOME"], "TTTArena/alphazero/models", opt_state_name))
    def load_brain(self, model_name, opt_state_name):
        # Restore weights (and optionally optimizer state), mapping tensors
        # onto self.device so CPU-only hosts can load GPU checkpoints.
        print("Loading brain...")
        self.brain.load_state_dict(torch.load(os.path.join(os.environ["HOME"], "TTTArena/alphazero/models", model_name), map_location=self.device))
        if opt_state_name is not None:
            self.optimizer.load_state_dict(torch.load(os.path.join(os.environ["HOME"], "TTTArena/alphazero/models", opt_state_name), map_location=self.device))
        return
    def predict(self, x, interpret_output=True):
        # Run the network on a numpy board encoding.
        # x: (2, board_len, board_len) or already batched (N, 2, board_len, board_len).
        # When interpret_output is True, returns (2d numpy policy map, float value)
        # for the first batch element; otherwise the raw batched tensors.
        if len(x.shape) < 4:
            x = np.expand_dims(x, axis=0)  # add batch dimension
        x = torch.from_numpy(x).float().to(self.device)
        policy, value = self.brain(x)
        if interpret_output: # return 2d policy map and value in usable form
            policy = policy.view(-1, self.args["board_len"], self.args["board_len"])
            policy = policy[0].cpu().detach().numpy()
            value = value[0][0].item()
        return policy, value
| [
"torch.tanh",
"torch.manual_seed",
"torch.nn.functional.leaky_relu",
"torch.log",
"torch.nn.Flatten",
"os.path.join",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"numpy.random.seed",
"torch.nn.Linear",
"numpy.expand_dims",
"torch.nn.functional.soft... | [((411, 435), 'torch.manual_seed', 'torch.manual_seed', (['(80085)'], {}), '(80085)\n', (428, 435), False, 'import torch\n'), ((436, 457), 'numpy.random.seed', 'np.random.seed', (['(80085)'], {}), '(80085)\n', (450, 457), True, 'import numpy as np\n'), ((209, 253), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""TTTArena"""'], {}), "(os.environ['HOME'], 'TTTArena')\n", (221, 253), False, 'import os\n'), ((500, 514), 'torch.log', 'torch.log', (['inp'], {}), '(inp)\n', (509, 514), False, 'import torch\n'), ((956, 1045), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_dim', 'F1'], {'padding': '(pad, pad)', 'kernel_size': 'f', 'stride': '(1)', 'bias': 'use_bias'}), '(input_dim, F1, padding=(pad, pad), kernel_size=f, stride=1, bias=\n use_bias)\n', (965, 1045), False, 'from torch import nn\n'), ((1057, 1134), 'torch.nn.Conv2d', 'nn.Conv2d', (['F1', 'F2'], {'padding': '(pad, pad)', 'kernel_size': 'f', 'stride': '(1)', 'bias': 'use_bias'}), '(F1, F2, padding=(pad, pad), kernel_size=f, stride=1, bias=use_bias)\n', (1066, 1134), False, 'from torch import nn\n'), ((1152, 1229), 'torch.nn.Conv2d', 'nn.Conv2d', (['F2', 'F1'], {'padding': '(pad, pad)', 'kernel_size': 'f', 'stride': '(1)', 'bias': 'use_bias'}), '(F2, F1, padding=(pad, pad), kernel_size=f, stride=1, bias=use_bias)\n', (1161, 1229), False, 'from torch import nn\n'), ((1303, 1323), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (1315, 1323), True, 'from torch.nn import functional as F\n'), ((1355, 1375), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (1367, 1375), True, 'from torch.nn import functional as F\n'), ((1425, 1445), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (1437, 1445), True, 'from torch.nn import functional as F\n'), ((1663, 1752), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_dim', 'F1'], {'padding': '(pad, pad)', 'kernel_size': 'f', 'stride': '(1)', 
'bias': 'use_bias'}), '(input_dim, F1, padding=(pad, pad), kernel_size=f, stride=1, bias=\n use_bias)\n', (1672, 1752), False, 'from torch import nn\n'), ((1765, 1842), 'torch.nn.Conv2d', 'nn.Conv2d', (['F1', 'F2'], {'padding': '(pad, pad)', 'kernel_size': 'f', 'stride': '(1)', 'bias': 'use_bias'}), '(F1, F2, padding=(pad, pad), kernel_size=f, stride=1, bias=use_bias)\n', (1774, 1842), False, 'from torch import nn\n'), ((1860, 1937), 'torch.nn.Conv2d', 'nn.Conv2d', (['F2', 'F3'], {'padding': '(pad, pad)', 'kernel_size': 'f', 'stride': '(1)', 'bias': 'use_bias'}), '(F2, F3, padding=(pad, pad), kernel_size=f, stride=1, bias=use_bias)\n', (1869, 1937), False, 'from torch import nn\n'), ((1961, 2046), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_dim', 'F3'], {'padding': '(0, 0)', 'kernel_size': '(1)', 'stride': '(1)', 'bias': 'use_bias'}), '(input_dim, F3, padding=(0, 0), kernel_size=1, stride=1, bias=use_bias\n )\n', (1970, 2046), False, 'from torch import nn\n'), ((2114, 2134), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (2126, 2134), True, 'from torch.nn import functional as F\n'), ((2166, 2186), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (2178, 2186), True, 'from torch.nn import functional as F\n'), ((2280, 2300), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (2292, 2300), True, 'from torch.nn import functional as F\n'), ((2527, 2599), 'torch.nn.Conv2d', 'nn.Conv2d', (['(24)', '(1)'], {'padding': '(1, 1)', 'kernel_size': '(3)', 'stride': '(1)', 'bias': 'use_bias'}), '(24, 1, padding=(1, 1), kernel_size=3, stride=1, bias=use_bias)\n', (2536, 2599), False, 'from torch import nn\n'), ((2619, 2631), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (2629, 2631), False, 'from torch import nn\n'), ((2737, 2756), 'torch.nn.functional.softmax', 'F.softmax', (['p'], {'dim': '(1)'}), '(p, dim=1)\n', (2746, 2756), True, 'from torch.nn import functional as 
F\n'), ((2952, 2969), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(1)'], {}), '(100, 1)\n', (2961, 2969), False, 'from torch import nn\n'), ((2989, 3001), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (2999, 3001), False, 'from torch import nn\n'), ((3118, 3131), 'torch.tanh', 'torch.tanh', (['v'], {}), '(v)\n', (3128, 3131), False, 'import torch\n'), ((3313, 3403), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_shape[0]', '(16)'], {'padding': '(2, 2)', 'kernel_size': '(5)', 'stride': '(1)', 'bias': 'use_bias'}), '(input_shape[0], 16, padding=(2, 2), kernel_size=5, stride=1, bias\n =use_bias)\n', (3322, 3403), False, 'from torch import nn\n'), ((3691, 3706), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (3703, 3706), True, 'from torch.nn import functional as F\n'), ((4435, 4447), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4445, 4447), False, 'from torch import nn\n'), ((4909, 4982), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""TTTArena/alphazero/models"""', 'model_name'], {}), "(os.environ['HOME'], 'TTTArena/alphazero/models', model_name)\n", (4921, 4982), False, 'import os\n'), ((5656, 5681), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (5670, 5681), True, 'import numpy as np\n'), ((4238, 4263), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4261, 4263), False, 'import torch\n'), ((5065, 5142), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""TTTArena/alphazero/models"""', 'opt_state_name'], {}), "(os.environ['HOME'], 'TTTArena/alphazero/models', opt_state_name)\n", (5077, 5142), False, 'import os\n'), ((5269, 5342), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""TTTArena/alphazero/models"""', 'model_name'], {}), "(os.environ['HOME'], 'TTTArena/alphazero/models', model_name)\n", (5281, 5342), False, 'import os\n'), ((5456, 5533), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""TTTArena/alphazero/models"""', 
'opt_state_name'], {}), "(os.environ['HOME'], 'TTTArena/alphazero/models', opt_state_name)\n", (5468, 5533), False, 'import os\n'), ((5691, 5710), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (5707, 5710), False, 'import torch\n')] |
import numpy as np
import cv2
from libs.util import MaskGenerator, ImageChunker
# 63x63 binary mask cut from a 128x128 randomly generated mask (one channel)
mask = MaskGenerator(128, 128, 3, rand_seed = 1222)._generate_mask()
mask=mask[0:63,0:63,0]
# import keras.activations as activations
# import tensorflow as tf
# f = np.array([[1, 2, 1],
#               [1, 0, 0],
#               [-1, 0, 1]])
# img = np.array([
#     [2, 3, 7, 4, 6, 2, 9],
#     [6, 6, 9, 8, 7, 4, 3],
#     [3, 4, 8, 3, 8, 9, 7],
#     [7, 8, 3, 6, 6, 3, 4],
#     [4, 2, 1, 8, 3, 4, 6],
#     [3, 2, 4, 1, 9, 8, 3],
#     [4, 5, 3, 9, 2, 1, 4]])
# img=np.random.randint(0,50,(63, 63))  # this generates a matrix of random integers
input_img=cv2.imread(r"C:\Users\dell\Desktop\paper2\\figure\Fig3\\img.jpg")
# img0=np.array(input_img)
img=input_img[:,:,0]  # use a single color channel as the grayscale input
masked_img=img*mask   # zero out the pixels hidden by the mask
# random convolution kernels of three different sizes
kernel1=np.random.rand(7,7)
kernel2=np.random.rand(5,5)
kernel3=np.random.rand(3,3)
mask_kernel=np.ones([7,7])  # all-ones kernel: counts valid mask pixels per window
# f=round(f,1)
strde=1  # convolution stride
def cov2(img, f, strde):
    """2D "valid" cross-correlation of `img` with kernel `f` at stride `strde`.

    img   -- 2D array (rows x cols)
    f     -- 2D kernel (f_rows x f_cols)
    strde -- positive integer stride

    Returns an array of shape
    ((rows - f_rows)//strde + 1, (cols - f_cols)//strde + 1) whose cells are
    int(sum(window * f)) for each window of `img` (truncation kept from the
    original implementation).

    Fixes the original row/column mix-up: the output was allocated as
    (out_rows, out_cols) but indexed with the column counter as the row
    index, which broke non-square inputs. Square inputs (the only ones used
    in this script) produce identical results.
    """
    rows, cols = img.shape
    f_rows, f_cols = f.shape
    out_rows = (rows - f_rows) // strde + 1
    out_cols = (cols - f_cols) // strde + 1
    arr = np.zeros(shape=(out_rows, out_cols))
    for r in range(out_rows):
        for c in range(out_cols):
            # vectorized window product replaces the per-pixel double loop
            window = img[r * strde:r * strde + f_rows, c * strde:c * strde + f_cols]
            arr[r][c] = int(np.sum(window * f))
    return arr
# convolve the masked image with each random kernel
result1=cov2(masked_img,kernel1,strde)
result2=cov2(masked_img,kernel2,strde)
result3=cov2(masked_img,kernel3,strde)
# count of valid (unmasked) pixels under each 7x7 window
mask_result=cov2(mask,mask_kernel,strde)
# result2=cov2(result1,f,1)
# result3=cov2(result2,f,1)
# print(img)
# print(kernel)
# print(result1)
# print(mask)
# np.savetxt(r'./RandomMatrix.txt',result1,fmt="%d", delimiter=',', header="行,"+"列",footer='By Accelerator')
# dump all inputs, kernels and results as CSV text for the paper figure
np.savetxt(r'C:\Users\dell\Desktop\paper2\figure\Fig3\mask.txt',mask,fmt="%d", delimiter=',')
np.savetxt(r'C:\Users\dell\Desktop\paper2\figure\Fig3\mask_result.txt',mask_result,fmt="%d", delimiter=',')
np.savetxt(r'C:\Users\dell\Desktop\paper2\figure\Fig3\input.txt',img,fmt="%d", delimiter=',')
np.savetxt(r'C:\Users\dell\Desktop\paper2\figure\Fig3\masked_input.txt',masked_img,fmt="%d", delimiter=',')
np.savetxt(r'C:\Users\dell\Desktop\paper2\figure\Fig3\kernel1.txt',kernel1,fmt="%f", delimiter=',')
np.savetxt(r'C:\Users\dell\Desktop\paper2\figure\Fig3\kernel2.txt',kernel2,fmt="%f", delimiter=',')
np.savetxt(r'C:\Users\dell\Desktop\paper2\figure\Fig3\kernel3.txt',kernel3,fmt="%f", delimiter=',')
np.savetxt(r'C:\Users\dell\Desktop\paper2\figure\Fig3\result1.txt',result1,fmt="%d", delimiter=',')
np.savetxt(r'C:\Users\dell\Desktop\paper2\figure\Fig3\result2.txt',result2,fmt="%d", delimiter=',')
np.savetxt(r'C:\Users\dell\Desktop\paper2\figure\Fig3\result3.txt',result3,fmt="%d", delimiter=',')
# print(result2)
# print(result3)
# sess=tf.Session()
# img=np.expand_dims(img,0)
# # img_relu= activations.relu(img, alpha=0.0, max_value=None, threshold=4)
# print(img)
# print(sess.run(img_relu)) | [
"numpy.ones",
"numpy.random.rand",
"libs.util.MaskGenerator",
"numpy.zeros",
"numpy.savetxt",
"cv2.imread"
] | [((610, 683), 'cv2.imread', 'cv2.imread', (['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\\\\\figure\\\\Fig3\\\\\\\\img.jpg"""'], {}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\\\\\figure\\\\Fig3\\\\\\\\img.jpg')\n", (620, 683), False, 'import cv2\n'), ((753, 773), 'numpy.random.rand', 'np.random.rand', (['(7)', '(7)'], {}), '(7, 7)\n', (767, 773), True, 'import numpy as np\n'), ((781, 801), 'numpy.random.rand', 'np.random.rand', (['(5)', '(5)'], {}), '(5, 5)\n', (795, 801), True, 'import numpy as np\n'), ((809, 829), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (823, 829), True, 'import numpy as np\n'), ((841, 856), 'numpy.ones', 'np.ones', (['[7, 7]'], {}), '([7, 7])\n', (848, 856), True, 'import numpy as np\n'), ((1750, 1855), 'numpy.savetxt', 'np.savetxt', (['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\mask.txt"""', 'mask'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\mask.txt', mask,\n fmt='%d', delimiter=',')\n", (1760, 1855), True, 'import numpy as np\n'), ((1844, 1963), 'numpy.savetxt', 'np.savetxt', (['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\mask_result.txt"""', 'mask_result'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\mask_result.txt',\n mask_result, fmt='%d', delimiter=',')\n", (1854, 1963), True, 'import numpy as np\n'), ((1952, 2057), 'numpy.savetxt', 'np.savetxt', (['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\input.txt"""', 'img'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\input.txt', img,\n fmt='%d', delimiter=',')\n", (1962, 2057), True, 'import numpy as np\n'), ((2046, 2165), 'numpy.savetxt', 'np.savetxt', (['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\masked_input.txt"""', 'masked_img'], {'fmt': '"""%d"""', 'delimiter': 
'""","""'}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\masked_input.txt',\n masked_img, fmt='%d', delimiter=',')\n", (2056, 2165), True, 'import numpy as np\n'), ((2154, 2265), 'numpy.savetxt', 'np.savetxt', (['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\kernel1.txt"""', 'kernel1'], {'fmt': '"""%f"""', 'delimiter': '""","""'}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\kernel1.txt',\n kernel1, fmt='%f', delimiter=',')\n", (2164, 2265), True, 'import numpy as np\n'), ((2254, 2365), 'numpy.savetxt', 'np.savetxt', (['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\kernel2.txt"""', 'kernel2'], {'fmt': '"""%f"""', 'delimiter': '""","""'}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\kernel2.txt',\n kernel2, fmt='%f', delimiter=',')\n", (2264, 2365), True, 'import numpy as np\n'), ((2354, 2465), 'numpy.savetxt', 'np.savetxt', (['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\kernel3.txt"""', 'kernel3'], {'fmt': '"""%f"""', 'delimiter': '""","""'}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\kernel3.txt',\n kernel3, fmt='%f', delimiter=',')\n", (2364, 2465), True, 'import numpy as np\n'), ((2454, 2565), 'numpy.savetxt', 'np.savetxt', (['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\result1.txt"""', 'result1'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\result1.txt',\n result1, fmt='%d', delimiter=',')\n", (2464, 2565), True, 'import numpy as np\n'), ((2554, 2665), 'numpy.savetxt', 'np.savetxt', (['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\result2.txt"""', 'result2'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\result2.txt',\n result2, fmt='%d', delimiter=',')\n", (2564, 2665), True, 'import numpy as np\n'), ((2654, 2765), 'numpy.savetxt', 'np.savetxt', 
(['"""C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\result3.txt"""', 'result3'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "('C:\\\\Users\\\\dell\\\\Desktop\\\\paper2\\\\figure\\\\Fig3\\\\result3.txt',\n result3, fmt='%d', delimiter=',')\n", (2664, 2765), True, 'import numpy as np\n'), ((1035, 1063), 'numpy.zeros', 'np.zeros', ([], {'shape': '(outw, outh)'}), '(shape=(outw, outh))\n', (1043, 1063), True, 'import numpy as np\n'), ((87, 129), 'libs.util.MaskGenerator', 'MaskGenerator', (['(128)', '(128)', '(3)'], {'rand_seed': '(1222)'}), '(128, 128, 3, rand_seed=1222)\n', (100, 129), False, 'from libs.util import MaskGenerator, ImageChunker\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.